Commit 76711fdc authored by CORNILLET Remi's avatar CORNILLET Remi

Update W, commentary, test incoming

parent 0248e57d
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 14:43:03 2023
@author: rcornill
"""
import torch as tn
from wdnmf_dual.methods.base_function import *
from wdnmf_dual.methods.grad_pb_dual import *
def optim_pb_dual(g1, g2, rho1, h, data, sigma, dictionary, reg, ent, matK,
                  stepg1, stepg2, iterg1, iterg2, iterdual = 10):
"""
Parameters
----------
g1 : 2d tn.tensor
First variable of the dual pb, size n*m
g2 : 2d tn.tensor
Second variable of the dual pb, size n*r
rho1 : float64
Regulation rate for the nonnegativity constraint w.r.t W
h : 2d tn.tensor
Abundance matrix, size r*m
data : 2d tn.tensor
Data entry, of size n*m
sigma : 1d array-like
Array of size r, corresponding to the r selected template of the dictionary
dictionary : 2d tn.tensor
Dictionary matrix, each columns is a different template, size n*d with d>r
reg : float64
Dictionary-based regulation rate
ent : float64
Entropic regulation rate
matK : 2d tn.tensor
Precalculated matrix, $\matK = \exp^{-\frac{\matM}{reg}}
stepg1 : float
Step size for gradient descent for g1
stepg2 : float
Step size for gradient descent for g2
iterg1 : int
Number of iteration to update g1 during iternal loop
iterg2 : int
Number of iteration to update g2 during iternal loop
iterdual : int, optional
Number of total loop to update g1 and g2 before updating W. The default is 10.
Returns
-------
g1 : 2d tn.tensor
First variable of the dual pb, size n*m, updated
g2 : 2d tn.tensor
Second variable of the dual pb, size n*r, updated
"""
    n1, m1 = g1.shape
    n2, m2 = g2.shape
    # Work on float64 copies of the dual variables
    rep_g1 = tn.clone(g1).to(tn.float64)
    rep_g2 = tn.clone(g2).to(tn.float64)
    for _ in range(iterdual):
        # Gradient steps on the first dual variable
        for _ in range(iterg1):
            g1_grad = grad1_pb_dual(rep_g1, rep_g2, rho1, h, data, ent, matK)
            rep_g1 = rep_g1 + stepg1*g1_grad
        # Gradient steps on the second dual variable
        for _ in range(iterg2):
            g2_grad = grad2_pb_dual(rep_g1, rep_g2, rho1, h, sigma, dictionary, reg, ent, matK)
            rep_g2 = rep_g2 + stepg2*g2_grad
    return (rep_g1, rep_g2)


def solW_primal(g1, g2, h, rho1):
    # Recover the primal variable W from the dual variables
    w = grad_e_dual((tn.matmul(g1, tn.transpose(h, 0, 1)) + g2)/rho1)
    return w
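

# --- Illustrative usage sketch (assumption, not part of the original file) ---
# Minimal example of the alternating dual ascent followed by the primal recovery
# of W. Shapes follow the docstring of optim_pb_dual; the random data, the
# selected templates, the kernel and the hyperparameter values are made-up
# assumptions for illustration only.
def _example_dual_ascent():
    n, m, r, d = 50, 30, 4, 10
    data = tn.rand(n, m, dtype=tn.float64)
    h = tn.rand(r, m, dtype=tn.float64)
    dictionary = tn.rand(n, d, dtype=tn.float64)
    sigma = [0, 2, 5, 7]                    # r selected templates of the dictionary
    cost = tn.rand(n, n, dtype=tn.float64)  # ground cost between bins
    ent, reg, rho1 = 0.1, 0.1, 1.0
    K = tn.exp(-cost / ent)                 # precomputed kernel (cf. matK in base_function)
    g1 = tn.zeros(n, m, dtype=tn.float64)
    g2 = tn.zeros(n, r, dtype=tn.float64)
    # Alternate gradient steps on the two dual variables...
    g1, g2 = optim_pb_dual(g1, g2, rho1, h, data, sigma, dictionary, reg, ent, K,
                           stepg1=1e-3, stepg2=1e-3, iterg1=1, iterg2=1, iterdual=10)
    # ...then recover the primal template matrix W from the dual solution.
    return solW_primal(g1, g2, h, rho1)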

def optim_w(w_init, h, data, sigma, dictionary, reg, ent, rho1, rho2, cost,
            method = 'sinkhorn', iterg1 = 1, iterg2 = 1, iterdual = 10, itermax = 10,
            numItermax = 1000, thr = 1e-9, verbose = False):
    init_pb = pb(w = w_init, h = h, sigma = sigma, data = data, dictionary = dictionary, reg = reg,
                 ent = ent, rho1 = rho1, rho2 = rho2, cost = cost, method = method, numItermax = numItermax,
                 thr = thr, verbose = verbose)
\ No newline at end of file
@@ -314,6 +314,22 @@ def f_mat_aux(aim, cost, ent, method = 'sinkhorn', numItermax = 1000, thr = 1e-9
    return f


def matK(cost, ent):
    """
    Return the matrix K used to shorten the computations. This matrix is $K = e^{-\mathrm{Cost}/\mathrm{ent}}$
    Parameters
    ----------
    cost : 2d tensor
        Cost matrix
    ent : float
        Entropic regularization rate.
    Returns
    -------
    w : 2d tensor
        $K = e^{-\mathrm{Cost}/\mathrm{ent}}$
    """
    n, m = cost.shape
    w = tn.zeros(n, m, dtype=tn.float64)
    for i in range(n):
......
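
# --- Vectorized sketch (assumption, not part of the original diff) ---
# The truncated loop above presumably fills w[i, j] = exp(-cost[i, j] / ent);
# if so, the whole matrix K = e^{-Cost/ent} can be obtained in one vectorized
# expression, without an explicit Python loop:
def matK_vectorized(cost, ent):
    # Element-wise exponential of -cost/ent, in float64 like the loop version
    return tn.exp(-cost.to(tn.float64) / ent)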
@@ -5,13 +5,55 @@ Created on Tue Jan 25 19:43:38 2022
@author: remic
"""
import ot as ot
import torch as tn
import numpy as np
import wdnmf_dual.methods.dual_grad as dg
import wdnmf_dual.methods.base_function as dg
def pb(w, h, sigma, data, dictionary, lbda, reg, ent, rho1, rho2, cost,
def pb(w, h, sigma, data, dictionary, reg, ent, rho1, rho2, cost,
       method = 'sinkhorn', numItermax = 1000, thr = 1e-9, verbose = False):
"""
Return the cost of the primal problem
Parameters
----------
w : 2D tn.tensor
Template matrix, each column being liked to the matching one in dictionary. Size n \times r
h : 2D tn.tensor
Weight matrix, size t \times m
sigma : np.array
Matching between each column of W and Dictionary
data : 2D tn.tensor
Data, each column is a pixel / a spectrum, size n \times m
dictionary : 2D tn.tensor
Each column is a known template, size n \times d with d > r.
reg : float
parameter \lambda > 0, the lower it is, the less importance the dictionary have.
\lambda = 0 leads to the base case.
ent : float
Entropic regulation rate. The lower this rate is, the sharper the
rho1 : float
Regulation to inforce the nonnegativity constraint for W.
rho2 : float
Regulation to inforce the nonnegativity constraint for H.
cost : 2D tn.tensor
Distance matrix between each bin.
method : str, optional
‘sinkhorn’,’sinkhorn_log’, ‘greenkhorn’, ‘sinkhorn_stabilized’ or ‘sinkhorn_epsilon_scaling’
depending on what solver to use. Sinkhorn stabilized should be the best for
sharper transport with the smaller reg rate. The default is 'sinkhorn'.
numItermax : int, optional
Number of iteration before stoping the calculation of the transport plan. The default is 1000.
thr : float, optional
Precision threshold, the calculation of the transport plan stop if the \delta between
two iterations of the computation is smaller than thr. The default is 1e-9.
verbose : Boolean, optional
Verbose or not verbose, this is a debug question. The default is False.
Returns
-------
temp : float
Total cost of the primal problem.
"""
    temp = 0
    n, m = data.shape
    approx = tn.matmul(w, h)
@@ -25,8 +67,37 @@ def pb(w, h, sigma, data, dictionary, lbda, reg, ent, rho1, rho2, cost,
    return temp
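
# --- Hedged sketch (assumption, not part of the original diff) ---
# One way the data-fit part of the primal cost could be evaluated with POT,
# column by column, using ot.sinkhorn2 together with the `method`, `numItermax`
# and `thr` options documented above. The column normalization is an assumption
# made for the sketch, not taken from the truncated body of pb.
def _sinkhorn_fit_term(data, approx, cost, ent, method='sinkhorn',
                       numItermax=1000, thr=1e-9):
    total = 0.0
    M = cost.to(tn.float64)
    for j in range(data.shape[1]):
        # Normalize each column to a probability vector before transport
        a = data[:, j] / data[:, j].sum()
        b = approx[:, j] / approx[:, j].sum()
        total += ot.sinkhorn2(a, b, M, ent, method=method,
                              numItermax=numItermax, stopThr=thr)
    return total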
def pb_dual(g1, g2, rho1, h, data, sigma, dictionary, reg, ent, matK):
""" Potentially have matK1 and matK2 with cost1 for approximation,
cost2 for dictionary"""
"""
Compute the dual of the primal pb
Parameters
----------
g1 : 2d tn.tensor
First variable of the dual pb, size n*m
g2 : 2d tn.tensor
Second variable of the dual pb, size n*r
rho1 : float64
Regulation rate for the nonnegativity constraint w.r.t W
h : 2d tn.tensor
Abundance matrix, size r*m
data : 2d tn.tensor
Data entry, of size n*m
sigma : 1d array-like
Array of size r, corresponding to the r selected template of the dictionary
dictionary : 2d tn.tensor
Dictionary matrix, each columns is a different template, size n*d with d>r
reg : float64
Dictionary-based regulation rate
ent : float64
Entropic regulation rate
matK : 2d tn.tensor
Precalculated matrix, $\matK = \exp^{-\frac{\matM}{reg}}
Returns
-------
temp : TYPE
DESCRIPTION.
"""
    n1, m1 = g1.shape
    _, m2 = g2.shape
    temp = 0
......