Commit a5ac7963 authored by CORNILLET Remi

Merge branch 'Jeremysversion' into 'main'

jeremy's vectorization

See merge request !3
parents 20a88f13 a4ca8bbb
@@ -10,19 +10,20 @@ import torch as tn
import numpy as np
def log_mat(v):
n, m = v.shape
w = tn.zeros(n, m, dtype=tn.float64)
for i in range(n):
for j in range(m):
w[i, j] = np.log(v[i, j])
return w
return tn.log(v)
#n, m = v.shape
#w = tn.zeros(n, m, dtype=tn.float64)
#for i in range(n):
#for j in range(m):
#w[i, j] = np.log(v[i, j])
#return w
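# A quick check, on made-up data, that the vectorized form matches the old elementwise
# loop (a sketch; v_demo and w_loop are illustrative names, not part of the module):
v_demo = tn.rand(2, 3, dtype=tn.float64) + 0.1   # strictly positive, so the log is finite
w_loop = tn.zeros(2, 3, dtype=tn.float64)
for i in range(2):
    for j in range(3):
        w_loop[i, j] = np.log(float(v_demo[i, j]))
assert tn.allclose(w_loop, tn.log(v_demo))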
def simplex_prox_mat(v, z=1):
"""
For a given matrix V seen as a collection of column vectors, return the matrix W
whose columns are the L2 projections of V's columns onto the simplex.
Warning : it does promote sparcity in each vector due to L2 projection,
Warning : it does promote sparsity in each vector due to L2 projection,
which may be problematic for code that uses this function when sparsity is not wanted.
Parameters
@@ -40,6 +41,7 @@ def simplex_prox_mat(v, z=1):
"""
# You want this to work column-wise on the matrix v, I assume?
# I suggest changing it like this; it is the same thing but more readable, I think.
# TODO: POT is slow... consider using a self-made implementation
n, m = v.shape
w = tn.zeros(n, m, dtype=tn.float64)
for i in range(m):
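# For reference, a sketch of a standard sort-based Euclidean projection of one vector onto
# the simplex {x >= 0, sum(x) = z} (simplex_prox_vec_sketch is a hypothetical helper and is
# not necessarily the per-column routine used here, whose body is truncated in this diff):
def simplex_prox_vec_sketch(y, z=1.0):
    u, _ = tn.sort(y, descending=True)                   # entries in decreasing order
    css = tn.cumsum(u, 0) - z                            # shifted cumulative sums
    idx = tn.arange(1, y.shape[0] + 1, dtype=y.dtype)
    rho = int((u - css / idx > 0).nonzero()[-1]) + 1     # last index where the condition holds
    theta = css[rho - 1] / rho                           # soft-threshold level
    return tn.clamp(y - theta, min=0)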
@@ -67,11 +69,12 @@ def simplex_norm(v) :
w = tn.zeros(n, m, dtype = tn.float64)
if not(tn.all(vprime == v)) :
print("\nWarning : The input vector had a negative value. It has been removed, but it may be an issue somewhere down the line. \n")
# Why do this? If we normalize, we normalize, and that's it. You can keep the print, but normalize v, not vprime.
for i in range(m):
w[:, i] = vprime[:,i] / sum(vprime[:,i])
return w
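# In the same vectorized spirit, a loop-free variant (a sketch, not the committed code;
# simplex_norm_sketch is a hypothetical name):
def simplex_norm_sketch(v):
    vprime = tn.clamp(v, min=0)                          # drop negative entries, as above
    if not tn.all(vprime == v):
        print("\nWarning : The input vector had a negative value. It has been removed. \n")
    return vprime / tn.sum(vprime, 0, keepdim=True)      # each column divided by its sum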
def e_vec(v):
def e_vec(v):
"""
Non-negativity constraint: compute the Frobenius inner product of v with log(v), v being a vector
and the log being computed elementwise. This function is concave.
@@ -88,12 +91,15 @@ def e_vec(v):
Result of \langle v, \log(v) \rangle
"""
n = v.shape[0]
temp = 0
for i in range(n):
if v[i]!=0:
temp += v[i]*np.log(v[i])
return temp
# Useless function?
#return tn.dot(v,tn.log(v))
return tn.sum(v*tn.log(v))
#n = v.shape[0]
#temp = 0
#for i in range(n):
#if v[i]!=0:
#temp += v[i]*np.log(v[i])
#return temp
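# One caveat with the vectorized form: tn.sum(v*tn.log(v)) returns nan as soon as v has a
# zero entry, whereas the old loop skipped zeros (the 0*log(0) = 0 convention). A sketch
# that keeps that convention (e_vec_safe is a hypothetical name; the same remark applies
# to e_mat below):
def e_vec_safe(v):
    return tn.sum(tn.xlogy(v, v))   # tn.xlogy(a, b) = a*log(b), defined as 0 where a == 0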
def e_mat(v):
"""
@@ -111,13 +117,14 @@ def e_mat(v):
Result of \langle v, \log(v) \rangle
"""
n, m = v.shape
temp = 0
for j in range(m):
for i in range(n):
if v[i, j]!=0:
temp += v[i, j]*np.log(v[i, j])
return temp
return tn.dot(v.flatten(),tn.log(v).flatten())
#n, m = v.shape
#temp = 0
#for j in range(m):
#for i in range(n):
#if v[i, j]!=0:
#temp += v[i, j]*np.log(v[i, j])
#return temp
def e_dual(v):
"""
@@ -134,11 +141,14 @@ def e_dual(v):
The value of the dual of e for the given vector v
"""
n = v.shape[0]
temp = 0
for i in range(n):
temp += np.exp(v[i])
return - np.log(temp)
# note: only for 1d input
return - tn.logsumexp(v, 0)
#return -tn.log(tn.sum(tn.exp(v)))
#n = v.shape[0]
#temp = 0
#for i in range(n):
#temp += np.exp(v[i])
#return - np.log(temp)
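# For reference, a sketch on made-up values (v_demo is an illustrative name): besides
# removing the loop, tn.logsumexp evaluates log(sum(exp(v))) in a numerically stable way,
# while the naive form overflows for large entries.
v_demo = tn.tensor([1000.0, 1000.0], dtype=tn.float64)
print(-tn.log(tn.sum(tn.exp(v_demo))))   # -inf, because exp(1000.0) overflows to inf
print(-tn.logsumexp(v_demo, 0))          # approx -1000.6931, computed stably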
def grad_e_dual(v):
"""
@@ -155,11 +165,12 @@ def grad_e_dual(v):
softmax(v), nonnegative and sum to 1.
"""
n = v.shape[0]
w = tn.zeros(n, dtype=tn.float64)
for i in range(n):
w[i] = np.exp(v[i])
return -w/sum(w)
return -tn.softmax(v,0)
#n = v.shape[0]
#w = tn.zeros(n, dtype=tn.float64)
#for i in range(n):
#w[i] = np.exp(v[i])
#return -w/sum(w)
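# Sanity check on random data (a sketch; v_check is an illustrative name): softmax is
# exactly the gradient of logsumexp, so the rewrite can be compared against autograd.
v_check = tn.randn(5, dtype=tn.float64, requires_grad=True)
val = -tn.logsumexp(v_check, 0)                           # e_dual(v_check) as rewritten above
g, = tn.autograd.grad(val, v_check)
assert tn.allclose(g, -tn.softmax(v_check.detach(), 0))   # agrees with grad_e_dual(v_check)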
def f_vec(source, aim, cost, ent, method = 'sinkhorn', numItermax = 1000, thr = 1e-9, verbose = False):
"""
@@ -236,6 +247,7 @@ def f_mat(source, aim, cost, ent, method = 'sinkhorn', numItermax = 1000, thr =
Sum of the distances between the source's and aim's vectors
"""
# TODO: merge with vec version? annoying to have two different functions for mat and vec.
temp = 0
n, m = source.shape
for j in range(m):
@@ -327,29 +339,33 @@ def matK(cost, ent):
$K = e^{-Cost/ent}$
"""
n, m = cost.shape
w = tn.zeros(n, m, dtype=tn.float64)
for i in range(n):
for j in range(m):
w[i, j] = np.exp(-cost[n, m]/ent)
return w
return tn.exp(-cost/ent)
#n, m = cost.shape
#w = tn.zeros(n, m, dtype=tn.float64)
#for i in range(n):
#for j in range(m):
#w[i, j] = np.exp(-cost[n, m]/ent)
#return w
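# Quick check of the Gibbs kernel on made-up values (a sketch; cost_demo and K_demo are
# illustrative names). The vectorized form also avoids the out-of-range index cost[n, m]
# of the old loop.
cost_demo = tn.rand(3, 3, dtype=tn.float64)
K_demo = matK(cost_demo, 0.05)
assert tn.allclose(-0.05 * tn.log(K_demo), cost_demo)   # since K = exp(-cost/ent) elementwise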
def vecA(g, ent):
n = g.shape[0]
w = tn.zeros(n, dtype=tn.float64)
for i in range(n):
w[i] = np.exp(g[i]/ent)
return w
# unclear name
return tn.exp(g/ent)
#n = g.shape[0]
#w = tn.zeros(n, dtype=tn.float64)
#for i in range(n):
#w[i] = np.exp(g[i]/ent)
#return w
def f_dual(g, aim, ent, matK):
alpha = vecA(g, ent)
s = 0
n = aim.shape[0]
matKalpha = log_mat(tn.matmul(matK, alpha))
for i in range(n):
s += aim[i]*matKalpha[i]
a = e_vec(aim) + s
return ent * a
#alpha = vecA(g, ent)
#s = 0
#n = aim.shape[0]
matKalpha = log_mat(tn.matmul(matK, vecA(g, ent)))
return ent*(e_vec(aim) + tn.sum(aim*matKalpha))
#for i in range(n):
#s += aim[i]*matKalpha[i]
#a = e_vec(aim) + s
#return ent * a
def grad_f_dual(g, aim, ent, matK):
"""
@@ -375,6 +391,6 @@ def grad_f_dual(g, aim, ent, matK):
"""
matKT = tn.transpose(matK, 0, 1)
alpha = vecA(g, ent)
a = tn.mul(matKT, tn.div(aim, tn.matmul(matK, alpha)))
b = tn.mul(alpha, a)
return b
a = matKT*(aim/tn.matmul(matK, alpha))
return alpha*a
#return b
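# A possible check (a sketch on made-up data; cost_ref, K_ref, aim_ref, g_ref are
# illustrative names): every operation in f_dual is differentiable, so autograd gives a
# reference gradient to compare the closed form in grad_f_dual against.
cost_ref = tn.rand(4, 4, dtype=tn.float64)
K_ref = matK(cost_ref, 0.1)
aim_ref = tn.softmax(tn.randn(4, dtype=tn.float64), 0)   # a strictly positive point on the simplex
g_ref = tn.randn(4, dtype=tn.float64, requires_grad=True)
ref_grad, = tn.autograd.grad(f_dual(g_ref, aim_ref, 0.1, K_ref), g_ref)
print(ref_grad)                                          # reference from autograd
print(grad_f_dual(g_ref.detach(), aim_ref, 0.1, K_ref))  # compare with the closed form above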
@@ -54,10 +54,8 @@ def pb(w, h, sigma, data, dictionary, reg, ent, rho1, rho2, cost,
Total cost of the primal problem.
"""
temp = 0
n, m = data.shape
approx = tn.matmul(w, h)
temp += dg.f_mat(source = approx, aim = data, cost = cost, ent = ent,
temp = dg.f_mat(source = approx, aim = data, cost = cost, ent = ent,
method = method, numItermax= numItermax, thr = thr,
verbose = False, log = False, warn = False)
temp -= rho1 * dg.e_mat(w) + rho2 * dg.e_mat(h)
@@ -98,18 +96,31 @@ def pb_dual(g1, g2, rho1, h, data, sigma, dictionary, reg, ent, matK):
DESCRIPTION.
"""
n1, m1 = g1.shape
_, m1 = g1.shape
_, m2 = g2.shape
temp = 0
for j in range(m2):
temp += rho1 * dg.e_dual( (tn.matmul(g1, tn.transpose(h, 0, 1))[:,j] + g2[:,j])/rho1 )
for j in range(m1):
temp -= dg.f_dual(g1[:,j], data[:,j], ent, matK)
for j in range(m2):
temp -= reg * dg.f_dual(g2[:,j]/reg, dictionary[:,sigma[j]], ent, matK)
# I used PyTorch's native functions directly -> no loop
temp = -rho1 * tn.sum(tn.logsumexp((tn.matmul(g1, tn.transpose(h, 0, 1)) + g2)/rho1, 0))
#for j in range(m2):
#temp2 += rho1 * dg.e_dual( (tn.matmul(g1, tn.transpose(h, 0, 1))[:,j] + g2[:,j])/rho1 )
temp2 = 0
matKT = tn.transpose(matK, 0, 1)
#alpha = vecA(g, ent)
#a = matKT*(aim/tn.matmul(matK, alpha))
#return alpha*a
# TODO: correct bug in dimensions
temp += (g1/ent)*matKT*data/tn.matmul(matKT, tn.exp(g1/ent))
#for j in range(m1):
#temp2 += dg.f_dual(g1[:,j], data[:,j], ent, matK)
# restrict the dictionary to the columns indexed by sigma
# TODO: same dimensions problem as above
dic_sigma = dictionary[:,sigma]
#temp3=0
temp += -reg*(g2/reg/ent)*matKT*dic_sigma/tn.matmul(matKT, tn.exp(g2/ent/reg))
#for j in range(m2):
#temp3 -= reg * dg.f_dual(g2[:,j]/reg, dictionary[:,sigma[j]], ent, matK)
return temp
def grad1_pb_dual(g1, g2, rho1, h, data, ent, matK):
@@ -145,12 +156,20 @@ def grad1_pb_dual(g1, g2, rho1, h, data, ent, matK):
hT = tn.transpose(h,0,1)
for j in range(m2):
temp += tn.matmul(dg.grad_e_dual((tn.matmul(g1,hT)[:,j] + g2[:,j])/rho1), hT)
for j in range(m1):
temp -= dg.grad_f_dual(g1[:,j], data[:,j], ent, matK)
# no loops please!!
# TODO: TO BE CHECKED
a = -tn.softmax((tn.matmul(g1,hT) + g2)/rho1, 0)
temp += tn.sum(tn.matmul(a, h), 1) # not ht ??
#for j in range(m2):
#temp += tn.matmul(dg.grad_e_dual((tn.matmul(g1,hT)[:,j] + g2[:,j])/rho1), hT) ##??
#print(temp2,temp)
toto = -dg.grad_f_dual(g1, data, ent, matK) # should work out of the box
#temp=0
#for j in range(m1):
# temp -= dg.grad_f_dual(g1[:,j], data[:,j], ent, matK)
#print(toto,temp)
return temp
def grad2_pb_dual(g1, g2, rho1, h, sigma, dictionary, reg, ent, matK):
@@ -190,6 +209,7 @@ def grad2_pb_dual(g1, g2, rho1, h, sigma, dictionary, reg, ent, matK):
hT = tn.transpose(h, 0, 1)
# TODO: same as above
for j in range(m2):
temp += dg.grad_e_dual((tn.matmul(g1,hT)[:,j] + g2[:,j])/rho1)
temp += dg.grad_f_dual(g2[:,j]/reg, dictionary[:,sigma[j]], ent, matK)
......
@@ -21,7 +21,8 @@ class TestClass:
v = tn.zeros(2, 3, dtype=tn.float64)
x[1, 1] = 2
v[1, 1] = np.log(2)
assert tn.all(x == log_mat(v))
# changed x to v
assert tn.all(v == log_mat(x))
def test_simplex_prox_mat_projection():
x = tn.ones(2, 3, dtype=tn.float64)
......
import wdnmf_dual as wd
import torch as tn
from wdnmf_dual.methods.grad_pb_dual import pb_dual, grad1_pb_dual
n = 3
m = 4
r = 2
rho1 = 1
g1 = tn.randn(n,m)
W = tn.rand(n,r)
h = tn.rand(r,m)
data = W@h
sigma = [0,1]
reg = 1
ent = 1
matK = tn.rand(n,m) #??
dictionary = tn.rand(n,2)
out = pb_dual(g1, W, rho1, h, data, sigma, dictionary, reg, ent, matK)
out2 = grad1_pb_dual(g1, W, rho1, h, data, ent, matK)
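# A possible extension of this scratch script (a sketch; it assumes the dimension TODOs in
# pb_dual are resolved so that the call runs): autograd gives a reference gradient of
# pb_dual with respect to g1, to compare against the hand-written grad1_pb_dual above.
g1_check = g1.clone().requires_grad_(True)
val = pb_dual(g1_check, W, rho1, h, data, sigma, dictionary, reg, ent, matK)
ref_grad, = tn.autograd.grad(val.sum(), g1_check)   # .sum() guards against a non-scalar return
print(ref_grad)
print(out2)                                         # gradient from grad1_pb_dual above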