Commit 20a88f13 authored by CORNILLET Remi

tests + code cleanup

parent 76711fdc
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 14:43:03 2023
......
@@ -17,12 +17,6 @@ def log_mat(v):
             w[i, j] = np.log(v[i, j])
     return w
-def scalar_product_mat(v, w):
-    n, m = v.shape
-    temp = 0
-    for j in range(m):
-        temp += float(tn.inner(v[:,j], w[:,j]))
-    return temp
 def simplex_prox_mat(v, z=1):
     """
@@ -80,7 +74,8 @@ def simplex_norm(v) :
 def e_vec(v):
     """
     Non-negativity constraint, compute the Frobenius inner product of v with log(v), v being a vector
-    the log been calculated elementwise. This function is concave.
+    the log being calculated elementwise. This function is concave.
+    It works for values equal to 0.
     Parameters
     ----------
@@ -96,6 +91,7 @@ def e_vec(v):
     n = v.shape[0]
     temp = 0
     for i in range(n):
-        temp += v[i]*np.log(v[i])
+        if v[i]!=0:
+            temp += v[i]*np.log(v[i])
     return temp
@@ -119,7 +115,8 @@ def e_mat(v):
     temp = 0
     for j in range(m):
         for i in range(n):
-            temp += v[i, j]*np.log(v[i, j])
+            if v[i, j]!=0:
+                temp += v[i, j]*np.log(v[i, j])
     return temp
 def e_dual(v):
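The new if-guards implement the usual convention 0 * log(0) = 0 for the negative entropy sum_i v_i * log(v_i). A hedged sketch, assuming torch >= 1.8 where torch.special.xlogy applies that same convention (the *_xlogy names are illustrative, not the repository's API):

import torch as tn

def e_vec_xlogy(v):
    # sum_i v[i] * log(v[i]), with terms where v[i] == 0 contributing 0.
    return float(tn.special.xlogy(v, v).sum())

def e_mat_xlogy(v):
    # Same convention for a matrix: the sum runs over all entries.
    return float(tn.special.xlogy(v, v).sum())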
@@ -345,8 +342,13 @@ def vecA(g, ent):
     return w
 def f_dual(g, aim, ent, matK):
-    alpha = vecA(g, ent)
-    a = e_vec(aim) + scalar_product_mat(aim, log_mat(tn.matmul(matK, alpha)))
+    alpha = vecA(g, ent)
+    s = 0
+    n = aim.shape[0]
+    matKalpha = log_mat(tn.matmul(matK, alpha))
+    for i in range(n):
+        s += aim[i]*matKalpha[i]
+    a = e_vec(aim) + s
     return ent * a
 def grad_f_dual(g, aim, ent, matK):
......
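In the rewritten f_dual, the accumulation loop computes the inner product <aim, log(K @ alpha)>, so the function returns ent * (e_vec(aim) + <aim, log(K @ alpha)>). A sketch of a vectorized equivalent, assuming aim and matK @ alpha are 1-D tensors with strictly positive entries (so tn.log can stand in for the elementwise log_mat):

import torch as tn
from wdnmf_dual.methods.base_function import vecA, e_vec

def f_dual_vectorized(g, aim, ent, matK):
    # tn.dot replaces the explicit accumulation loop over aim[i] * log(K @ alpha)[i].
    alpha = vecA(g, ent)
    s = tn.dot(aim, tn.log(tn.matmul(matK, alpha)))
    return ent * (e_vec(aim) + float(s))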
File added

# -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 13:57:40 2023
@author: rcornill
"""
import pytest
import numpy as np
import torch as tn
from wdnmf_dual.methods.base_function import *

class TestClass:
    def test_log_mat1(self):
        # log of the all-ones matrix is the all-zeros matrix.
        x = tn.zeros(2, 3, dtype=tn.float64)
        v = tn.ones(2, 3, dtype=tn.float64)
        assert tn.all(x == log_mat(v))
    def test_log_mat2(self):
        # Input v has a single entry 2; expected x is the elementwise log.
        v = tn.ones(2, 3, dtype=tn.float64)
        x = tn.zeros(2, 3, dtype=tn.float64)
        v[1, 1] = 2
        x[1, 1] = np.log(2)
        assert tn.all(x == log_mat(v))
    def test_simplex_prox_mat_projection(self):
        # A point already on the (columnwise) simplex is its own projection.
        x = tn.ones(2, 3, dtype=tn.float64)
        x = x / x.sum(dim=0)
        assert tn.all(x == simplex_prox_mat(x))
    def test_simplex_prox_mat1(self):  # The projection here is the l2 (Euclidean) one, not l1!
        x = tn.tensor([[1, 2, 0], [3, 2, 1]], dtype=tn.float64)
        w = tn.tensor([[0., 0.5, 0], [1, 0.5, 1]], dtype=tn.float64)
        assert tn.all(w == simplex_prox_mat(x))
    def test_simplex_norm_projection(self):
        # A point already on the (columnwise) simplex is left unchanged.
        x = tn.ones(2, 3, dtype=tn.float64)
        x = x / x.sum(dim=0)
        assert tn.all(x == simplex_norm(x))
    def test_simplex_norm1(self):  # The projection here is the l1 normalization!
        x = tn.tensor([[1, 2, 0], [3, 2, 1]], dtype=tn.float64)
        w = tn.tensor([[0.25, 0.5, 0], [0.75, 0.5, 1]], dtype=tn.float64)
        assert tn.all(w == simplex_norm(x))
    def test_e_vec(self):
        x = tn.tensor([0.5, 1, 0.1], dtype=tn.float64)
        w = 0.5*np.log(0.5) + 0 + 0.1*np.log(0.1)
        assert tn.all(e_vec(x) == w)
    def test_e_mat(self):
        x = tn.tensor([[0.5, 1, 0.1]], dtype=tn.float64)
        w = 0.5*np.log(0.5) + 0 + 0.1*np.log(0.1)
        assert tn.all(e_mat(x) == w)
    def test_e_dual(self):
        x = tn.tensor([1, 0, 0.5], dtype=tn.float64)
        w = -np.log(np.e + 1 + np.exp(0.5))
        assert tn.all(e_dual(x) == w)
\ No newline at end of file
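The two simplex tests pin down the difference between the routines: simplex_norm rescales each column by its l1 mass, while simplex_prox_mat performs a Euclidean (l2) projection onto the simplex. A sketch of the classic sort-and-threshold l2 projection for a single column (an assumption about how simplex_prox_mat works internally, not code from the repository; it reproduces the expected columns of test_simplex_prox_mat1):

import torch as tn

def project_simplex_column(x, z=1.0):
    # Euclidean projection of a 1-D tensor onto {w : w >= 0, sum(w) = z}.
    u, _ = tn.sort(x, descending=True)
    cssv = tn.cumsum(u, dim=0) - z
    idx = tn.arange(1, x.shape[0] + 1, dtype=x.dtype)
    rho = int(tn.nonzero(u * idx > cssv).max()) + 1   # number of active entries
    theta = cssv[rho - 1] / rho                       # shared threshold
    return tn.clamp(x - theta, min=0.0)

For example, column 0 of the test, [1, 3], projects to [0, 1], matching the expected tensor w.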
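The expected constant in test_e_dual, -log(e + 1 + e^0.5), suggests e_dual(v) = -log(sum_i exp(v_i)), the negated log-sum-exp, which is (up to sign) the convex conjugate of the negative entropy e_vec over the simplex. A quick numerical check of that reading, using torch's built-in logsumexp (the definition of e_dual is an assumption here, since its body is collapsed in this diff):

import numpy as np
import torch as tn

x = tn.tensor([1, 0, 0.5], dtype=tn.float64)
expected = -np.log(np.e + 1 + np.exp(0.5))
# -logsumexp reproduces the test's expected constant up to float rounding.
assert np.isclose(float(-tn.logsumexp(x, dim=0)), expected)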