...
 
Commits (2)
@@ -56,7 +56,7 @@ All of these results are for a single model with no data augmentation (I never f
There are several reasons for that:
* This is a reimplementation which features slightly different architectures than the original paper.
* Whereas the results of the NIPS paper were obtained by optimizing hyper-parameters on a validation set, before reporting results on the test set (which results in about 0.4% loss in accuracy compared to optimizing hyper-parameters on the test set), this is not the case here. If you want to report the accuracy of the CKNs in your papers, please use the numbers provided in the NIPS paper (unless you are yourself doing experiments without a proper train/val or cross-validation split).
* Whereas the results of the NIPS paper were obtained by optimizing hyper-parameters on a validation set, before reporting results on the test set (which results in about 0.4% loss in accuracy compared to optimizing hyper-parameters on the test set), this is not the case here. If you want to report the accuracy of the CKNs in your papers, please use the numbers provided in the NIPS paper.
[1]: https://hal.inria.fr/hal-01387399/document
@@ -24,8 +24,10 @@ nepochs=105;
%train_ckn_supervised_gpu(npatches,subsampling,nfilters,sigmas,type_kernel,zero_pad,centering,whitening,type_learning_init,lambda,lambda2,alt_optim,it_eval,init_rate,ndecrease_rate,nepochs,device,threads,dataset);
% produces about 90.5% accuracy on cifar, with single model and no data augmentation.
% 5-layer architecture: per-layer patch sizes, subsampling factors, number of
% filters and kernel types. The remaining hyper-parameters (sigmas, lambda,
% learning-rate schedule, device, dataset, ...) are assumed to be defined
% earlier in this script -- TODO confirm against the lines above this chunk.
npatches=[3 1 3 1 3];
subsampling=[2 1 2 1 3];
nfilters=[128 128 128 128 128];
type_kernel=[0 1 0 1 0];  % semicolon added to suppress console echo of the vector
train_ckn_supervised_gpu(npatches,subsampling,nfilters,sigmas,type_kernel,zero_pad,centering,whitening,type_learning_init,lambda,lambda2,alt_optim,it_eval,init_rate,ndecrease_rate,nepochs,device,threads,dataset);
% Alternative configuration (commented out): same architecture, trained for
% more epochs with a different learning-rate decrease schedule.
% npatches=[3 1 3 1 3];
% subsampling=[2 1 2 1 3];
% nfilters=[128 128 128 128 128];
% type_kernel=[0 1 0 1 0];
% ndecrease_rate=50;
% nepochs=155;
% train_ckn_supervised_gpu(npatches,subsampling,nfilters,sigmas,type_kernel,zero_pad,centering,whitening,type_learning_init,lambda,lambda2,alt_optim,it_eval,init_rate,ndecrease_rate,nepochs,device,threads,dataset);