SHOGUN  4.1.0
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/config.h>
13 #include <shogun/lib/common.h>
14 #include <shogun/io/SGIO.h>
15 #include <shogun/io/File.h>
16 #include <shogun/lib/Time.h>
17 #include <shogun/lib/Signal.h>
18 
19 #include <shogun/base/Parallel.h>
20 
21 #include <shogun/kernel/Kernel.h>
24 #include <shogun/base/Parameter.h>
25 
27 
28 #include <string.h>
29 #include <unistd.h>
31 
32 #ifdef HAVE_PTHREAD
33 #include <pthread.h>
34 #endif
35 
36 using namespace shogun;
37 
39 {
40  init();
42 }
43 
44 CKernel::CKernel(int32_t size) : CSGObject()
45 {
46  init();
47 
48  if (size<10)
49  size=10;
50 
51  cache_size=size;
53 }
54 
55 
56 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
57 {
58  init();
59 
60  if (size<10)
61  size=10;
62 
63  cache_size=size;
64 
66  init(p_lhs, p_rhs);
68 }
69 
71 {
72  if (get_is_initialized())
73  SG_ERROR("Kernel still initialized on destruction.\n")
74 
77 
78  SG_INFO("Kernel deleted (%p).\n", this)
79 }
80 
81 
82 
83 bool CKernel::init(CFeatures* l, CFeatures* r)
84 {
85  /* make sure that features are not deleted if same ones are used */
86  SG_REF(l);
87  SG_REF(r);
88 
89  //make sure features were indeed supplied
90  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
91  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
92 
93  //make sure features are compatible
94  if (l->support_compatible_class())
95  {
97  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
98  l->get_name(), r->get_name());
99  }
100  else
101  {
103  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
104  l->get_name(), r->get_name())
105  }
107 
108  //remove references to previous features
110 
111  //increase reference counts
112  SG_REF(l);
113  if (l==r)
114  lhs_equals_rhs=true;
115  else // l!=r
116  SG_REF(r);
117 
118  lhs=l;
119  rhs=r;
120 
123 
126 
127  /* unref "safety" refs from beginning */
128  SG_UNREF(r);
129  SG_UNREF(l);
130 
131  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
132  return true;
133 }
134 
136 {
137  SG_REF(n);
138  if (lhs && rhs)
139  n->init(this);
140 
142  normalizer=n;
143 
144  return (normalizer!=NULL);
145 }
146 
148 {
150  return normalizer;
151 }
152 
154 {
155  return normalizer->init(this);
156 }
157 
159 {
161 }
162 
163 
164 
// Deserializes the kernel from the given CFile.
// NOTE(review): source lines 167-168 are missing from this extracted listing
// (note the jump 166 -> 169), so the actual body is not visible here — verify
// against the original Kernel.cpp before relying on this stub's emptiness.
165 void CKernel::load(CFile* loader)
166 {
169 }
170 
// Serializes the kernel by materializing the FULL kernel matrix (O(m*n)
// memory) and writing it through the CFile interface.
// NOTE(review): source lines 174 and 176 are missing from this extracted
// listing (numbering jumps 173 -> 175 -> 177); confirm against the original
// Kernel.cpp what they contained.
171 void CKernel::save(CFile* writer)
172 {
173  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
175  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
177 }
178 
180 {
181  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
182  if (rhs!=lhs)
183  SG_UNREF(rhs);
184  rhs = NULL;
185  num_rhs=0;
186 
187  SG_UNREF(lhs);
188  lhs = NULL;
189  num_lhs=0;
190  lhs_equals_rhs=false;
191 
192 
193  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
194 }
195 
197 {
198  if (rhs==lhs)
199  rhs=NULL;
200  SG_UNREF(lhs);
201  lhs = NULL;
202  num_lhs=0;
203  lhs_equals_rhs=false;
204 
205 }
206 
209 {
210  if (rhs!=lhs)
211  SG_UNREF(rhs);
212  rhs = NULL;
213  num_rhs=0;
214  lhs_equals_rhs=false;
215 
216 
217 }
218 
219 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
220 
222 {
223  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
225  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
226  "SLOWBUTMEMEFFICIENT");
227 
228  switch (get_kernel_type())
229  {
291  }
292 
293  switch (get_feature_class())
294  {
305  ENUM_CASE(C_WD)
317  }
318 
319  switch (get_feature_type())
320  {
335  }
336  SG_INFO("\n")
337 }
338 #undef ENUM_CASE
339 
341  int32_t count, int32_t *IDX, float64_t * weights)
342 {
343  SG_ERROR("kernel does not support linadd optimization\n")
344  return false ;
345 }
346 
348 {
349  SG_ERROR("kernel does not support linadd optimization\n")
350  return false;
351 }
352 
354 {
355  SG_ERROR("kernel does not support linadd optimization\n")
356  return 0;
357 }
358 
360  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
361  int32_t* IDX, float64_t* weights, float64_t factor)
362 {
363  SG_ERROR("kernel does not support batch computation\n")
364 }
365 
366 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
367 {
368  SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
369 }
370 
372 {
373  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
374 }
375 
377 {
378  return 1;
379 }
380 
382  int32_t vector_idx, float64_t * subkernel_contrib)
383 {
384  SG_ERROR("kernel compute_by_subkernel not implemented\n")
385 }
386 
387 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
388 {
389  num_weights=1 ;
390  return &combined_kernel_weight ;
391 }
392 
394 {
395  int num_weights = 1;
396  const float64_t* weight = get_subkernel_weights(num_weights);
397  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
398 }
399 
401 {
402  ASSERT(weights.vector)
403  if (weights.vlen!=1)
404  SG_ERROR("number of subkernel weights should be one ...\n")
405 
406  combined_kernel_weight = weights.vector[0] ;
407 }
408 
// CKernel::obtain_from_generic(CSGObject* kernel) — the signature on source
// line 409 was dropped by this extracted listing (see the cross-reference
// section: "static CKernel* obtain_from_generic(CSGObject* kernel)").
// Checked downcast of a generic CSGObject* to CKernel*:
//  - NULL input yields NULL (not an error),
//  - a non-NULL object that is not a CKernel triggers REQUIRE (error path).
410 {
411  if (kernel)
412  {
413  CKernel* casted=dynamic_cast<CKernel*>(kernel);
414  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
415  " of class \"%s\" is not a subclass of CKernel!\n",
416  kernel->get_name());
417  return casted;
418  }
419  else
420  return NULL;
421 }
422 
// bool CKernel::init_optimization_svm(CSVM* svm) — the signature on source
// line 423 was dropped by this extracted listing (see cross-reference:
// "bool init_optimization_svm(CSVM *svm)").
// Convenience wrapper: gathers the SVM's support-vector indices and alpha
// coefficients into temporary arrays, forwards them to init_optimization(),
// frees the temporaries, and returns init_optimization()'s result.
424 {
425  int32_t num_suppvec=svm->get_num_support_vectors();
426  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
427  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
428 
429  for (int32_t i=0; i<num_suppvec; i++)
430  {
431  sv_idx[i] = svm->get_support_vector(i);
432  sv_weight[i] = svm->get_alpha(i);
433  }
434  // base CKernel::init_optimization() just errors; linadd-capable kernels override it
435  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
436  SG_FREE(sv_idx);
437  SG_FREE(sv_weight);
438  return ret;
439 }
440 
442 {
444  if (lhs_equals_rhs)
445  rhs=lhs;
446 }
447 
449 {
451 
452  if (lhs_equals_rhs)
453  rhs=NULL;
454 }
455 
457 {
459 
460  if (lhs_equals_rhs)
461  rhs=lhs;
462 }
463 
465  SG_ADD(&cache_size, "cache_size",
466  "Cache size in MB.", MS_NOT_AVAILABLE);
467  SG_ADD((CSGObject**) &lhs, "lhs",
468  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
469  SG_ADD((CSGObject**) &rhs, "rhs",
470  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
471  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
472  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
473  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
475  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
477  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
478  "Combined kernel weight.", MS_AVAILABLE);
479  SG_ADD(&optimization_initialized, "optimization_initialized",
480  "Optimization is initialized.", MS_NOT_AVAILABLE);
481  SG_ADD((machine_int_t*) &opt_type, "opt_type",
482  "Optimization type.", MS_NOT_AVAILABLE);
483  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
484  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
485  MS_AVAILABLE);
486 }
487 
488 
489 void CKernel::init()
490 {
491  cache_size=10;
492  kernel_matrix=NULL;
493  lhs=NULL;
494  rhs=NULL;
495  num_lhs=0;
496  num_rhs=0;
497  lhs_equals_rhs=false;
502  normalizer=NULL;
503 
504 
505 
507 }
508 
509 namespace shogun
510 {
512 template <class T> struct K_THREAD_PARAM
513 {
517  int32_t start;
519  int32_t end;
521  int64_t total_start;
523  int64_t total_end;
525  int32_t m;
527  int32_t n;
529  T* result;
531  bool symmetric;
533  bool verbose;
534 };
535 }
536 
538  bool no_diag)
539 {
540  SG_DEBUG("Entering\n");
541 
542  REQUIRE(has_features(), "No features assigned to kernel\n")
543  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
544  REQUIRE(block_begin>=0 && block_begin<num_rhs,
545  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
546  REQUIRE(block_begin+block_size<=num_rhs,
547  "Invalid block size (%d) at starting index (%d, %d)! "
548  "Please use smaller blocks!", block_size, block_begin, block_begin)
549  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
550 
551  float64_t sum=0.0;
552 
553  // since the block is symmetric with main diagonal inside, we can save half
554  // the computation with using only the upper triangular part.
555  // this can be done in parallel
556 #pragma omp parallel for
557  for (index_t i=0; i<block_size; ++i)
558  {
559  // compute the kernel values on the upper triangular part of the kernel
560  // matrix and compute sum on the fly
561  for (index_t j=i+1; j<block_size; ++j)
562  {
563  float64_t k=kernel(i+block_begin, j+block_begin);
564 #pragma omp atomic
565  sum+=k;
566  }
567  }
568 
569  // the actual sum would be twice of what we computed
570  sum*=2;
571 
572  // add the diagonal elements if required - keeping this check
573  // outside of the loop to save cycles
574  if (!no_diag)
575  {
576 #pragma omp parallel for
577  for (index_t i=0; i<block_size; ++i)
578  {
579  float64_t diag=kernel(i+block_begin, i+block_begin);
580 #pragma omp atomic
581  sum+=diag;
582  }
583  }
584 
585  SG_DEBUG("Leaving\n");
586 
587  return sum;
588 }
589 
590 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
591  index_t block_size_row, index_t block_size_col, bool no_diag)
592 {
593  SG_DEBUG("Entering\n");
594 
595  REQUIRE(has_features(), "No features assigned to kernel\n")
596  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
597  block_begin_col>=0 && block_begin_col<num_rhs,
598  "Invalid block begin index (%d, %d)!\n",
599  block_begin_row, block_begin_col)
600  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
601  block_begin_col+block_size_col<=num_rhs,
602  "Invalid block size (%d, %d) at starting index (%d, %d)! "
603  "Please use smaller blocks!", block_size_row, block_size_col,
604  block_begin_row, block_begin_col)
605  REQUIRE(block_size_row>=1 && block_size_col>=1,
606  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
607 
608  // check if removal of diagonal is required/valid
609  if (no_diag && block_size_row!=block_size_col)
610  {
611  SG_WARNING("Not removing the main diagonal since block is not square!\n");
612  no_diag=false;
613  }
614 
615  float64_t sum=0.0;
616 
617  // this can be done in parallel for the rows/cols
618 #pragma omp parallel for
619  for (index_t i=0; i<block_size_row; ++i)
620  {
621  // compute the kernel values and compute sum on the fly
622  for (index_t j=0; j<block_size_col; ++j)
623  {
624  float64_t k=no_diag && i==j ? 0 :
625  kernel(i+block_begin_row, j+block_begin_col);
626 #pragma omp atomic
627  sum+=k;
628  }
629  }
630 
631  SG_DEBUG("Leaving\n");
632 
633  return sum;
634 }
635 
637  index_t block_size, bool no_diag)
638 {
639  SG_DEBUG("Entering\n");
640 
641  REQUIRE(has_features(), "No features assigned to kernel\n")
642  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
643  REQUIRE(block_begin>=0 && block_begin<num_rhs,
644  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
645  REQUIRE(block_begin+block_size<=num_rhs,
646  "Invalid block size (%d) at starting index (%d, %d)! "
647  "Please use smaller blocks!", block_size, block_begin, block_begin)
648  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
649 
650  // initialize the vector that accumulates the row/col-wise sum on the go
651  SGVector<float64_t> row_sum(block_size);
652  row_sum.set_const(0.0);
653 
654  // since the block is symmetric with main diagonal inside, we can save half
655  // the computation with using only the upper triangular part.
656  // this can be done in parallel for the rows/cols
657 #pragma omp parallel for
658  for (index_t i=0; i<block_size; ++i)
659  {
660  // compute the kernel values on the upper triangular part of the kernel
661  // matrix and compute row-wise sum on the fly
662  for (index_t j=i+1; j<block_size; ++j)
663  {
664  float64_t k=kernel(i+block_begin, j+block_begin);
665 #pragma omp critical
666  {
667  row_sum[i]+=k;
668  row_sum[j]+=k;
669  }
670  }
671  }
672 
673  // add the diagonal elements if required - keeping this check
674  // outside of the loop to save cycles
675  if (!no_diag)
676  {
677 #pragma omp parallel for
678  for (index_t i=0; i<block_size; ++i)
679  {
680  float64_t diag=kernel(i+block_begin, i+block_begin);
681  row_sum[i]+=diag;
682  }
683  }
684 
685  SG_DEBUG("Leaving\n");
686 
687  return row_sum;
688 }
689 
691  block_begin, index_t block_size, bool no_diag)
692 {
693  SG_DEBUG("Entering\n");
694 
695  REQUIRE(has_features(), "No features assigned to kernel\n")
696  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
697  REQUIRE(block_begin>=0 && block_begin<num_rhs,
698  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
699  REQUIRE(block_begin+block_size<=num_rhs,
700  "Invalid block size (%d) at starting index (%d, %d)! "
701  "Please use smaller blocks!", block_size, block_begin, block_begin)
702  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
703 
704  // initialize the matrix that accumulates the row/col-wise sum on the go
705  // the first column stores the sum of kernel values
706  // the second column stores the sum of squared kernel values
707  SGMatrix<float64_t> row_sum(block_size, 2);
708  row_sum.set_const(0.0);
709 
710  // since the block is symmetric with main diagonal inside, we can save half
711  // the computation with using only the upper triangular part
712  // this can be done in parallel for the rows/cols
713 #pragma omp parallel for
714  for (index_t i=0; i<block_size; ++i)
715  {
716  // compute the kernel values on the upper triangular part of the kernel
717  // matrix and compute row-wise sum and squared sum on the fly
718  for (index_t j=i+1; j<block_size; ++j)
719  {
720  float64_t k=kernel(i+block_begin, j+block_begin);
721 #pragma omp critical
722  {
723  row_sum(i, 0)+=k;
724  row_sum(j, 0)+=k;
725  row_sum(i, 1)+=k*k;
726  row_sum(j, 1)+=k*k;
727  }
728  }
729  }
730 
731  // add the diagonal elements if required - keeping this check
732  // outside of the loop to save cycles
733  if (!no_diag)
734  {
735 #pragma omp parallel for
736  for (index_t i=0; i<block_size; ++i)
737  {
738  float64_t diag=kernel(i+block_begin, i+block_begin);
739  row_sum(i, 0)+=diag;
740  row_sum(i, 1)+=diag*diag;
741  }
742  }
743 
744  SG_DEBUG("Leaving\n");
745 
746  return row_sum;
747 }
748 
750  index_t block_begin_col, index_t block_size_row,
751  index_t block_size_col, bool no_diag)
752 {
753  SG_DEBUG("Entering\n");
754 
755  REQUIRE(has_features(), "No features assigned to kernel\n")
756  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
757  block_begin_col>=0 && block_begin_col<num_rhs,
758  "Invalid block begin index (%d, %d)!\n",
759  block_begin_row, block_begin_col)
760  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
761  block_begin_col+block_size_col<=num_rhs,
762  "Invalid block size (%d, %d) at starting index (%d, %d)! "
763  "Please use smaller blocks!", block_size_row, block_size_col,
764  block_begin_row, block_begin_col)
765  REQUIRE(block_size_row>=1 && block_size_col>=1,
766  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
767 
768  // check if removal of diagonal is required/valid
769  if (no_diag && block_size_row!=block_size_col)
770  {
771  SG_WARNING("Not removing the main diagonal since block is not square!\n");
772  no_diag=false;
773  }
774 
775  // initialize the vector that accumulates the row/col-wise sum on the go
776  // the first block_size_row entries store the row-wise sum of kernel values
 777  // the next block_size_col entries store the col-wise sum of kernel values
778  SGVector<float64_t> sum(block_size_row+block_size_col);
779  sum.set_const(0.0);
780 
781  // this can be done in parallel for the rows/cols
782 #pragma omp parallel for
783  for (index_t i=0; i<block_size_row; ++i)
784  {
785  // compute the kernel values and compute sum on the fly
786  for (index_t j=0; j<block_size_col; ++j)
787  {
788  float64_t k=no_diag && i==j ? 0 :
789  kernel(i+block_begin_row, j+block_begin_col);
790 #pragma omp critical
791  {
792  sum[i]+=k;
793  sum[j+block_size_row]+=k;
794  }
795  }
796  }
797 
798  SG_DEBUG("Leaving\n");
799 
800  return sum;
801 }
802 
803 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
804 {
805  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
806  int32_t i_start=params->start;
807  int32_t i_end=params->end;
808  CKernel* k=params->kernel;
809  T* result=params->result;
810  bool symmetric=params->symmetric;
811  int32_t n=params->n;
812  int32_t m=params->m;
813  bool verbose=params->verbose;
814  int64_t total_start=params->total_start;
815  int64_t total_end=params->total_end;
816  int64_t total=total_start;
817 
818  for (int32_t i=i_start; i<i_end; i++)
819  {
820  int32_t j_start=0;
821 
822  if (symmetric)
823  j_start=i;
824 
825  for (int32_t j=j_start; j<n; j++)
826  {
827  float64_t v=k->kernel(i,j);
828  result[i+j*m]=v;
829 
830  if (symmetric && i!=j)
831  result[j+i*m]=v;
832 
833  if (verbose)
834  {
835  total++;
836 
837  if (symmetric && i!=j)
838  total++;
839 
840  if (total%100 == 0)
841  SG_OBJ_PROGRESS(k, total, total_start, total_end)
842 
844  break;
845  }
846  }
847 
848  }
849 
850  return NULL;
851 }
852 
853 template <class T>
855 {
856  T* result = NULL;
857 
858  REQUIRE(has_features(), "no features assigned to kernel\n")
859 
860  int32_t m=get_num_vec_lhs();
861  int32_t n=get_num_vec_rhs();
862 
863  int64_t total_num = int64_t(m)*n;
864 
865  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
866  bool symmetric= (lhs && lhs==rhs && m==n);
867 
868  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
869 
870  result=SG_MALLOC(T, total_num);
871 
872  int32_t num_threads=parallel->get_num_threads();
873  if (num_threads < 2)
874  {
875  K_THREAD_PARAM<T> params;
876  params.kernel=this;
877  params.result=result;
878  params.start=0;
879  params.end=m;
880  params.total_start=0;
881  params.total_end=total_num;
882  params.n=n;
883  params.m=m;
884  params.symmetric=symmetric;
885  params.verbose=true;
886  get_kernel_matrix_helper<T>((void*) &params);
887  }
888  else
889  {
890  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
891  K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
892  int64_t step= total_num/num_threads;
893 
894  int32_t t;
895 
896  num_threads--;
897  for (t=0; t<num_threads; t++)
898  {
899  params[t].kernel = this;
900  params[t].result = result;
901  params[t].start = compute_row_start(t*step, n, symmetric);
902  params[t].end = compute_row_start((t+1)*step, n, symmetric);
903  params[t].total_start=t*step;
904  params[t].total_end=(t+1)*step;
905  params[t].n=n;
906  params[t].m=m;
907  params[t].symmetric=symmetric;
908  params[t].verbose=false;
909 
910  int code=pthread_create(&threads[t], NULL,
911  CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);
912 
913  if (code != 0)
914  {
915  SG_WARNING("Thread creation failed (thread %d of %d) "
916  "with error:'%s'\n",t, num_threads, strerror(code));
917  num_threads=t;
918  break;
919  }
920  }
921 
922  params[t].kernel = this;
923  params[t].result = result;
924  params[t].start = compute_row_start(t*step, n, symmetric);
925  params[t].end = m;
926  params[t].total_start=t*step;
927  params[t].total_end=total_num;
928  params[t].n=n;
929  params[t].m=m;
930  params[t].symmetric=symmetric;
931  params[t].verbose=true;
932  get_kernel_matrix_helper<T>(&params[t]);
933 
934  for (t=0; t<num_threads; t++)
935  {
936  if (pthread_join(threads[t], NULL) != 0)
937  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
938  }
939 
940  SG_FREE(params);
941  SG_FREE(threads);
942  }
943 
944  SG_DONE()
945 
946  return SGMatrix<T>(result,m,n,true);
947 }
948 
949 
950 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
951 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
952 
953 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
954 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
955 
virtual void clear_normal()
Definition: Kernel.cpp:371
virtual const char * get_name() const =0
virtual void load_serializable_post()
Definition: Kernel.cpp:441
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:83
virtual bool support_compatible_class() const
Definition: Features.h:323
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
Definition: Kernel.h:801
#define SG_INFO(...)
Definition: SGIO.h:118
virtual void cleanup()
Definition: Kernel.cpp:158
#define SG_RESET_LOCALE
Definition: SGIO.h:86
#define SG_DONE()
Definition: SGIO.h:157
virtual void set_matrix(const bool *matrix, int32_t num_feat, int32_t num_vec)
Definition: File.cpp:126
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:381
virtual bool get_feature_class_compatibility(EFeatureClass rhs) const
Definition: Features.cpp:355
int32_t get_num_threads() const
Definition: Parallel.cpp:64
int32_t index_t
Definition: common.h:62
int32_t num_rhs
number of feature vectors on right hand side
Definition: Kernel.h:885
static void * get_kernel_matrix_helper(void *p)
Definition: Kernel.cpp:803
Class ShogunException defines an exception which is thrown whenever an error inside of shogun occurs...
virtual bool set_normalizer(CKernelNormalizer *normalizer)
Definition: Kernel.cpp:135
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:590
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
Definition: SGObject.cpp:1068
#define SG_ERROR(...)
Definition: SGIO.h:129
#define REQUIRE(x,...)
Definition: SGIO.h:206
virtual bool delete_optimization()
Definition: Kernel.cpp:347
float64_t kernel(int32_t idx_a, int32_t idx_b)
Definition: Kernel.h:203
#define ENUM_CASE(n)
Definition: Kernel.cpp:219
uint64_t properties
Definition: Kernel.h:898
Parallel * parallel
Definition: SGObject.h:499
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:208
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:513
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:216
#define SG_REF(x)
Definition: SGObject.h:51
#define SG_SET_LOCALE_C
Definition: SGIO.h:85
int32_t cache_size
cache_size in MB
Definition: Kernel.h:866
bool get_is_initialized()
Definition: Kernel.h:635
virtual SGMatrix< float64_t > row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:690
float64_t combined_kernel_weight
Definition: Kernel.h:888
virtual void register_params()
Definition: Kernel.cpp:464
void save(CFile *writer)
Definition: Kernel.cpp:171
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:179
virtual CKernelNormalizer * get_normalizer()
Definition: Kernel.cpp:147
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:112
virtual SGVector< float64_t > row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:749
#define SG_OBJ_PROGRESS(o,...)
Definition: SGIO.h:147
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:537
virtual SGVector< float64_t > get_subkernel_weights()
Definition: Kernel.cpp:393
double float64_t
Definition: common.h:50
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM * kernel_matrix
Definition: Kernel.h:872
A File access base class.
Definition: File.h:34
virtual void save_serializable_post()
Definition: Kernel.cpp:456
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:353
EOptimizationType get_optimization_type()
Definition: Kernel.h:623
index_t num_rows
Definition: SGMatrix.h:376
virtual void save_serializable_post()
Definition: SGObject.cpp:1073
void list_kernel()
Definition: Kernel.cpp:221
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
Definition: Kernel.h:684
virtual SGVector< float64_t > row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:636
virtual EFeatureClass get_feature_class() const =0
Identity Kernel Normalization, i.e. no normalization is applied.
index_t num_cols
Definition: SGMatrix.h:378
int32_t num_lhs
number of feature vectors on left hand side
Definition: Kernel.h:883
The class Kernel Normalizer defines a function to post-process kernel values.
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
Definition: Signal.h:86
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:522
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:400
void set_const(T const_elem)
Definition: SGVector.cpp:152
virtual bool init_normalizer()
Definition: Kernel.cpp:153
bool optimization_initialized
Definition: Kernel.h:891
EOptimizationType opt_type
Definition: Kernel.h:895
void load(CFile *loader)
Definition: Kernel.cpp:165
virtual void load_serializable_post()
Definition: SGObject.cpp:1063
CFeatures * rhs
feature vectors to occur on right hand side
Definition: Kernel.h:877
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:409
#define SG_UNREF(x)
Definition: SGObject.h:52
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual bool init(CKernel *k)=0
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:359
bool lhs_equals_rhs
lhs
Definition: Kernel.h:880
int machine_int_t
Definition: common.h:59
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:340
CFeatures * lhs
feature vectors to occur on left hand side
Definition: Kernel.h:875
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual void save_serializable_pre()
Definition: Kernel.cpp:448
virtual void remove_lhs()
Definition: Kernel.cpp:196
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:376
bool init_optimization_svm(CSVM *svm)
Definition: Kernel.cpp:423
A generic Support Vector Machine Interface.
Definition: SVM.h:49
The Kernel base class.
Definition: Kernel.h:155
CKernelNormalizer * normalizer
Definition: Kernel.h:902
void set_const(T const_elem)
Definition: SGMatrix.cpp:133
#define SG_WARNING(...)
Definition: SGIO.h:128
#define SG_ADD(...)
Definition: SGObject.h:81
virtual bool has_features()
Definition: Kernel.h:531
virtual ~CKernel()
Definition: Kernel.cpp:70
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:366
virtual EFeatureType get_feature_type() const =0
index_t vlen
Definition: SGVector.h:494
virtual EFeatureClass get_feature_class()=0

SHOGUN Machine Learning Toolbox - Documentation