SHOGUN  6.1.3
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/base/progress.h>
16 #include <shogun/io/File.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/lib/Signal.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/common.h>
21 #include <shogun/lib/config.h>
22 
23 #include <shogun/base/Parallel.h>
24 
25 #include <shogun/kernel/Kernel.h>
28 #include <shogun/base/Parameter.h>
29 
31 
32 #include <string.h>
33 #ifndef _WIN32
34 #include <unistd.h>
35 #endif
37 
38 using namespace shogun;
39 
// NOTE(review): doxygen-dump artifact — gaps in the embedded line numbers
// (40, 43, 54, 67, 69, 72, 77-78) mark source lines lost in extraction,
// including the constructor/destructor signatures. Code kept byte-identical;
// restore the missing lines from the original Kernel.cpp before compiling.

// Default constructor (signature on the lost line 40): member initialization only.
41 {
42  init();
44 }
45 
// Constructor taking the kernel cache size in MB; values below 10 are clamped to 10.
46 CKernel::CKernel(int32_t size) : CSGObject()
47 {
48  init();
49 
50  if (size<10)
51  size=10;
52 
53  cache_size=size;
55 }
56 
57 
// Constructor taking lhs/rhs features plus a cache size (MB, min 10) and
// initializing the kernel on those features (lines 67/69 missing here).
58 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
59 {
60  init();
61 
62  if (size<10)
63  size=10;
64 
65  cache_size=size;
66 
68  init(p_lhs, p_rhs);
70 }
71 
// Destructor (signature lost): complains if the kernel is still initialized
// at destruction time (lines 77-78 missing here).
73 {
74  if (get_is_initialized())
75  SG_ERROR("Kernel still initialized on destruction.\n")
76 
79 
80  SG_INFO("Kernel deleted (%p).\n", this)
81 }
82 
83 #ifdef USE_SVMLIGHT
// Re-creates the SVMlight kernel cache with a new size (MB, minimum 10).
// The cache is only (re)initialized when features are attached and the lhs
// is non-empty. NOTE(review): line 89 of the original is missing here.
84 void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
85 {
86  if (size<10)
87  size=10;
88 
90  cache_size=size;
91 
92  if (has_features() && get_num_vec_lhs())
93  kernel_cache_init(cache_size, regression_hack);
94 }
95 #endif //USE_SVMLIGHT
96 
// Initializes the kernel on left/right feature objects, managing reference
// counts and recording whether lhs==rhs. Always returns true on completion.
// NOTE(review): several lines are missing from this dump (110, 116, 120,
// 123, 135-136, 138-139) — among them the REQUIRE(...) heads of the two
// compatibility checks and the statements between the visible ones.
97 bool CKernel::init(CFeatures* l, CFeatures* r)
98 {
99  /* make sure that features are not deleted if same ones are used */
100  SG_REF(l);
101  SG_REF(r);
102 
103  //make sure features were indeed supplied
104  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
105  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
106 
107  //make sure features are compatible
108  if (l->support_compatible_class())
109  {
111  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
112  l->get_name(), r->get_name());
113  }
114  else
115  {
117  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
118  l->get_name(), r->get_name())
119  }
121 
122  //remove references to previous features
124 
125  //increase reference counts
126  SG_REF(l);
127  if (l==r)
128  lhs_equals_rhs=true;
129  else // l!=r
130  SG_REF(r);
131 
132  lhs=l;
133  rhs=r;
134 
137 
140 
141  /* unref "safety" refs from beginning */
142  SG_UNREF(r);
143  SG_UNREF(l);
144 
145  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
146  return true;
147 }
148 
// Installs a new kernel normalizer (signature line 149 lost); the normalizer
// is initialized right away when both feature sides are attached. Returns
// true iff a normalizer is set afterwards (line 155 missing here).
150 {
151  SG_REF(n);
152  if (lhs && rhs)
153  n->init(this);
154 
156  normalizer=n;
157 
158  return (normalizer!=NULL);
159 }
160 
// Returns the current normalizer (signature lost; line 163 — presumably a
// SG_REF(normalizer) — is missing here; confirm against the original).
162 {
164  return normalizer;
165 }
166 
// Re-initializes the normalizer on this kernel (signature line 167 lost).
168 {
169  return normalizer->init(this);
170 }
171 
// cleanup() (signature line 172 lost; body line 174 missing from this dump).
173 {
175 }
176 
177 #ifdef USE_SVMLIGHT
178 /****************************** Cache handling *******************************/
179 
// Allocates and initializes the SVMlight row cache for `buffsize` MB of
// kernel values; with regression_hack the row count is doubled (regression
// duplicates the training examples, see comment below).
// NOTE(review): line 186 (the arguments of the SG_ERROR call) is missing.
180 void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
181 {
182  int32_t totdoc=get_num_vec_lhs();
183  if (totdoc<=0)
184  {
185  SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
187  }
188  uint64_t buffer_size=0;
189  int32_t i;
190 
191  //in regression the additional constraints are made by doubling the training data
192  if (regression_hack)
193  totdoc*=2;
194 
// cap the buffer at the full totdoc x totdoc matrix
195  buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
196  if (buffer_size>((uint64_t) totdoc)*totdoc)
197  buffer_size=((uint64_t) totdoc)*totdoc;
198 
199  SG_INFO("using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name())
200 
201  //make sure it fits in the *signed* KERNELCACHE_IDX type
202  ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)))
204  kernel_cache.index = SG_MALLOC(int32_t, totdoc);
205  kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
206  kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
207  kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
208  kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
209  kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
210  kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
211  kernel_cache.buffsize=buffer_size;
// max_elems = number of full rows that fit into the buffer
212  kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);
213 
214  if(kernel_cache.max_elems>totdoc) {
215  kernel_cache.max_elems=totdoc;
216  }
217 
218  kernel_cache.elems=0; // initialize cache
219  for(i=0;i<totdoc;i++) {
220  kernel_cache.index[i]=-1;
221  kernel_cache.lru[i]=0;
222  }
223  for(i=0;i<totdoc;i++) {
224  kernel_cache.occu[i]=0;
225  kernel_cache.invindex[i]=-1;
226  }
227 
// initially every example is active and maps to itself
228  kernel_cache.activenum=totdoc;;
229  for(i=0;i<totdoc;i++) {
230  kernel_cache.active2totdoc[i]=i;
231  kernel_cache.totdoc2active[i]=i;
232  }
233 
234  kernel_cache.time=0;
235 }
236 
// Copies one kernel row into `buffer` (signature line 237 lost — presumably
// CKernel::get_kernel_row(...); confirm against the original source). When
// row `docnum` is resident in the cache the values are copied out, otherwise
// they are computed on demand; full_line selects all columns vs. the active
// subset listed in active2dnum (terminated by a negative index).
238  int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
239 {
240  int32_t i,j;
241  KERNELCACHE_IDX start;
242 
243  int32_t num_vectors = get_num_vec_lhs();
// indices beyond num_vectors mirror back (regression doubling, see
// kernel_cache_init)
244  if (docnum>=num_vectors)
245  docnum=2*num_vectors-1-docnum;
246 
247  /* is cached? */
248  if(kernel_cache.index[docnum] != -1)
249  {
250  kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
251  start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];
252 
253  if (full_line)
254  {
255  for(j=0;j<get_num_vec_lhs();j++)
256  {
257  if(kernel_cache.totdoc2active[j] >= 0)
258  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
259  else
260  buffer[j]=(float64_t) kernel(docnum, j);
261  }
262  }
263  else
264  {
265  for(i=0;(j=active2dnum[i])>=0;i++)
266  {
267  if(kernel_cache.totdoc2active[j] >= 0)
268  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
269  else
270  {
271  int32_t k=j;
272  if (k>=num_vectors)
273  k=2*num_vectors-1-k;
274  buffer[j]=(float64_t) kernel(docnum, k);
275  }
276  }
277  }
278  }
279  else
280  {
// row not cached: compute every requested entry directly
281  if (full_line)
282  {
283  for(j=0;j<get_num_vec_lhs();j++)
284  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
285  }
286  else
287  {
288  for(i=0;(j=active2dnum[i])>=0;i++)
289  {
290  int32_t k=j;
291  if (k>=num_vectors)
292  k=2*num_vectors-1-k;
293  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
294  }
295  }
296  }
297 }
298 
299 
300 // Fills cache for the row m
// (signature line 301 lost — presumably CKernel::cache_kernel_row(int32_t m);
// confirm against the original source). Already-cached rows of other
// examples are reused where possible; otherwise values are computed.
302 {
303  register int32_t j,k,l;
304  register KERNELCACHE_ELEM *cache;
305 
306  int32_t num_vectors = get_num_vec_lhs();
307 
// mirror regression-doubled indices back into [0, num_vectors)
308  if (m>=num_vectors)
309  m=2*num_vectors-1-m;
310 
311  if(!kernel_cache_check(m)) // not cached yet
312  {
313  cache = kernel_cache_clean_and_malloc(m);
314  if(cache) {
315  l=kernel_cache.totdoc2active[m];
316 
317  for(j=0;j<kernel_cache.activenum;j++) // fill cache
318  {
319  k=kernel_cache.active2totdoc[j];
320 
// reuse the symmetric value from k's cached row when available
321  if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
322  cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
323  *kernel_cache.index[k]+l];
324  }
325  else
326  {
327  if (k>=num_vectors)
328  k=2*num_vectors-1-k;
329 
330  cache[j]=kernel(m, k);
331  }
332  }
333  }
334  else
335  perror("Error: Kernel cache full! => increase cache size");
336  }
337 }
338 
339 
340 void* CKernel::cache_multiple_kernel_row_helper(void* p)
341 {
342  int32_t j,k,l;
343  S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;
344 
345  for (int32_t i=params->start; i<params->end; i++)
346  {
347  KERNELCACHE_ELEM* cache=params->cache[i];
348  int32_t m = params->uncached_rows[i];
349  l=params->kernel_cache->totdoc2active[m];
350 
351  for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
352  {
353  k=params->kernel_cache->active2totdoc[j];
354 
355  if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
356  cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
357  *params->kernel_cache->index[k]+l];
358  }
359  else
360  {
361  if (k>=params->num_vectors)
362  k=2*params->num_vectors-1-k;
363 
364  cache[j]=params->kernel->kernel(m, k);
365  }
366  }
367 
368  //now line m is cached
369  params->needs_computation[m]=0;
370  }
371  return NULL;
372 }
373 
374 // Fills cache for the rows in key
375 void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
376 {
377  int32_t nthreads=parallel->get_num_threads();
378 
379  if (nthreads<2)
380  {
381  for(int32_t i=0;i<num_rows;i++)
382  cache_kernel_row(rows[i]);
383  }
384  else
385  {
386  // fill up kernel cache
387  int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
388  KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
389  S_KTHREAD_PARAM params;
390  int32_t num_threads=nthreads-1;
391  int32_t num_vec=get_num_vec_lhs();
392  ASSERT(num_vec>0)
393  uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);
394 
395  int32_t step=0;
396  int32_t num=0;
397  int32_t end=0;
398 
399  // allocate cachelines if necessary
400  for (int32_t i=0; i<num_rows; i++)
401  {
402  int32_t idx=rows[i];
403  if (idx>=num_vec)
404  idx=2*num_vec-1-idx;
405 
406  if (kernel_cache_check(idx))
407  continue;
408 
409  needs_computation[idx]=1;
410  uncached_rows[num]=idx;
411  cache[num]= kernel_cache_clean_and_malloc(idx);
412 
413  if (!cache[num])
414  SG_ERROR("Kernel cache full! => increase cache size\n")
415 
416  num++;
417  }
418 
419  if (num>0)
420  {
421  step = num/nthreads;
422 
423  if (step<1)
424  {
425  num_threads=num-1;
426  step=1;
427  }
428 
429  #pragma omp parallel for private(params)
430  for (int32_t t=0; t<num_threads; t++)
431  {
432  params.kernel = this;
433  params.kernel_cache = &kernel_cache;
434  params.cache = cache;
435  params.uncached_rows = uncached_rows;
436  params.needs_computation = needs_computation;
437  params.num_uncached = num;
438  params.start = t*step;
439  params.end = (t+1)*step;
440  params.num_vectors = get_num_vec_lhs();
441  end=params.end;
442 
443  cache_multiple_kernel_row_helper(&params);
444  }
445  }
446  else
447  num_threads=-1;
448 
449 
450  S_KTHREAD_PARAM last_param;
451  last_param.kernel = this;
452  last_param.kernel_cache = &kernel_cache;
453  last_param.cache = cache;
454  last_param.uncached_rows = uncached_rows;
455  last_param.needs_computation = needs_computation;
456  last_param.start = end;
457  last_param.num_uncached = num;
458  last_param.end = num;
459  last_param.num_vectors = get_num_vec_lhs();
460 
461  cache_multiple_kernel_row_helper(&last_param);
462 
463  SG_FREE(needs_computation);
464  SG_FREE(cache);
465  SG_FREE(uncached_rows);
466  }
467 }
468 
469 // remove numshrink columns in the cache
470 // which correspond to examples marked
// (signature line 471 lost — presumably CKernel::kernel_cache_shrink(...);
// confirm against the original source)
472  int32_t totdoc, int32_t numshrink, int32_t *after)
473 {
474  ASSERT(totdoc > 0);
475  register int32_t i,j,jj,scount; // 0 in after.
476  KERNELCACHE_IDX from=0,to=0;
477  int32_t *keep;
478 
// mark which examples survive the shrink (keep[j]==0 => drop column j)
479  keep=SG_MALLOC(int32_t, totdoc);
480  for(j=0;j<totdoc;j++) {
481  keep[j]=1;
482  }
483  scount=0;
484  for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
485  j=kernel_cache.active2totdoc[jj];
486  if(!after[j]) {
487  scount++;
488  keep[j]=0;
489  }
490  }
491 
// compact the cache buffer, skipping columns of removed examples
492  for(i=0;i<kernel_cache.max_elems;i++) {
493  for(jj=0;jj<kernel_cache.activenum;jj++) {
494  j=kernel_cache.active2totdoc[jj];
495  if(!keep[j]) {
496  from++;
497  }
498  else {
499  kernel_cache.buffer[to]=kernel_cache.buffer[from];
500  to++;
501  from++;
502  }
503  }
504  }
505 
// rebuild the active<->totdoc index maps over the surviving examples
506  kernel_cache.activenum=0;
507  for(j=0;j<totdoc;j++) {
508  if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
509  kernel_cache.active2totdoc[kernel_cache.activenum]=j;
510  kernel_cache.totdoc2active[j]=kernel_cache.activenum;
511  kernel_cache.activenum++;
512  }
513  else {
514  kernel_cache.totdoc2active[j]=-1;
515  }
516  }
517 
// NOTE(review): max_elems is taken from buffsize BEFORE buffsize is divided
// by activenum — looks like the intent was max_elems=buffsize/activenum;
// verify against the original SVMlight/shogun source before changing.
518  kernel_cache.max_elems= (int32_t) kernel_cache.buffsize;
519 
520  if (kernel_cache.activenum>0)
521  kernel_cache.buffsize/=kernel_cache.activenum;
522 
523  if(kernel_cache.max_elems>totdoc)
524  kernel_cache.max_elems=totdoc;
525 
526  SG_FREE(keep);
527 
528 }
529 
// Rebase all lru timestamps so the largest becomes zero (signature line 530
// lost — presumably kernel_cache_reset_lru()).
531 {
532  int32_t maxlru=0,k;
533 
534  for(k=0;k<kernel_cache.max_elems;k++) {
535  if(maxlru < kernel_cache.lru[k])
536  maxlru=kernel_cache.lru[k];
537  }
538  for(k=0;k<kernel_cache.max_elems;k++) {
539  kernel_cache.lru[k]-=maxlru;
540  }
541 }
542 
// Frees all cache arrays and zeroes the cache bookkeeping struct (signature
// line 543 lost — presumably kernel_cache_cleanup()).
544 {
545  SG_FREE(kernel_cache.index);
546  SG_FREE(kernel_cache.occu);
547  SG_FREE(kernel_cache.lru);
548  SG_FREE(kernel_cache.invindex);
549  SG_FREE(kernel_cache.active2totdoc);
550  SG_FREE(kernel_cache.totdoc2active);
551  SG_FREE(kernel_cache.buffer);
552  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
553 }
554 
// Finds a free cache slot, marks it occupied and returns its index; returns
// -1 when no slot is available. NOTE(review): line 559 is missing from this
// dump — the orphaned closing brace on line 567 suggests it was a guard such
// as `if(kernel_cache.elems < kernel_cache.max_elems) {`; confirm against
// the original source.
555 int32_t CKernel::kernel_cache_malloc()
556 {
557  int32_t i;
558 
560  for(i=0;i<kernel_cache.max_elems;i++) {
561  if(!kernel_cache.occu[i]) {
562  kernel_cache.occu[i]=1;
563  kernel_cache.elems++;
564  return(i);
565  }
566  }
567  }
568  return(-1);
569 }
570 
571 void CKernel::kernel_cache_free(int32_t cacheidx)
572 {
573  kernel_cache.occu[cacheidx]=0;
574  kernel_cache.elems--;
575 }
576 
577 // remove least recently used cache
578 // element
579 int32_t CKernel::kernel_cache_free_lru()
580 {
581  register int32_t k,least_elem=-1,least_time;
582 
583  least_time=kernel_cache.time+1;
584  for(k=0;k<kernel_cache.max_elems;k++) {
585  if(kernel_cache.invindex[k] != -1) {
586  if(kernel_cache.lru[k]<least_time) {
587  least_time=kernel_cache.lru[k];
588  least_elem=k;
589  }
590  }
591  }
592 
593  if(least_elem != -1) {
594  kernel_cache_free(least_elem);
595  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
596  kernel_cache.invindex[least_elem]=-1;
597  return(1);
598  }
599  return(0);
600 }
601 
602 // Get a free cache entry. In case cache is full, the lru
603 // element is removed.
604 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
605 {
606  int32_t result;
607  if((result = kernel_cache_malloc()) == -1) {
608  if(kernel_cache_free_lru()) {
609  result = kernel_cache_malloc();
610  }
611  }
612  kernel_cache.index[cacheidx]=result;
613  if(result == -1) {
614  return(0);
615  }
616  kernel_cache.invindex[result]=cacheidx;
617  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
618  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
619 }
620 #endif //USE_SVMLIGHT
621 
// Loading a kernel from file is not implemented in the base class; the body
// lines (624-625) are missing from this dump.
622 void CKernel::load(CFile* loader)
623 {
626 }
627 
// Writes the full kernel matrix to `writer`. NOTE(review): lines 631/633 —
// presumably the SG_SET_LOCALE/SG_RESET_LOCALE pair — are missing here.
628 void CKernel::save(CFile* writer)
629 {
630  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
632  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
634 }
635 
// Detaches both feature objects, resets the counters and clears the cache
// (signature line 636 lost — remove_lhs_and_rhs(), per the SG_DEBUG strings).
637 {
638  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
639  if (rhs!=lhs)
640  SG_UNREF(rhs);
641  rhs = NULL;
642  num_rhs=0;
643 
644  SG_UNREF(lhs);
645  lhs = NULL;
646  num_lhs=0;
647  lhs_equals_rhs=false;
648 
649 #ifdef USE_SVMLIGHT
650  cache_reset();
651 #endif //USE_SVMLIGHT
652  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
653 }
654 
// Detaches only the lhs features (signature line 655 lost — remove_lhs());
// the rhs alias is cleared first so it is not left dangling.
656 {
657  if (rhs==lhs)
658  rhs=NULL;
659  SG_UNREF(lhs);
660  lhs = NULL;
661  num_lhs=0;
662  lhs_equals_rhs=false;
663 #ifdef USE_SVMLIGHT
664  cache_reset();
665 #endif //USE_SVMLIGHT
666 }
667 
// Detaches only the rhs features (lines 668-669 lost — remove_rhs(): "takes
// all necessary steps if the rhs is removed from kernel").
670 {
671  if (rhs!=lhs)
672  SG_UNREF(rhs);
673  rhs = NULL;
674  num_rhs=0;
675  lhs_equals_rhs=false;
676 
677 #ifdef USE_SVMLIGHT
678  cache_reset();
679 #endif //USE_SVMLIGHT
680 }
681 
682 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
683 
// Prints a one-line human-readable summary of the kernel (signature line 684
// lost — list_kernel()). NOTE(review): the bulk of the ENUM_CASE bodies of
// the three switches (lines 693-754, 759-780, 785-798) is missing from this
// dump; only a single stray case survives.
685 {
686  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
688  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
689  "SLOWBUTMEMEFFICIENT");
690 
691  switch (get_kernel_type())
692  {
755  }
756 
757  switch (get_feature_class())
758  {
769  ENUM_CASE(C_WD)
781  }
782 
783  switch (get_feature_type())
784  {
799  }
800  SG_INFO("\n")
801 }
802 #undef ENUM_CASE
803 
// Default implementations of the linadd/batch-optimization and subkernel
// interface: the base-class stubs below simply error out (or return trivial
// values); kernels supporting these features override them.
// NOTE(review): several signature lines are missing from this dump
// (804, 811, 817, 823, 835, 840, 845, 857, 864).
805  int32_t count, int32_t *IDX, float64_t * weights)
806 {
807  SG_ERROR("kernel does not support linadd optimization\n")
808  return false ;
809 }
810 
// delete_optimization() stub.
812 {
813  SG_ERROR("kernel does not support linadd optimization\n")
814  return false;
815 }
816 
// compute_optimized(vector_idx) stub.
818 {
819  SG_ERROR("kernel does not support linadd optimization\n")
820  return 0;
821 }
822 
// compute_batch(...) stub.
824  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
825  int32_t* IDX, float64_t* weights, float64_t factor)
826 {
827  SG_ERROR("kernel does not support batch computation\n")
828 }
829 
// add_to_normal() stub.
830 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
831 {
832  SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
833 }
834 
// clear_normal() stub.
836 {
837  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
838 }
839 
// get_num_subkernels(): a plain kernel consists of exactly one subkernel.
841 {
842  return 1;
843 }
844 
// compute_by_subkernel(...) stub.
846  int32_t vector_idx, float64_t * subkernel_contrib)
847 {
848  SG_ERROR("kernel compute_by_subkernel not implemented\n")
849 }
850 
// Exposes the single combined kernel weight as a length-1 weight array.
851 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
852 {
853  num_weights=1 ;
854  return &combined_kernel_weight ;
855 }
856 
// SGVector variant (signature lost): wraps the single weight without taking
// ownership (last constructor argument false).
858 {
859  int num_weights = 1;
860  const float64_t* weight = get_subkernel_weights(num_weights);
861  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
862 }
863 
// set_subkernel_weights (signature lost): accepts exactly one weight.
865 {
866  ASSERT(weights.vector)
867  if (weights.vlen!=1)
868  SG_ERROR("number of subkernel weights should be one ...\n")
869 
870  combined_kernel_weight = weights.vector[0] ;
871 }
872 
// Safely downcasts a CSGObject to CKernel (signature line 873 lost —
// obtain_from_generic(CSGObject*)); a NULL input yields NULL, a non-kernel
// object triggers an error.
874 {
875  if (kernel)
876  {
877  CKernel* casted=dynamic_cast<CKernel*>(kernel);
878  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
879  " of class \"%s\" is not a subclass of CKernel!\n",
880  kernel->get_name());
881  return casted;
882  }
883  else
884  return NULL;
885 }
886 
// Initializes the linadd optimization from an SVM's support vectors and
// alpha coefficients (signature line 887 lost — init_optimization_svm(CSVM*)).
888 {
889  int32_t num_suppvec=svm->get_num_support_vectors();
890  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
891  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
892 
893  for (int32_t i=0; i<num_suppvec; i++)
894  {
895  sv_idx[i] = svm->get_support_vector(i);
896  sv_weight[i] = svm->get_alpha(i);
897  }
898  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
899 
900  SG_FREE(sv_idx);
901  SG_FREE(sv_weight);
902  return ret;
903 }
904 
// Serialization hooks (signature lines 905, 912, 920 lost; the calls to the
// CSGObject base implementations on lines 907, 914, 922 are also missing).
// load_serializable_post: restore the rhs==lhs alias after loading.
906 {
908  if (lhs_equals_rhs)
909  rhs=lhs;
910 }
911 
// save_serializable_pre: break the alias so rhs is not serialized twice.
913 {
915 
916  if (lhs_equals_rhs)
917  rhs=NULL;
918 }
919 
// save_serializable_post: restore the alias after saving.
921 {
923 
924  if (lhs_equals_rhs)
925  rhs=lhs;
926 }
927 
// Registers the kernel's members with the parameter framework (signature
// line 928 lost — register_params()). NOTE(review): the trailing arguments
// of the num_lhs/num_rhs entries (lines 938, 940) are missing here.
929  SG_ADD(&cache_size, "cache_size",
930  "Cache size in MB.", MS_NOT_AVAILABLE);
931  SG_ADD((CSGObject**) &lhs, "lhs",
932  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
933  SG_ADD((CSGObject**) &rhs, "rhs",
934  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
935  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
936  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
937  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
939  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
941  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
942  "Combined kernel weight.", MS_AVAILABLE);
943  SG_ADD(&optimization_initialized, "optimization_initialized",
944  "Optimization is initialized.", MS_NOT_AVAILABLE);
945  SG_ADD((machine_int_t*) &opt_type, "opt_type",
946  "Optimization type.", MS_NOT_AVAILABLE);
947  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
948  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
949  MS_AVAILABLE);
950 }
951 
952 
// Initializes all members to their defaults. NOTE(review): lines 962-965
// (presumably combined_kernel_weight and the optimization flags) and line
// 972 (a trailing call) are missing from this dump.
953 void CKernel::init()
954 {
955  cache_size=10;
956  kernel_matrix=NULL;
957  lhs=NULL;
958  rhs=NULL;
959  num_lhs=0;
960  num_rhs=0;
961  lhs_equals_rhs=false;
966  normalizer=NULL;
967 
968 #ifdef USE_SVMLIGHT
969  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
970 #endif //USE_SVMLIGHT
971 
973 }
974 
975 namespace shogun
976 {
// Per-thread work description consumed by CKernel::get_kernel_matrix_helper.
// NOTE(review): the member declarations on lines 980-982 (the kernel
// pointer, judging by the helper's use of params->kernel) and line 999 (the
// progress-bar pointer, params->pb) are missing from this dump.
978 template <class T> struct K_THREAD_PARAM
979 {
983  int32_t start;
985  int32_t end;
987  int64_t total_start;
989  int32_t m;
991  int32_t n;
993  T* result;
995  bool symmetric;
997  bool verbose;
998  /* Progress bar*/
1000 };
1001 }
1002 
// Sum over a symmetric block of the kernel matrix (signature line 1003 lost
// — presumably sum_symmetric_block(index_t block_begin, index_t block_size,
// bool no_diag)). Only the upper triangle is computed; the partial sum is
// doubled, and the diagonal is added afterwards unless no_diag is set.
1004  bool no_diag)
1005 {
1006  SG_DEBUG("Entering\n");
1007 
1008  REQUIRE(has_features(), "No features assigned to kernel\n")
1009  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1010  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1011  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1012  REQUIRE(block_begin+block_size<=num_rhs,
1013  "Invalid block size (%d) at starting index (%d, %d)! "
1014  "Please use smaller blocks!", block_size, block_begin, block_begin)
1015  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1016 
1017  float64_t sum=0.0;
1018 
1019  // since the block is symmetric with main diagonal inside, we can save half
1020  // the computation with using only the upper triangular part.
1021  // this can be done in parallel
1022  #pragma omp parallel for reduction(+:sum)
1023  for (index_t i=0; i<block_size; ++i)
1024  {
1025  // compute the kernel values on the upper triangular part of the kernel
1026  // matrix and compute sum on the fly
1027  for (index_t j=i+1; j<block_size; ++j)
1028  {
1029  float64_t k=kernel(i+block_begin, j+block_begin);
1030  sum+=k;
1031  }
1032  }
1033 
1034  // the actual sum would be twice of what we computed
1035  sum*=2;
1036 
1037  // add the diagonal elements if required - keeping this check
1038  // outside of the loop to save cycles
1039  if (!no_diag)
1040  {
1041  #pragma omp parallel for reduction(+:sum)
1042  for (index_t i=0; i<block_size; ++i)
1043  {
1044  float64_t diag=kernel(i+block_begin, i+block_begin);
1045  sum+=diag;
1046  }
1047  }
1048 
1049  SG_DEBUG("Leaving\n");
1050 
1051  return sum;
1052 }
1053 
1054 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
1055  index_t block_size_row, index_t block_size_col, bool no_diag)
1056 {
1057  SG_DEBUG("Entering\n");
1058 
1059  REQUIRE(has_features(), "No features assigned to kernel\n")
1060  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1061  block_begin_col>=0 && block_begin_col<num_rhs,
1062  "Invalid block begin index (%d, %d)!\n",
1063  block_begin_row, block_begin_col)
1064  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1065  block_begin_col+block_size_col<=num_rhs,
1066  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1067  "Please use smaller blocks!", block_size_row, block_size_col,
1068  block_begin_row, block_begin_col)
1069  REQUIRE(block_size_row>=1 && block_size_col>=1,
1070  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1071 
1072  // check if removal of diagonal is required/valid
1073  if (no_diag && block_size_row!=block_size_col)
1074  {
1075  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1076  no_diag=false;
1077  }
1078 
1079  float64_t sum=0.0;
1080 
1081  // this can be done in parallel for the rows/cols
1082  #pragma omp parallel for reduction(+:sum)
1083  for (index_t i=0; i<block_size_row; ++i)
1084  {
1085  // compute the kernel values and compute sum on the fly
1086  for (index_t j=0; j<block_size_col; ++j)
1087  {
1088  float64_t k=no_diag && i==j ? 0 :
1089  kernel(i+block_begin_row, j+block_begin_col);
1090  sum+=k;
1091  }
1092  }
1093 
1094  SG_DEBUG("Leaving\n");
1095 
1096  return sum;
1097 }
1098 
// Row-wise sums over a symmetric block (signature line 1099 lost —
// presumably row_wise_sum_symmetric_block(index_t block_begin, ...)); each
// upper-triangle value is accumulated into both its row and its column.
1100  index_t block_size, bool no_diag)
1101 {
1102  SG_DEBUG("Entering\n");
1103 
1104  REQUIRE(has_features(), "No features assigned to kernel\n")
1105  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1106  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1107  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1108  REQUIRE(block_begin+block_size<=num_rhs,
1109  "Invalid block size (%d) at starting index (%d, %d)! "
1110  "Please use smaller blocks!", block_size, block_begin, block_begin)
1111  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1112 
1113  // initialize the vector that accumulates the row/col-wise sum on the go
1114  SGVector<float64_t> row_sum(block_size);
1115  row_sum.set_const(0.0);
1116 
1117  // since the block is symmetric with main diagonal inside, we can save half
1118  // the computation with using only the upper triangular part.
1119  // this can be done in parallel for the rows/cols
1120  #pragma omp parallel for
1121  for (index_t i=0; i<block_size; ++i)
1122  {
1123  // compute the kernel values on the upper triangular part of the kernel
1124  // matrix and compute row-wise sum on the fly
1125  for (index_t j=i+1; j<block_size; ++j)
1126  {
1127  float64_t k=kernel(i+block_begin, j+block_begin);
1128  #pragma omp critical
1129  {
1130  row_sum[i]+=k;
1131  row_sum[j]+=k;
1132  }
1133  }
1134  }
1135 
1136  // add the diagonal elements if required - keeping this check
1137  // outside of the loop to save cycles
1138  if (!no_diag)
1139  {
1140  #pragma omp parallel for
1141  for (index_t i=0; i<block_size; ++i)
1142  {
1143  float64_t diag=kernel(i+block_begin, i+block_begin);
1144  row_sum[i]+=diag;
1145  }
1146  }
1147 
1148  SG_DEBUG("Leaving\n");
1149 
1150  return row_sum;
1151 }
1152 
// Row-wise sums and squared sums over a symmetric block (signature line 1153
// lost — presumably row_wise_sum_squared_sum_symmetric_block(index_t ...)).
1154  block_begin, index_t block_size, bool no_diag)
1155 {
1156  SG_DEBUG("Entering\n");
1157 
1158  REQUIRE(has_features(), "No features assigned to kernel\n")
1159  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1160  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1161  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1162  REQUIRE(block_begin+block_size<=num_rhs,
1163  "Invalid block size (%d) at starting index (%d, %d)! "
1164  "Please use smaller blocks!", block_size, block_begin, block_begin)
1165  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1166 
1167  // initialize the matrix that accumulates the row/col-wise sum on the go
1168  // the first column stores the sum of kernel values
1169  // the second column stores the sum of squared kernel values
1170  SGMatrix<float64_t> row_sum(block_size, 2);
1171  row_sum.set_const(0.0);
1172 
1173  // since the block is symmetric with main diagonal inside, we can save half
1174  // the computation with using only the upper triangular part
1175  // this can be done in parallel for the rows/cols
1176 #pragma omp parallel for
1177  for (index_t i=0; i<block_size; ++i)
1178  {
1179  // compute the kernel values on the upper triangular part of the kernel
1180  // matrix and compute row-wise sum and squared sum on the fly
1181  for (index_t j=i+1; j<block_size; ++j)
1182  {
1183  float64_t k=kernel(i+block_begin, j+block_begin);
1184 #pragma omp critical
1185  {
1186  row_sum(i, 0)+=k;
1187  row_sum(j, 0)+=k;
1188  row_sum(i, 1)+=k*k;
1189  row_sum(j, 1)+=k*k;
1190  }
1191  }
1192  }
1193 
1194  // add the diagonal elements if required - keeping this check
1195  // outside of the loop to save cycles
1196  if (!no_diag)
1197  {
1198 #pragma omp parallel for
1199  for (index_t i=0; i<block_size; ++i)
1200  {
1201  float64_t diag=kernel(i+block_begin, i+block_begin);
1202  row_sum(i, 0)+=diag;
1203  row_sum(i, 1)+=diag*diag;
1204  }
1205  }
1206 
1207  SG_DEBUG("Leaving\n");
1208 
1209  return row_sum;
1210 }
1211 
// Row- and column-wise sums over a rectangular block (signature line 1212
// lost — presumably row_col_wise_sum_block(index_t block_begin_row, ...)).
1213  index_t block_begin_col, index_t block_size_row,
1214  index_t block_size_col, bool no_diag)
1215 {
1216  SG_DEBUG("Entering\n");
1217 
1218  REQUIRE(has_features(), "No features assigned to kernel\n")
1219  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1220  block_begin_col>=0 && block_begin_col<num_rhs,
1221  "Invalid block begin index (%d, %d)!\n",
1222  block_begin_row, block_begin_col)
1223  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1224  block_begin_col+block_size_col<=num_rhs,
1225  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1226  "Please use smaller blocks!", block_size_row, block_size_col,
1227  block_begin_row, block_begin_col)
1228  REQUIRE(block_size_row>=1 && block_size_col>=1,
1229  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1230 
1231  // check if removal of diagonal is required/valid
1232  if (no_diag && block_size_row!=block_size_col)
1233  {
1234  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1235  no_diag=false;
1236  }
1237 
1238  // initialize the vector that accumulates the row/col-wise sum on the go
1239  // the first block_size_row entries store the row-wise sum of kernel values
1240  // the next block_size_col entries store the col-wise sum of kernel values
1241  SGVector<float64_t> sum(block_size_row+block_size_col);
1242  sum.set_const(0.0);
1243 
1244  // this can be done in parallel for the rows/cols
1245 #pragma omp parallel for
1246  for (index_t i=0; i<block_size_row; ++i)
1247  {
1248  // compute the kernel values and compute sum on the fly
1249  for (index_t j=0; j<block_size_col; ++j)
1250  {
1251  float64_t k=no_diag && i==j ? 0 :
1252  kernel(i+block_begin_row, j+block_begin_col);
1253 #pragma omp critical
1254  {
1255  sum[i]+=k;
1256  sum[j+block_size_row]+=k;
1257  }
1258  }
1259  }
1260 
1261  SG_DEBUG("Leaving\n");
1262 
1263  return sum;
1264 }
1265 
1266 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
1267 {
1268  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
1269  int32_t i_start=params->start;
1270  int32_t i_end=params->end;
1271  CKernel* k=params->kernel;
1272  T* result=params->result;
1273  bool symmetric=params->symmetric;
1274  int32_t n=params->n;
1275  int32_t m=params->m;
1276  bool verbose=params->verbose;
1277  int64_t total_start=params->total_start;
1278  int64_t total=total_start;
1279  PRange<int64_t>* pb = params->pb;
1280 
1281  for (int32_t i=i_start; i<i_end; i++)
1282  {
1283  int32_t j_start=0;
1284 
1285  if (symmetric)
1286  j_start=i;
1287 
1288  for (int32_t j=j_start; j<n; j++)
1289  {
1290  float64_t v=k->kernel(i,j);
1291  result[i+j*m]=v;
1292 
1293  if (symmetric && i!=j)
1294  result[j+i*m]=v;
1295 
1296  if (verbose)
1297  {
1298  total++;
1299 
1300  if (symmetric && i!=j)
1301  total++;
1302 
1303  pb->print_progress();
1304 
1305  // TODO: replace with the new signal
1306  // if (CSignal::cancel_computations())
1307  // break;
1308  }
1309  }
1310 
1311  }
1312 
1313  return NULL;
1314 }
1315 
1316 template <class T>
// get_kernel_matrix() (signature line 1317 lost): computes the full m x n
// kernel matrix, splitting the work across threads via OpenMP and the
// K_THREAD_PARAM/get_kernel_matrix_helper machinery.
1318 {
1319  T* result = NULL;
1320 
1321  REQUIRE(has_features(), "no features assigned to kernel\n")
1322 
1323  int32_t m=get_num_vec_lhs();
1324  int32_t n=get_num_vec_rhs();
1325 
1326  int64_t total_num = int64_t(m)*n;
1327 
1328  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
1329  bool symmetric= (lhs && lhs==rhs && m==n);
1330 
1331  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
1332 
1333  result=SG_MALLOC(T, total_num);
1334 
1335  int32_t num_threads=parallel->get_num_threads();
1336  K_THREAD_PARAM<T> params;
1337  int64_t step = total_num/num_threads;
1338  index_t t = 0;
1339  auto pb = progress(range(total_num), *this->io);
1340 #pragma omp parallel for lastprivate(t) private(params)
1341  for (t = 0; t < num_threads; ++t)
1342  {
1343  params.kernel = this;
1344  params.result = result;
1345  params.start = compute_row_start(t*step, n, symmetric);
1346  params.end = compute_row_start((t+1)*step, n, symmetric);
1347  params.total_start=t*step;
1348  params.n=n;
1349  params.m=m;
1350  params.symmetric=symmetric;
1351  params.verbose=false;
1352  params.pb = &pb;
1353  CKernel::get_kernel_matrix_helper<T>((void*)&params);
1354  }
1355 
// handle the remainder rows when total_num is not divisible by num_threads
// (t carries the lastprivate value from the loop above)
1356  if (total_num % num_threads != 0)
1357  {
1358  params.kernel = this;
1359  params.result = result;
1360  params.start = compute_row_start(t*step, n, symmetric);
1361  params.end = m;
1362  params.total_start=t*step;
1363  params.n=n;
1364  params.m=m;
1365  params.symmetric=symmetric;
1366  params.verbose=false;
1367  params.pb = &pb;
1368  CKernel::get_kernel_matrix_helper<T>((void*)&params);
1369  }
1370 
1371  pb.complete();
1372 
// ownership of the result buffer passes to the returned SGMatrix (last arg true)
1373  return SGMatrix<T>(result,m,n,true);
1374 }
1375 
1376 
// Explicit template instantiations for the kernel-matrix routines.
1377 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
1378 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
1379 
1380 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
1381 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
virtual void clear_normal()
Definition: Kernel.cpp:835
virtual const char * get_name() const =0
virtual void load_serializable_post()
Definition: Kernel.cpp:905
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:97
virtual bool support_compatible_class() const
Definition: Features.h:340
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
#define SG_INFO(...)
Definition: SGIO.h:117
virtual void cleanup()
Definition: Kernel.cpp:172
#define SG_RESET_LOCALE
Definition: SGIO.h:85
virtual void set_matrix(const bool *matrix, int32_t num_feat, int32_t num_vec)
Definition: File.cpp:126
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:845
void cache_multiple_kernel_rows(int32_t *key, int32_t varnum)
Definition: Kernel.cpp:375
virtual bool get_feature_class_compatibility(EFeatureClass rhs) const
Definition: Features.cpp:355
int32_t get_num_threads() const
Definition: Parallel.cpp:97
int32_t index_t
Definition: common.h:72
int32_t num_rhs
number of feature vectors on right hand side
PRange< T > progress(Range< T > range, const SGIO &io, std::string prefix="PROGRESS: ", SG_PRG_MODE mode=UTF8, std::function< bool()> condition=[](){return true;})
Definition: progress.h:712
static void * get_kernel_matrix_helper(void *p)
Definition: Kernel.cpp:1266
Class ShogunException defines an exception which is thrown whenever an error inside of shogun occurs...
virtual bool set_normalizer(CKernelNormalizer *normalizer)
Definition: Kernel.cpp:149
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1054
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
Definition: SGObject.cpp:465
#define SG_ERROR(...)
Definition: SGIO.h:128
#define REQUIRE(x,...)
Definition: SGIO.h:181
virtual bool delete_optimization()
Definition: Kernel.cpp:811
int64_t KERNELCACHE_IDX
Definition: kernel/Kernel.h:45
int32_t kernel_cache_space_available()
float64_t kernel(int32_t idx_a, int32_t idx_b)
#define ENUM_CASE(n)
Definition: Kernel.cpp:682
Parallel * parallel
Definition: SGObject.h:603
Range< T > range(T rend)
Definition: range.h:136
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:669
virtual int32_t get_num_vec_lhs()
SGMatrix< float64_t > get_kernel_matrix()
#define SG_REF(x)
Definition: SGObject.h:52
#define SG_SET_LOCALE_C
Definition: SGIO.h:84
int32_t cache_size
cache_size in MB
void kernel_cache_shrink(int32_t totdoc, int32_t num_shrink, int32_t *after)
Definition: Kernel.cpp:471
bool get_is_initialized()
PRange< int64_t > * pb
Definition: Kernel.cpp:999
virtual SGMatrix< float64_t > row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1153
float64_t combined_kernel_weight
virtual void register_params()
Definition: Kernel.cpp:928
void save(CFile *writer)
Definition: Kernel.cpp:628
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:636
virtual CKernelNormalizer * get_normalizer()
Definition: Kernel.cpp:161
#define ASSERT(x)
Definition: SGIO.h:176
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:124
virtual SGVector< float64_t > row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1212
void cache_kernel_row(int32_t x)
Definition: Kernel.cpp:301
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1003
virtual SGVector< float64_t > get_subkernel_weights()
Definition: Kernel.cpp:857
double float64_t
Definition: common.h:60
KERNEL_CACHE kernel_cache
kernel cache
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM * kernel_matrix
A File access base class.
Definition: File.h:34
virtual void save_serializable_post()
Definition: Kernel.cpp:920
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:817
EOptimizationType get_optimization_type()
index_t num_rows
Definition: SGMatrix.h:495
virtual void save_serializable_post()
Definition: SGObject.cpp:470
void list_kernel()
Definition: Kernel.cpp:684
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
virtual SGVector< float64_t > row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1099
virtual EFeatureClass get_feature_class() const =0
void print_progress() const
Definition: progress.h:617
Identity Kernel Normalization, i.e. no normalization is applied.
index_t num_cols
Definition: SGMatrix.h:497
int32_t num_lhs
number of feature vectors on left hand side
The class Kernel Normalizer defines a function to post-process kernel values.
int32_t get_support_vector(int32_t idx)
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:864
void set_const(T const_elem)
Definition: SGVector.cpp:199
virtual bool init_normalizer()
Definition: Kernel.cpp:167
bool optimization_initialized
EOptimizationType opt_type
void load(CFile *loader)
Definition: Kernel.cpp:622
virtual void load_serializable_post()
Definition: SGObject.cpp:460
CFeatures * rhs
feature vectors to occur on right hand side
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:873
#define SG_UNREF(x)
Definition: SGObject.h:53
#define SG_DEBUG(...)
Definition: SGIO.h:106
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual bool init(CKernel *k)=0
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:823
T sum(const Container< T > &a, bool no_diag=false)
bool lhs_equals_rhs
lhs
int machine_int_t
Definition: common.h:69
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:804
CFeatures * lhs
feature vectors to occur on left hand side
The class Features is the base class of all feature objects.
Definition: Features.h:69
virtual void save_serializable_pre()
Definition: Kernel.cpp:912
void kernel_cache_cleanup()
Definition: Kernel.cpp:543
virtual void remove_lhs()
Definition: Kernel.cpp:655
int32_t kernel_cache_check(int32_t cacheidx)
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:840
bool init_optimization_svm(CSVM *svm)
Definition: Kernel.cpp:887
A generic Support Vector Machine Interface.
Definition: SVM.h:49
void kernel_cache_reset_lru()
Definition: Kernel.cpp:530
The Kernel base class.
CKernelNormalizer * normalizer
virtual SGVector< float64_t > get_kernel_row(int32_t i)
void set_const(T const_elem)
Definition: SGMatrix.cpp:209
#define SG_WARNING(...)
Definition: SGIO.h:127
#define SG_ADD(...)
Definition: SGObject.h:93
virtual bool has_features()
void kernel_cache_init(int32_t size, bool regression_hack=false)
Definition: Kernel.cpp:180
virtual ~CKernel()
Definition: Kernel.cpp:72
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:830
float64_t KERNELCACHE_ELEM
Definition: kernel/Kernel.h:34
void resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack=false)
Definition: Kernel.cpp:84
virtual EFeatureType get_feature_type() const =0
index_t vlen
Definition: SGVector.h:571
virtual EFeatureClass get_feature_class()=0

SHOGUN Machine Learning Toolbox - Documentation