// MklKernel.h
//===========================================================================
/*!
 *
 *
 * \brief Weighted sum of base kernels, each acting on a subset of features only.
 *
 *
 *
 * \author M. Tuma, O. Krause
 * \date 2012
 *
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <https://shark-ml.github.io/Shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================

#ifndef SHARK_MODELS_KERNELS_MKL_KERNEL_H
#define SHARK_MODELS_KERNELS_MKL_KERNEL_H

#include <shark/Models/Kernels/WeightedSumKernel.h>
#include "Impl/MklKernelBase.h"
namespace shark {

/// \brief Weighted sum of kernel functions
///
/// For a set of positive definite kernels \f$ k_1, \dots, k_n \f$
/// with positive coefficients \f$ w_1, \dots, w_n \f$ the sum
/// \f[ \tilde k(x_1, x_2) := \sum_{i=1}^{n} w_i \cdot k_i(x_1, x_2) \f]
/// is again a positive definite kernel function. This still holds when
/// the sub-kernels only operate on a subset of features, that is, when
/// we have a direct sum kernel ( see e.g. the UCSC Technical Report UCSC-CRL-99-10:
/// Convolution Kernels on Discrete Structures by David Haussler ).
///
/// This class is very similar to the #WeightedSumKernel , except that it assumes
/// its inputs to be tuples of values \f$ x=(x_1,\dots, x_n) \f$. It calculates
/// the direct sum of kernels
/// \f[ \tilde k(x, y) := \sum_{i=1}^{n} w_i \cdot k_i(x_i, y_i) \f]
///
/// Internally, the weights are represented as \f$ w_i = \exp(\xi_i) \f$
/// to allow for unconstrained optimization.
///
/// The result of the kernel evaluation is divided by the sum of the
/// kernel weights, so that in total, this amounts to fixing the sum
/// of the weights to one.
///
/// In the current implementation, we expect the InputType to be a
/// boost::fusion::vector. For example, boost::fusion::vector<RealVector,RealVector>
/// represents a tuple of two vectors.
/// \ingroup kernels
69template<class InputType>
71: private detail::MklKernelBase<InputType>//order is important!
72, public WeightedSumKernel<InputType>
73{
74private:
75 typedef detail::MklKernelBase<InputType> base_type1;
77public:
78
79 template<class KernelTuple>
80 MklKernel(KernelTuple const& kernels):base_type1(kernels),base_type2(base_type1::makeKernelVector()){}
81
82 /// \brief From INameable: return the class name.
83 std::string name() const
84 { return "MklKernel"; }
85};

} // namespace shark
#endif