Skip to content
Snippets Groups Projects
Commit 33de38e9 authored by mkirsz's avatar mkirsz
Browse files

Docs update

parent 81a3bb2b
No related branches found
No related tags found
1 merge request!13Docs update
Pipeline #48557 failed
...@@ -14,78 +14,48 @@ ...@@ -14,78 +14,48 @@
#include <type_traits> #include <type_traits>
#include <iostream> #include <iostream>
/** Bayesian Linear Regression /**
* @class M_BLR
* @brief Bayesian Linear Regression (BLR).
* *
* Model supported training modes: * This class implements Bayesian Linear Regression, a statistical method to make predictions using linear models with both linear and nonlinear features.
* *
* - LINEAR: Ordinary Least Squares or Ridge Regression (Regularised Least Squares) * **Model Supported Training Modes**:
* * - **LINEAR**: Uses Ordinary Least Squares or Ridge Regression for linear relationships.
* - NONLINEAR: Linear in parameters but nonlinear in an input space. * - **NONLINEAR**: Utilizes basis functions to handle nonlinear input spaces, transforming input descriptors into higher-dimensional feature spaces. For example, polynomial transformations.
* Each row of a matrix \f$\Phi\f$ is a vector-valued function
* of the original input descriptor \f$ \mathbf{\phi(x)}^T \f$
* e.g. BF_Polynomial2:
* (1, x_1, x_2) -> (1, x_1, x_2, x_1x_2, x_1^2, x_2^2)
* *
* **Prediction**: * **Prediction**:
* * - Computes predictions as a weighted sum of basis functions applied to input vectors.
* \f[
 * y(\mathbf{x}, \mathbf{w}) = \sum_{j=0}^{M-1} w_j \phi_j(\mathbf{x})
* \f]
*
* where M is a number of parameters, **w** are machine learned weights
* and **x** is a vector of input variables.
* *
* **Training**: * **Training**:
* - Employs regularized least squares, allowing for optional regularization through the \f$\lambda\f$ parameter.
* - Ordinary Least Squares (OLS) is a special case when \f$\lambda = 0\f$.
* *
* \f[ * **Configuration Options**:
* \mathbf{w} = (\lambda \mathbf{I} + \mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y} * - **LAMBDA**: Set to `0` for OLS, a positive value for specified regularization, or `-1` for automatic tuning using evidence approximation.
* \f]
*
 * for \f$\lambda=0\f$ this reduces to Ordinary Least Squares (OLS) aka
* Linear Least Squares with the normal equation:
*
* \f[
* \mathbf{w} = (\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y}
* \f]
*
* This model is always a linear function of its parameters \f$w_i\f$.
*
 * When used with a linear basis function \f$\phi\f$ (here BF_Linear)
 * it is also linear with respect to the input variables (here descriptors).
*
 * When a nonlinear basis function \f$\phi\f$ is used
 * the function \f$y(\mathbf{x}, \mathbf{w})\f$ becomes nonlinear with respect to **x**
 * but still linear with respect to **w**.
*
* **Optional Config keys**:
*
* - \ref LAMBDA = 0 -> use OLS
* - \ref LAMBDA > 0 -> manually set to given value
* - \ref LAMBDA = -1 -> use evidence approximation to find \f$\lambda\f$
* *
* @tparam BF DM_BF_Base child, Basis function * @tparam BF DM_BF_Base child, Basis function
*/ */
template template
<class BF=DM_Function_Base&> <class BF=DM_Function_Base&>
class M_BLR: public M_Tadah_Base, public M_BLR_Train<BF> { class M_BLR: public M_Tadah_Base, public M_BLR_Train<BF> {
public: public:
/** This constructor will prepare this object for either training
* or prediction (if potential is provided as a Config)
*
* Usage example:
*
* \code{.cpp}
* Config config("Config");
* M_BLR<BF_Linear> blr(config);
* \endcode
*
*/
using M_BLR_Train<BF>::config; using M_BLR_Train<BF>::config;
using M_BLR_Train<BF>::bf; using M_BLR_Train<BF>::bf;
/**
* @brief Initializes for training or prediction using a configuration.
*
* **Example**:
* \code{.cpp}
* Config config("Config");
* M_BLR<BF_Linear> blr(config);
* \endcode
*
* @param c Configuration object.
*/
M_BLR(Config &c): M_BLR(Config &c):
M_BLR_Train<BF>(c), M_BLR_Train<BF>(c),
desmat(M_BLR_Train<BF>::bf,c) desmat(M_BLR_Train<BF>::bf,c)
...@@ -93,18 +63,12 @@ public: ...@@ -93,18 +63,12 @@ public:
norm = Normaliser(c); norm = Normaliser(c);
} }
/** This constructor will prepare this object for either training /**
* or prediction (if potential is provided as a Config) * @brief Initializes for training or prediction using a basis function and configuration.
* *
* Usage example: * @param bf Basis function.
* @param c Configuration object.
* \code{.cpp} */
* Config config("Config");
* BF_Linear bf(config);
* M_BLR<> blr(bf, config);
* \endcode
*
*/
M_BLR(BF &bf, Config &c): M_BLR(BF &bf, Config &c):
M_BLR_Train<BF>(bf,c), M_BLR_Train<BF>(bf,c),
desmat(bf,c) desmat(bf,c)
......
...@@ -15,33 +15,26 @@ ...@@ -15,33 +15,26 @@
#include <type_traits> #include <type_traits>
#include <iostream> #include <iostream>
/** Kernel Ridge Regression. /**
* @class M_KRR
* @brief Kernel Ridge Regression (KRR) with Empirical Kernel Map (EKM).
* *
* Usage: * This class performs Kernel Ridge Regression using an Empirical Kernel Map
* to efficiently handle high-dimensional data. EKM is used to map objects into a kernel feature space, where linear methods can be applied.
* *
* M_KRR KERNEL ALGO * **Empirical Kernel Map (EKM)**:
* - Maps sample objects into finite-dimensional vectors in the kernel feature space.
* - Requires a kernel and basis samples to project new samples into the space defined by these bases.
* - Facilitates the kernelization of algorithms traditionally operating on vectors.
* - Supports basis sample selection through methods like random sampling or finding linearly independent subsets.
* *
* Example: * **Usage**:
* - **Kernels**: Use kernel functions (e.g., linear, RBF) to transform data for effective modeling in higher-dimensional spaces.
* *
* M_KRR Kern_RBF 1 * **Configuration Options**:
* - **LAMBDA**: Controls regularization. Use `0` for ordinary least squares, positive values for manual setting, or `-1` for automatic tuning.
* - **SBASIS**: Defines the number of basis functions for nonlinear kernels.
* *
* KERNEL is any kernel defined in Kernels, for example:
*
* Kern_Linear
* Kern_RBF
* ...
*
* ALGO specify which implementation to use:
*
* ALGO = 1 (default) uses empirical kernel map to achieve sparse model
 * ALGO = 2 standard with the covariance matrix
*
* **Optional Config keys**:
*
* - \ref LAMBDA = 0 -> use OLS
* - \ref LAMBDA > 0 -> manually set to given value
* - \ref LAMBDA = -1 -> use evidence approximation to find \f$\lambda\f$
* - \ref SBASIS N -> Use N basis functions when nonlinear K is used
* *
* @tparam K DM_Kern_Base child, Kernel function * @tparam K DM_Kern_Base child, Kernel function
*/ */
...@@ -49,21 +42,20 @@ template ...@@ -49,21 +42,20 @@ template
<class K=DM_Function_Base&> <class K=DM_Function_Base&>
class M_KRR: public M_Tadah_Base, class M_KRR: public M_Tadah_Base,
public M_KRR_Train<K> public M_KRR_Train<K>
//public M_KRR_Predict<K>
{ {
public: public:
/** This constructor will prepare this object for either training /**
* or prediction (if potential is provided as a Config) * @brief Initializes for training or prediction using a configuration.
* *
* Usage example: * **Example**:
* * \code{.cpp}
* \code{.cpp} * Config config("Config");
* Config config("Config"); * M_KRR<Kern_Linear> krr(config);
* M_KRR<Kern_Linear> krr(config); * \endcode
* \endcode *
* * @param c Configuration object.
*/ */
M_KRR(Config &c): M_KRR(Config &c):
M_KRR_Train<K>(c), M_KRR_Train<K>(c),
...@@ -73,18 +65,12 @@ public: ...@@ -73,18 +65,12 @@ public:
norm = Normaliser(c); norm = Normaliser(c);
} }
/** This constructor will prepare this object for either training /**
* or prediction (if potential is provided as a Config) * @brief Initializes for training or prediction using a kernel and configuration.
* *
* Usage example: * @param kernel Kernel function.
* @param c Configuration object.
* \code{.cpp} */
* Config config("Config");
* Kern_Linear kernel(config);
* M_KRR<> krr(kernel, config);
* \endcode
*
*/
M_KRR(K &kernel, Config &c): M_KRR(K &kernel, Config &c):
M_KRR_Train<K>(kernel,c), M_KRR_Train<K>(kernel,c),
basis(c), basis(c),
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment