Commit 9aae748

Documentation and examples update (#7)
1 parent 303c106 · commit 9aae748

4 files changed: 68 additions, 23 deletions

README.md

Lines changed: 17 additions & 7 deletions

@@ -64,13 +64,23 @@ The `cvmatrix` software package now also features **weigthed matrix produts** $\
 > # Fit on X and Y
 > cvm.fit(X=X, Y=Y, weights=weights)
 > # Compute training set XTWX and/or XTWY for each fold
-> for fold in cvm.folds_dict.keys():
->     # Get both XTWX and XTWY
->     training_XTX, training_XTY = cvm.training_XTX_XTY(fold)
->     # Get only XTWX
->     training_XTX = cvm.training_XTX(fold)
->     # Get only XTWY
->     training_XTY = cvm.training_XTY(fold)
+> for fold in cvm.folds_dict:
+>     # Get both XTWX, XTWY, and weighted statistics
+>     result = cvm.training_XTX_XTY(fold)
+>     (training_XTWX, training_XTWY) = result[0]
+>     (training_X_mean, training_X_std, training_Y_mean, training_Y_std) = result[1]
+>
+>     # Get only XTWX and weighted statistics for X.
+>     # Weighted statistics for Y are returned as None as they are not computed when
+>     # only XTWX is requested.
+>     result = cvm.training_XTX(fold)
+>     training_XTWX = result[0]
+>     (training_X_mean, training_X_std, training_Y_mean, training_Y_std) = result[1]
+>
+>     # Get only XTWY and weighted statistics
+>     result = cvm.training_XTY(fold)
+>     training_XTWY = result[0]
+>     (training_X_mean, training_X_std, training_Y_mean, training_Y_std) = result[1]
 
 ### Examples
 In [examples](https://github.com/Sm00thix/CVMatrix/tree/main/examples), you will find:
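For context, the updated README snippet corresponds to a usage pattern along the following lines. This is a minimal sketch with synthetic data; the import path and the CVMatrix constructor arguments are assumptions, since the part of the README that constructs cvm is not shown in this diff.

import numpy as np
from cvmatrix.cvmatrix import CVMatrix  # import path assumed, not shown in this diff

rng = np.random.default_rng(42)
X = rng.random((10, 3))          # 10 samples, 3 features
Y = rng.random((10, 2))          # 10 samples, 2 targets
weights = rng.random(10)         # one non-negative weight per sample
folds = np.arange(10) % 3        # assign each sample to one of 3 folds

# Constructor shown with only a fold assignment; any other arguments are
# assumed to keep their defaults (see the full README for the actual signature).
cvm = CVMatrix(folds=folds)

# Fit on X and Y with per-sample weights, as in the README snippet above.
cvm.fit(X=X, Y=Y, weights=weights)

for fold in cvm.folds_dict:
    # Each call now returns a pair: the requested matrices and the weighted statistics.
    (XTWX, XTWY), (X_mean, X_std, Y_mean, Y_std) = cvm.training_XTX_XTY(fold)
    print(f"Fold {fold}: XTWX {XTWX.shape}, XTWY {XTWY.shape}")

As the updated comments state, training_XTX and training_XTY return the same two-element structure, with only the requested matrix in the first element and the Y statistics set to None when only XTWX is asked for.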

cvmatrix/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-__version__ = "2.1.0"
+__version__ = "2.1.0.post1"

examples/training_matrices.py

Lines changed: 49 additions & 14 deletions

@@ -51,28 +51,45 @@
 
 # Compute the training set matrices for each fold.
 print("Training set matrices using training_XTX_XTY:")
-for fold in cvm.folds_dict.keys():
+for fold in cvm.folds_dict:
     # Notice that the samples associated with fold are considered part of the
     # validation set. The training set is then all samples not associated with this
     # fold.
-    XTX, XTY = cvm.training_XTX_XTY(fold)
+    result = cvm.training_XTX_XTY(fold)
+    (XTWX, XTWY), (X_mean, X_std, Y_mean, Y_std) = result
     print(f"Fold {fold}:")
-    print(f"Training XTWX:\n{XTX}")
-    print(f"Training XTWY:\n{XTY}")
+    print(f"Training XTWX:\n{XTWX}")
+    print(f"Training XTWY:\n{XTWY}")
+    print(f"Training weighted X mean:\n{X_mean}")
+    print(f"Training weighted X std:\n{X_std}")
+    print(f"Training weighted Y mean:\n{Y_mean}")
+    print(f"Training weighted Y std:\n{Y_std}")
     print()
 
 # We can also get only XTWX or only XTWY. However, if both XTWX and XTWY are needed,
 # it is more efficient to call training_XTX_XTY.
 print("Training set matrices using training_XTX and training_XTY:")
-for fold in cvm.folds_dict.keys():
-    XTX = cvm.training_XTX(fold)
+for fold in cvm.folds_dict:
+    result = cvm.training_XTX(fold)
+    XTWX, (X_mean, X_std, Y_mean, Y_std) = result
     print(f"Fold {fold}:")
-    print(f"Training XTWX:\n{XTX}")
+    print(f"Training XTWX:\n{XTWX}")
+    print(f"Training weighted X mean:\n{X_mean}")
+    print(f"Training weighted X std:\n{X_std}")
+
+    # These two are None as they are not computed when only XTX is requested.
+    print(f"Training weighted Y mean:\n{Y_mean}")
+    print(f"Training weighted Y std:\n{Y_std}")
     print()
-for fold in cvm.folds_dict.keys():
-    XTY = cvm.training_XTY(fold)
+for fold in cvm.folds_dict:
+    result = cvm.training_XTY(fold)
+    XTWY, (X_mean, X_std, Y_mean, Y_std) = result
     print(f"Fold {fold}:")
-    print(f"Training XTWY:\n{XTY}")
+    print(f"Training XTWY:\n{XTWY}")
+    print(f"Training weighted X mean:\n{X_mean}")
+    print(f"Training weighted X std:\n{X_std}")
+    print(f"Training weighted Y mean:\n{Y_mean}")
+    print(f"Training weighted Y std:\n{Y_std}")
     print()
 
 # We can also fit on new X and Y. This will recompute the global statistics and
@@ -84,9 +101,27 @@
 
 print("Fitting on new data:")
 cvm.fit(X, Y)
-for fold in cvm.folds_dict.keys():
-    XTX, XTY = cvm.training_XTX_XTY(fold)
+for fold in cvm.folds_dict:
+    result = cvm.training_XTX_XTY(fold)
+    (XTWX, XTWY), (X_mean, X_std, Y_mean, Y_std) = result
+    print(f"Fold {fold}:")
+    print(f"Training XTWX:\n{XTWX}")
+    print(f"Training XTWY:\n{XTWY}")
+    print(f"Training weighted X mean:\n{X_mean}")
+    print(f"Training weighted X std:\n{X_std}")
+    print(f"Training weighted Y mean:\n{Y_mean}")
+    print(f"Training weighted Y std:\n{Y_std}")
+    print()
+
+# We can also get the training set statistics without computing the training set
+# matrices. This is useful if we only need the statistics for further processing.
+print("Training set statistics:")
+for fold in cvm.folds_dict:
+    result = cvm.training_statistics(fold)
+    X_mean, X_std, Y_mean, Y_std = result
     print(f"Fold {fold}:")
-    print(f"Training XTWX:\n{XTX}")
-    print(f"Training XTWY:\n{XTY}")
+    print(f"Training weighted X mean:\n{X_mean}")
+    print(f"Training weighted X std:\n{X_std}")
+    print(f"Training weighted Y mean:\n{Y_mean}")
+    print(f"Training weighted Y std:\n{Y_std}")
     print()
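As a small illustration of the "further processing" mentioned in the new comment above, the training statistics could, for example, be used to center and scale the held-out samples of a fold. This sketch assumes that cvm, X, and a per-sample fold assignment array folds are defined as in the example script (the variable names are assumptions), and that the returned statistics are weighted column-wise means and standard deviations.

import numpy as np

for fold in cvm.folds_dict:
    # Statistics only; no training set matrices are computed here.
    X_mean, X_std, Y_mean, Y_std = cvm.training_statistics(fold)

    # Hypothetical use: preprocess this fold's validation samples with the
    # training set statistics (illustration only, not part of the example file).
    val_mask = np.asarray(folds) == fold
    X_val = (X[val_mask] - X_mean) / X_std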

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cvmatrix"
-version = "2.1.0"
+version = "2.1.0.post1"
 description = "Fast computation of possibly weighted and possibly centered/scaled training set kernel matrices in a cross-validation setting."
 authors = ["Sm00thix <oleemail@icloud.com>"]
 maintainers = ["Sm00thix <oleemail@icloud.com>"]
