[Scipy-svn] r2125 - in trunk/Lib/sandbox/svm: . tests

scipy-svn at scipy.org
Tue Jul 25 08:09:45 EDT 2006


Author: fullung
Date: 2006-07-25 07:09:08 -0500 (Tue, 25 Jul 2006)
New Revision: 2125

Removed:
   trunk/Lib/sandbox/svm/tests/test_precomputed.py
Modified:
   trunk/Lib/sandbox/svm/classification.py
   trunk/Lib/sandbox/svm/predict.py
   trunk/Lib/sandbox/svm/tests/test_classification.py
Log:
Add test for classification with precomputed kernels; sort class weights for deterministic ordering; raise ValueError when predict_probability is called on a model trained without probability estimates; remove the superseded test_precomputed.py.


Modified: trunk/Lib/sandbox/svm/classification.py
===================================================================
--- trunk/Lib/sandbox/svm/classification.py	2006-07-25 10:16:53 UTC (rev 2124)
+++ trunk/Lib/sandbox/svm/classification.py	2006-07-25 12:09:08 UTC (rev 2125)
@@ -96,10 +96,11 @@
         if weights is not None:
             self.weight_labels = N.empty((len(weights),), dtype=N.intp)
             self.weights = N.empty((len(weights),), dtype=N.float64)
+            weights = weights[:]
+            weights.sort()
             for i, (label, weight) in enumerate(weights):
                 self.weight_labels[i] = label
                 self.weights[i] = weight
-
             self.param.nr_weight = len(weights)
             self.param.weight_label = \
                 cast(self.weight_labels.ctypes.data, POINTER(c_int))

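The classification.py change copies and sorts the (label, weight) pairs before packing them into the arrays handed to libsvm, so the resulting weight arrays no longer depend on the order in which the caller listed the class weights. A standalone sketch of that sort-then-fill pattern (plain numpy/ctypes, not the library code; pack_weights is an illustrative name):

    import numpy as N
    from ctypes import POINTER, c_int, c_double, cast

    def pack_weights(weights):
        weights = weights[:]    # copy, so the caller's list is left untouched
        weights.sort()          # deterministic order: sorted by class label
        labels = N.empty(len(weights), dtype=N.intc)    # intc matches C int
        values = N.empty(len(weights), dtype=N.float64)
        for i, (label, weight) in enumerate(weights):
            labels[i] = label
            values[i] = weight
        # pointers in the form expected by svm_parameter.weight_label/.weight
        label_ptr = cast(labels.ctypes.data, POINTER(c_int))
        value_ptr = cast(values.ctypes.data, POINTER(c_double))
        return labels, values, label_ptr, value_ptr

    # the same weights given in any order now produce identical arrays
    a = pack_weights([(0, 2.0), (1, 5.0), (2, 3.0)])
    b = pack_weights([(2, 3.0), (0, 2.0), (1, 5.0)])
    assert (a[0] == b[0]).all() and (a[1] == b[1]).all()
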
Modified: trunk/Lib/sandbox/svm/predict.py
===================================================================
--- trunk/Lib/sandbox/svm/predict.py	2006-07-25 10:16:53 UTC (rev 2124)
+++ trunk/Lib/sandbox/svm/predict.py	2006-07-25 12:09:08 UTC (rev 2125)
@@ -47,6 +47,8 @@
         return v
 
     def predict_probability(self, x, n):
+        if not self.model.contents.param.probability:
+            raise ValueError, 'not a probability model'
         x = self._transform_input(x)
         xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
         pe = N.empty((n,), dtype=N.float64)

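The predict.py hunk makes predict_probability fail fast when the underlying libsvm model was not trained with probability estimates, instead of handing libsvm a model that carries no probability information. A minimal illustration of that guard (a hypothetical stand-in class, not the library's predictor):

    class GuardedPredictor(object):
        # hypothetical stand-in: 'probability' mirrors param.probability
        def __init__(self, probability):
            self.probability = probability

        def predict_probability(self, x, n):
            if not self.probability:
                raise ValueError('not a probability model')
            # a real predictor would call libsvm's svm_predict_probability here
            return [1.0 / n] * n
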
Modified: trunk/Lib/sandbox/svm/tests/test_classification.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_classification.py	2006-07-25 10:16:53 UTC (rev 2124)
+++ trunk/Lib/sandbox/svm/tests/test_classification.py	2006-07-25 12:09:08 UTC (rev 2125)
@@ -5,6 +5,7 @@
 from svm.classification import *
 from svm.dataset import LibSvmClassificationDataSet, LibSvmTestDataSet
 from svm.kernel import *
+from svm.predict import *
 restore_path()
 
 class test_classification(NumpyTestCase):
@@ -118,5 +119,82 @@
     def check_nu_train(self):
         pass
 
+    def _make_datasets(self):
+        labels1 = N.random.random_integers(0, 2, 100)
+        x1 = N.random.randn(len(labels1), 10)
+        labels2 = N.random.random_integers(0, 2, 10)
+        x2 = N.random.randn(len(labels2), x1.shape[1])
+        trndata1 = LibSvmClassificationDataSet(zip(labels1, x1))
+        trndata2 = LibSvmClassificationDataSet(zip(labels2, x2))
+        reflabels = N.concatenate([labels1, labels2])
+        refx = N.vstack([x1, x2])
+        trndata = LibSvmClassificationDataSet(zip(reflabels, refx))
+        testdata = LibSvmTestDataSet(refx)
+        return trndata, trndata1, trndata2, testdata
+
+    def _make_kernels(self):
+        def kernelf(x, y, dot):
+            return dot(x, y)
+        def kernelg(x, y, dot):
+            return -dot(x, y)
+        kernels = [LinearKernel()]
+        #kernels += [RBFKernel(gamma)
+        #            for gamma in [-0.1, 0.2, 0.3]]
+        #kernels += [PolynomialKernel(degree, gamma, coef0)
+        #            for degree, gamma, coef0 in
+        #            [(1, 0.1, 0.0), (2, -0.2, 1.3), (3, 0.3, -0.3)]]
+        #kernels += [SigmoidKernel(gamma, coef0)
+        #            for gamma, coef0 in [(0.2, -0.5), (-0.5, 1.5)]]
+        #kernels += [CustomKernel(f) for f in [kernelf, kernelg]]
+        return kernels
+
+    def check_all(self):
+        trndata, trndata1, trndata2, testdata = self._make_datasets()
+        kernels = self._make_kernels()
+        weights = [(0, 2.0), (1, 5.0), (2, 3.0)]
+        for kernel in kernels:
+            pctrndata1 = trndata1.precompute(kernel)
+            pctrndata = pctrndata1.combine(trndata2)
+            models = [
+                LibSvmCClassificationModel(kernel, 2.0, weights),
+                LibSvmNuClassificationModel(kernel, 0.3, weights)
+                ]
+            fitargs = []
+            # CustomKernel needs a precomputed dataset
+            if not isinstance(kernel, CustomKernel):
+                fitargs += [
+                    (trndata, LibSvmPredictor),
+                    #(trndata, LibSvmPythonPredictor),
+                    ]
+            fitargs += [
+                (pctrndata, LibSvmPredictor),
+                #(pctrndata, LibSvmPythonPredictor)
+                ]
+
+            for model in models:
+                refresults = model.fit(*fitargs[0])
+                refrho = refresults.rho
+                refp = refresults.predict(testdata)
+                refv = refresults.predict_values(testdata)
+                refpp = refresults.predict_probability(testdata)
+                for args in fitargs[1:]:
+                    results = model.fit(*args)
+                    assert_array_almost_equal(results.rho, refrho)
+                    p = results.predict(testdata)
+                    assert_array_almost_equal(refp, p)
+                    v = results.predict_values(testdata)
+                    for vi, refvi in zip(v, refv):
+                        for key, value in refvi.iteritems():
+                            self.assertAlmostEqual(vi[key], value)
+                    try:
+                        pp = results.predict_probability(testdata)
+                        # XXX there are slight differences between
+                        # precomputed and normal kernels here
+                        #for (lbl, p), (reflbl, refp) in zip(pp, refpp):
+                        #    self.assertEqual(lbl, reflbl)
+                        #    assert_array_almost_equal(p, refp)
+                    except NotImplementedError:
+                        self.assert_(args[1] is LibSvmPythonPredictor)
+
 if __name__ == '__main__':
     NumpyTest().run()

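For a linear kernel, the precomputed dataset used in check_all conceptually holds the Gram matrix of dot products over the combined data, which is why a model fitted on pctrndata is expected to reproduce the rho, decision values and predictions of one fitted on the raw trndata. A plain-numpy sketch of the kernel side of that equivalence (shapes mirror the test data; independent of the svm package):

    import numpy as N

    x1 = N.random.randn(100, 10)    # the part of the data that stays fixed
    x2 = N.random.randn(10, 10)     # the part that varies between experiments
    x = N.vstack([x1, x2])

    # precomputing a linear kernel amounts to storing K[i, j] = <x_i, x_j>
    gram = N.dot(x, x.T)
    assert gram.shape == (110, 110)
    assert N.allclose(gram[3, 7], N.dot(x[3], x[7]))
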
Deleted: trunk/Lib/sandbox/svm/tests/test_precomputed.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_precomputed.py	2006-07-25 10:16:53 UTC (rev 2124)
+++ trunk/Lib/sandbox/svm/tests/test_precomputed.py	2006-07-25 12:09:08 UTC (rev 2125)
@@ -1,97 +0,0 @@
-from numpy.testing import *
-import numpy as N
-
-set_local_path('../..')
-from svm.classification import *
-from svm.dataset import *
-from svm.kernel import *
-from svm.predict import *
-from svm.regression import *
-restore_path()
-
-class test_precomputed(NumpyTestCase):
-    def xcheck_precomputed_classification(self):
-        ModelType = LibSvmCClassificationModel
-        kernel = LinearKernel()
-
-        labels1 = ([0] * 10) + ([1] * 10) + ([2] * 10)
-        x1 = N.random.randn(len(labels1), 10)
-        data1 = LibSvmClassificationDataSet(zip(labels1, x1))
-        pcdata1 = data1.precompute(kernel)
-
-        labels2 = ([0] * 5) + ([1] * 5) + ([2] * 5)
-        x2 = N.random.randn(len(labels2), x1.shape[1])
-        data2 = LibSvmClassificationDataSet(zip(labels2, x2))
-
-        pcdata12 = pcdata1.combine(data2)
-        model = LibSvmCClassificationModel(kernel)
-        results = model.fit(pcdata12)
-
-        reflabels = labels1 + labels2
-        refx = N.vstack([x1, x2])
-        refdata = LibSvmClassificationDataSet(zip(reflabels, refx))
-        model = ModelType(kernel)
-        refresults = model.fit(refdata)
-
-        assert_array_almost_equal(results.rho, refresults.rho)
-        assert_array_almost_equal(results.sv_coef, refresults.sv_coef)
-
-        testdata = LibSvmTestDataSet(refx)
-        p = results.predict(testdata)
-        refp = refresults.predict(testdata)
-        assert_array_almost_equal(p, refp)
-
-        pv = results.predict_values(testdata)
-        refpv = refresults.predict_values(testdata)
-        for v, refv in zip(pv, refpv):
-            for key, value in refv.iteritems():
-                self.assertAlmostEqual(v[key], value)
-
-        pp = results.predict_probability(testdata)
-        refpp = refresults.predict_probability(testdata)
-        for (lbl, p), (reflbl, refp) in zip(pp, refpp):
-            self.assertEqual(lbl, reflbl)
-            assert_array_almost_equal(p, refp)
-
-    def check_precomputed_regression(self):
-        ModelType = LibSvmEpsilonRegressionModel
-
-        kernel = LinearKernel()
-
-        # this dataset remains constant
-        y1 = N.random.randn(50)
-        x1 = N.random.randn(len(y1), 10)
-        data1 = LibSvmRegressionDataSet(zip(y1, x1))
-        pcdata1 = data1.precompute(kernel)
-
-        # in a typical problem, this dataset would be smaller than the
-        # part that remains constant and would differ for each model
-        y2 = N.random.randn(5)
-        x2 = N.random.randn(len(y2), x1.shape[1])
-        data2 = LibSvmRegressionDataSet(zip(y2, x2))
-
-        pcdata12 = pcdata1.combine(data2)
-        model = LibSvmEpsilonRegressionModel(kernel)
-        results = model.fit(pcdata12, LibSvmPredictor)
-
-        # reference model, calculated without involving the
-        # precomputed Gram matrix
-        refy = N.concatenate([y1, y2])
-        refx = N.vstack([x1, x2])
-        refdata = LibSvmRegressionDataSet(zip(refy, refx))
-        model = ModelType(kernel)
-        refresults = model.fit(refdata, LibSvmPredictor)
-
-        self.assertAlmostEqual(results.rho, refresults.rho)
-        assert_array_almost_equal(results.sv_coef, refresults.sv_coef)
-
-        # XXX sigmas don't match exactly. need to find out why.
-        #self.assertAlmostEqual(results.sigma, refresults.sigma)
-
-        testdata = LibSvmTestDataSet(refx)
-        p = results.predict(testdata)
-        refp = refresults.predict(testdata)
-        assert_array_almost_equal(p, refp)
-
-if __name__ == '__main__':
-    NumpyTest().run()



