Commit 2f7a313e authored by Philipp Trunschke

fix import errors

parent 7e9d8462
import sys; sys.path.insert(0, '.')
import numpy as np
import xerus as xe
from functools import partial
-from basis import HermitePolynomials, Generic, scipy_integral, gramian
-from samplers import CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, gaussian, constant
-from measures import BasisMeasure, MeasurementList, IdentityMeasure
+from measurement_utils.bases import HermitePolynomials, Generic, scipy_integral, gramian
+from measurement_utils.samplers import CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, gaussian, constant
+from measurement_utils.measures import BasisMeasure, MeasurementList, IdentityMeasure
# the function to approximate
# from functions import easy as fnc
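The hunk above repoints the helper imports from the old flat modules (basis, samplers, measures) to the measurement_utils package. Only the dotted module paths appear in the commit, so the directory layout sketched below is an assumption for orientation, not something shown in the diff.

# Assumed layout, relative to the directory pushed onto sys.path by the scripts:
#   ./measurement_utils/
#       __init__.py
#       bases.py      # HermitePolynomials, LegendrePolynomials, Generic, scipy_integral, gramian
#       samplers.py   # CMSampler, RejectionSampler, CartesianProductSampler, ...
#       measures.py   # BasisMeasure, MeasurementList, IdentityMeasure
import sys; sys.path.insert(0, '.')
from measurement_utils.measures import MeasurementList   # resolves once the package is on the path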
@@ -17,7 +16,6 @@ n_test_samples = 10000
def rejection_sampler(density, domain):
-from samplers import interpolate, scan_AffineSampler, RejectionSampler
nodes = interpolate(density, domain, eps=1e-1)
sampler_1d = scan_AffineSampler(nodes, density)
return RejectionSampler(sampler_1d.domain, density, sampler_1d)
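rejection_sampler above interpolates the 1d density, builds a scan_AffineSampler from the resulting nodes, and hands it to a RejectionSampler, presumably as the proposal distribution. For readers unfamiliar with the technique, here is a minimal numpy-only sketch of plain rejection sampling with a uniform proposal; none of the names or signatures below come from the repository.

import numpy as np

def rejection_sample(density, a, b, bound, size, rng=None):
    # density: (unnormalized) pdf on [a, b]; bound must satisfy density(x) <= bound there
    rng = np.random.default_rng() if rng is None else rng
    out = []
    while len(out) < size:
        x = rng.uniform(a, b, size)         # proposals from the uniform envelope on [a, b]
        u = rng.uniform(0.0, bound, size)   # vertical coordinates under the envelope
        out.extend(x[u < density(x)])       # keep points lying under the density graph
    return np.array(out[:size])

# example: a standard normal density truncated to [-5, 5]
samples = rejection_sample(lambda x: np.exp(-x**2 / 2), -5.0, 5.0, 1.0, 1000)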
@@ -61,7 +59,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList(measures)
-tensor = xe.Tensor.from_ndarray
+tensor = lambda arr: xe.Tensor.from_buffer(np.ascontiguousarray(arr))
meas = ml(nodes.T) # input shape: order, n_samples
meas = np.moveaxis(meas, 0, 1) # redundant with new xe interf.
meas = [[tensor(cmp_m) for cmp_m in m] for m in meas] # redundant with new xe interf.
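The switch from xe.Tensor.from_ndarray to xe.Tensor.from_buffer wrapped in np.ascontiguousarray suggests the new xerus interface consumes arrays through the buffer protocol, which needs contiguous memory, while a transposed array such as nodes.T is only a strided view of the original data. A small plain-numpy illustration of why the copy is needed (nothing below is repository code):

import numpy as np

a = np.arange(6.0).reshape(2, 3)
print(a.flags['C_CONTIGUOUS'])                           # True: freshly allocated row-major data
print(a.T.flags['C_CONTIGUOUS'])                         # False: .T is a strided view
print(np.ascontiguousarray(a.T).flags['C_CONTIGUOUS'])   # True: forces a contiguous copy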
@@ -77,7 +75,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList([IdentityMeasure((1, 1)), *ml.measures])
test_vals = ml.evaluate(reco, test_pts)
-test_vals = [val.to_ndarray() for val in test_vals] # redundant with new xe interf.
+test_vals = [np.array(val) for val in test_vals] # redundant with new xe interf.
ref_vals = fnc(test_nodes.T)
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2
......
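Two further points in the last hunk above: the new interface apparently lets xerus tensors be converted with plain np.array(val) instead of val.to_ndarray(), and the final line measures one squared l2 error per test sample. A hedged sketch of that error computation with made-up numbers, assuming (n_samples, output_dim) shaped arrays:

import numpy as np

test_vals = np.array([[1.0, 2.0], [0.0, 1.0]])            # made-up predicted values
ref_vals = np.array([[1.0, 1.0], [0.5, 1.0]])             # made-up reference values
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2   # per-sample squared errors: [1.0, 0.25]
print(error, error.mean())                                # averaging would estimate the mean squared error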
import sys; sys.path.insert(0, '.')
import numpy as np
import xerus as xe
-from basis import HermitePolynomials, Generic
-from samplers import CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, gaussian
-from measures import BasisMeasure, MeasurementList, IdentityMeasure
+from measurement_utils.bases import HermitePolynomials, Generic
+from measurement_utils.samplers import CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, gaussian, interpolate, scan_AffineSampler, RejectionSampler
+from measurement_utils.measures import BasisMeasure, MeasurementList, IdentityMeasure
# the function to approximate
# from functions import easy as fnc
@@ -16,7 +15,6 @@ n_test_samples = 10000
def rejection_sampler(density, domain):
-from samplers import interpolate, scan_AffineSampler, RejectionSampler
nodes = interpolate(density, domain, eps=1e-1)
sampler_1d = scan_AffineSampler(nodes, density)
return RejectionSampler(sampler_1d.domain, density, sampler_1d)
@@ -57,7 +55,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList(measures)
-tensor = xe.Tensor.from_ndarray
+tensor = lambda arr: xe.Tensor.from_buffer(np.ascontiguousarray(arr))
meas = ml(nodes.T) # input shape: order, n_samples
meas = np.moveaxis(meas, 0, 1) # redundant with new xe interf.
meas = [[tensor(cmp_m) for cmp_m in m] for m in meas] # redundant with new xe interf.
@@ -73,7 +71,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList([IdentityMeasure((1, 1)), *ml.measures])
test_vals = ml.evaluate(reco, test_pts)
-test_vals = [val.to_ndarray() for val in test_vals] # redundant with new xe interf.
+test_vals = [np.array(val) for val in test_vals] # redundant with new xe interf.
ref_vals = fnc(test_nodes.T)
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2
......
import sys; sys.path.insert(0, '.')
import numpy as np
import xerus as xe
from scipy.optimize import bisect
-from basis import HermitePolynomials
-from samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, constant, gaussian
-from measures import BasisMeasure, MeasurementList, IdentityMeasure
+from measurement_utils.bases import HermitePolynomials
+from measurement_utils.samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, constant, gaussian, interpolate, scan_AffineSampler, RejectionSampler
+from measurement_utils.measures import BasisMeasure, MeasurementList, IdentityMeasure
# the function to approximate
# from functions import easy as fnc
@@ -17,7 +16,6 @@ n_test_samples = 10000
def rejection_sampler(density, domain):
-from samplers import interpolate, scan_AffineSampler, RejectionSampler
nodes = interpolate(density, domain, eps=1e-1)
sampler_1d = scan_AffineSampler(nodes, density)
return RejectionSampler(sampler_1d.domain, density, sampler_1d)
@@ -62,7 +60,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList(measures)
-tensor = xe.Tensor.from_ndarray
+tensor = lambda arr: xe.Tensor.from_buffer(np.ascontiguousarray(arr))
meas = ml(nodes.T) # input shape: order, n_samples
meas = np.moveaxis(meas, 0, 1) # redundant with new xe interf.
meas = [[tensor(cmp_m) for cmp_m in m] for m in meas] # redundant with new xe interf.
@@ -78,7 +76,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList([IdentityMeasure((1, 1)), *ml.measures])
test_vals = ml.evaluate(reco, test_pts)
-test_vals = [val.to_ndarray() for val in test_vals] # redundant with new xe interf.
+test_vals = [np.array(val) for val in test_vals] # redundant with new xe interf.
ref_vals = fnc(test_nodes.T)
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2
......
import sys; sys.path.insert(0, '.')
import numpy as np
import xerus as xe
-from basis import HermitePolynomials
-from samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, constant, gaussian
-from measures import BasisMeasure, MeasurementList, IdentityMeasure
+from measurement_utils.bases import HermitePolynomials
+from measurement_utils.samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, approx_quantiles, constant, gaussian, interpolate, scan_AffineSampler, RejectionSampler
+from measurement_utils.measures import BasisMeasure, MeasurementList, IdentityMeasure
# the function to approximate
# from functions import easy as fnc
@@ -16,7 +15,6 @@ n_test_samples = 10000
def rejection_sampler(density, domain):
-from samplers import interpolate, scan_AffineSampler, RejectionSampler
nodes = interpolate(density, domain, eps=1e-1)
sampler_1d = scan_AffineSampler(nodes, density)
return RejectionSampler(sampler_1d.domain, density, sampler_1d)
@@ -49,7 +47,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList(measures)
-tensor = xe.Tensor.from_ndarray
+tensor = lambda arr: xe.Tensor.from_buffer(np.ascontiguousarray(arr))
meas = ml(nodes.T) # input shape: order, n_samples
meas = np.moveaxis(meas, 0, 1) # redundant with new xe interf.
meas = [[tensor(cmp_m) for cmp_m in m] for m in meas] # redundant with new xe interf.
@@ -65,7 +63,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList([IdentityMeasure((1, 1)), *ml.measures])
test_vals = ml.evaluate(reco, test_pts)
-test_vals = [val.to_ndarray() for val in test_vals] # redundant with new xe interf.
+test_vals = [np.array(val) for val in test_vals] # redundant with new xe interf.
ref_vals = fnc(test_nodes.T)
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2
......
import sys; sys.path.insert(0, '.')
import numpy as np
import xerus as xe
-from basis import LegendrePolynomials
-from samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, constant, gaussian
-from measures import BasisMeasure, MeasurementList, IdentityMeasure
+from measurement_utils.bases import LegendrePolynomials
+from measurement_utils.samplers import Uniform, CMDensity, CMSampler, CMWeights, CartesianProductSampler, test_CMSamples, test_CMWeights, constant, gaussian, interpolate, scan_AffineSampler, RejectionSampler
+from measurement_utils.measures import BasisMeasure, MeasurementList, IdentityMeasure
# the function to approximate
# from functions import easy as fnc
@@ -16,7 +15,6 @@ n_test_samples = 10000
def rejection_sampler(density, domain):
-from samplers import interpolate, scan_AffineSampler, RejectionSampler
nodes = interpolate(density, domain, eps=1e-1)
sampler_1d = scan_AffineSampler(nodes, density)
return RejectionSampler(sampler_1d.domain, density, sampler_1d)
@@ -47,7 +45,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList(measures)
-tensor = xe.Tensor.from_ndarray
+tensor = lambda arr: xe.Tensor.from_buffer(np.ascontiguousarray(arr))
meas = ml(nodes.T) # input shape: order, n_samples
meas = np.moveaxis(meas, 0, 1) # redundant with new xe interf.
meas = [[tensor(cmp_m) for cmp_m in m] for m in meas] # redundant with new xe interf.
@@ -63,7 +61,7 @@ for e, sampler_1d in enumerate([cm_sampler_1d, test_sampler_1d]):
ml = MeasurementList([IdentityMeasure((1, 1)), *ml.measures])
test_vals = ml.evaluate(reco, test_pts)
-test_vals = [val.to_ndarray() for val in test_vals] # redundant with new xe interf.
+test_vals = [np.array(val) for val in test_vals] # redundant with new xe interf.
ref_vals = fnc(test_nodes.T)
error = np.linalg.norm(test_vals - ref_vals, axis=1)**2
......