prompt | completion | api
---|---|---
string, lengths 15 to 655k | string, lengths 3 to 32.4k | string, lengths 8 to 52
from .coordsys import CoordSys, CoordTrans
import nitrogen.autodiff.forward as adf
import nitrogen.dfun as dfun
import numpy as np
__all__ = ['Valence3','CartesianN','LinearTrans','Polar','Cylindrical',
'Spherical']
class Valence3(CoordSys):
"""
A triatomic valence coordinate system.
The coordinates are :math:`r_1`, :math:`r_2`, and :math:`\\theta`.
See Notes for embedding conventions.
If `supplementary` then :math:`\\theta \\leftarrow \\pi - \\theta` is
used.
"""
def __init__(self, name = 'Triatomic valence', angle = 'rad', supplementary = False,
embedding_mode = 0):
"""
Create a new Valence3 object.
Parameters
----------
name : str, optional
The coordinate system name. The default is 'Triatomic valence'.
angle : {'rad', 'deg'}, optional
The angle units. The default is radians ('rad').
supplementary : bool, optional
If True, then the angle supplement is used. The default is False.
embedding_mode : int, optional
Select the frame embedding convention. The default is 0. See
Notes for details.
Notes
-----
For `embedding_mode` = 0, the Cartesian coordinates are
.. math::
X_0 &= (0, 0, -r_1) \\\\
X_1 &= (0, 0, 0) \\\\
X_2 &= (0, r_2 \\sin \\theta, -r_2 \\cos\\theta)
For `embedding_mode` = 1, the Cartesian coordinates are
.. math::
X_0 &= (r_1 \\cos \\theta/2, 0, r_1 \\sin \\theta/2) \\\\
X_1 &= (0, 0, 0) \\\\
X_2 &= (r_2 \\cos \\theta/2, 0, -r_2 \\sin \\theta/2)
"""
super().__init__(self._csv3_q2x, nQ = 3,
nX = 9, name = name,
Qstr = ['r1', 'r2', 'theta'],
maxderiv = None, isatomic = True,
zlevel = None)
if angle == 'rad' or angle == 'deg':
self.angle = angle
else:
raise ValueError('angle must be rad or deg')
if embedding_mode not in [0, 1]:
raise ValueError('unexpected embedding_mode')
self.supplementary = supplementary
self.embedding_mode = embedding_mode
def _csv3_q2x(self, Q, deriv = 0, out = None, var = None):
"""
Triatomic valence coordinate system Q2X instance method.
See :meth:`CoordSys.Q2X` for details.
Parameters
----------
Q : ndarray
Shape (self.nQ, ...)
deriv, out, var :
See :meth:`CoordSys.Q2X` for details.
Returns
-------
out : ndarray
Shape (nd, self.nX, ...)
"""
natoms = 3
base_shape = Q.shape[1:]
if var is None:
var = [0, 1, 2] # Calculate derivatives for all Q
nvar = len(var)
# nd = adf.nck(deriv + nvar, min(deriv, nvar)) # The number of derivatives
nd = dfun.nderiv(deriv, nvar)
# Create adf symbols/constants for each coordinate
q = []
for i in range(self.nQ):
if i in var: # Derivatives requested for this variable
q.append(adf.sym(Q[i], var.index(i), deriv, nvar))
else: # Derivatives not requested, treat as constant
q.append(adf.const(Q[i], deriv, nvar))
# q = r1, r2, theta
if out is None:
out = np.ndarray( (nd, 3*natoms) + base_shape, dtype = Q.dtype)
out.fill(0) # Initialize out to 0
# Calculate Cartesian coordinates
if self.angle == 'deg':
q[2] = (np.pi / 180.0) * q[2]
# q[2] is now in radians
if self.supplementary:
q[2] = np.pi - q[2] # theta <-- pi - theta
if self.embedding_mode == 0:
|
np.copyto(out[:,2], (-q[0]).d )
|
numpy.copyto
|
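Aside (not part of the dataset row above): a minimal sketch of the `embedding_mode = 0` convention described in the `Valence3` docstring, using hypothetical coordinate values.

import numpy as np

# Hypothetical internal coordinates: r1 = r2 = 1.0, theta = 90 degrees.
r1, r2, theta = 1.0, 1.0, np.pi / 2

# embedding_mode = 0 from the docstring:
#   X0 = (0, 0, -r1),  X1 = (0, 0, 0),  X2 = (0, r2 sin(theta), -r2 cos(theta))
X0 = np.array([0.0, 0.0, -r1])
X1 = np.array([0.0, 0.0, 0.0])
X2 = np.array([0.0, r2 * np.sin(theta), -r2 * np.cos(theta)])
print(X0, X1, X2)  # [0. 0. -1.] [0. 0. 0.] [0. 1. -0.] (up to rounding)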
import os
import numpy as np
import pandas as pd
from lingam.longitudinal_lingam import LongitudinalLiNGAM
def test_fit_success():
# causal direction: x0 --> x1 --> x3
x0 = np.random.uniform(size=1000)
x1 = 0.7*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000)
x3 = 0.3*x1 + np.random.uniform(size=1000)
X1 = pd.DataFrame(np.array([x0, x1, x2, x3]).T,
columns=['x0', 'x1', 'x2', 'x3'])
x0 = np.random.uniform(size=1000) + 0.5*x2
x1 = 0.3*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000) + 0.5*x3
x3 = 0.7*x1 + np.random.uniform(size=1000)
X2 = pd.DataFrame(np.array([x0, x1, x2, x3]).T,
columns=['x0', 'x1', 'x2', 'x3'])
x0 = np.random.uniform(size=1000) + 0.5*x2
x1 = 0.5*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000) + 0.5*x3
x3 = 0.5*x1 + np.random.uniform(size=1000)
X3 = pd.DataFrame(np.array([x0, x1, x2, x3]).T,
columns=['x0', 'x1', 'x2', 'x3'])
X_list = np.empty((3, 1000, 4))
X_list[0] = X1
X_list[1] = X2
X_list[2] = X3
model = LongitudinalLiNGAM()
model.fit(X_list)
# check causal ordering
cos = model.causal_orders_
for co in cos[1:]:
assert co.index(0) < co.index(1) < co.index(3)
# check B(t,t)
B_t = model.adjacency_matrices_[1, 0] # B(1,1)
assert B_t[1, 0] > 0.2 and B_t[3, 1] > 0.6
B_t[1, 0] = 0.0
B_t[3, 1] = 0.0
assert np.sum(B_t) < 0.1
B_t = model.adjacency_matrices_[2, 0] # B(2,2)
assert B_t[1, 0] > 0.4 and B_t[3, 1] > 0.4
B_t[1, 0] = 0.0
B_t[3, 1] = 0.0
assert np.sum(B_t) < 0.1
# check B(t,t-τ)
B_tau = model.adjacency_matrices_[1, 1] # B(1,0)
assert B_tau[0, 2] > 0.4 and B_tau[2, 3] > 0.4
B_tau = model.adjacency_matrices_[2, 1] # B(2,1)
assert B_tau[0, 2] > 0.4 and B_tau[2, 3] > 0.4
# fit by list
X_list = [X1, X2, X3]
model = LongitudinalLiNGAM()
model.fit(X_list)
def test_fit_invalid_data():
# Different features
x0 = np.random.uniform(size=1000)
x1 = 0.7*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000)
x3 = 0.3*x1 + np.random.uniform(size=1000)
X1 = pd.DataFrame(np.array([x0, x1, x2, x3]).T,
columns=['x0', 'x1', 'x2', 'x3'])
x0 = np.random.uniform(size=1000) + 0.5*x2
x1 = 0.3*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000) + 0.5*x3
X2 = pd.DataFrame(np.array([x0, x1, x2]).T,
columns=['x0', 'x1', 'x2'])
x0 = np.random.uniform(size=1000) + 0.5*x2
x1 = 0.5*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000) + 0.5*x3
x3 = 0.5*x1 + np.random.uniform(size=1000)
X3 = pd.DataFrame(np.array([x0, x1, x2, x3]).T,
columns=['x0', 'x1', 'x2', 'x3'])
X_list = [X1, X2, X3]
try:
model = LongitudinalLiNGAM()
model.fit(X_list)
except ValueError:
pass
else:
raise AssertionError
# Not list data
X = 1
try:
model = LongitudinalLiNGAM()
model.fit(X)
except ValueError:
pass
else:
raise AssertionError
# Include not-array data
x0 = np.random.uniform(size=1000)
x1 = 2.0*x0 + np.random.uniform(size=1000)
x2 = np.random.uniform(size=1000)
x3 = 4.0*x1 + np.random.uniform(size=1000)
X1 = pd.DataFrame(
|
np.array([x0, x1, x2, x3])
|
numpy.array
|
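A small illustration, with a made-up causal order, of what the `causal_orders_` assertions in the test above check: each causal order is a permutation of variable indices, and the chain x0 --> x1 --> x3 requires 0 to come before 1 and 1 before 3.

# Hypothetical causal order for one time point (x2 is unconstrained here).
causal_order = [2, 0, 1, 3]
assert causal_order.index(0) < causal_order.index(1) < causal_order.index(3)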
from astropy.io import fits
import copy
import logging
import numpy as np
import os
from os import path
import shutil
from autoarray.structures.arrays.abstract_array import Header
from autoarray.structures.arrays.two_d.array_2d import Array2D
from autoarray.layout.layout import Layout2D
from autoarray.layout.region import Region2D
from autoarray import exc
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.layout import layout_util
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel("INFO")
def fits_hdu_via_quadrant_letter_from(quadrant_letter):
if quadrant_letter == "D" or quadrant_letter == "C":
return 1
elif quadrant_letter == "B" or quadrant_letter == "A":
return 4
else:
raise exc.FrameException("Quadrant letter for FrameACS must be A, B, C or D.")
def array_eps_to_counts(array_eps, bscale, bzero):
if bscale is None:
raise exc.FrameException(
"Cannot convert a Frame2D to units COUNTS without a bscale attribute (bscale = None)."
)
return (array_eps - bzero) / bscale
class Array2DACS(Array2D):
"""
An ACS array consists of four quadrants ('A', 'B', 'C', 'D') which have the following layout (described
at the following STScI link
https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418).
<--------S----------- ---------S----------->
[] [========= 2 =========] [========= 3 =========] [] /\
/ [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] / |
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | Direction arctic
P [xxxxxxxxx B/C xxxxxxx] [xxxxxxxxx A/D xxxxxxx] P | clocks an image
| [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] | | without any rotation
\/ [xxxxxxxxxxxxxxxxxxxxx] [xxxxxxxxxxxxxxxxxxxxx] \/ | (e.g. towards row 0
| of the NumPy arrays)
For a ACS .fits file:
- The images contained in hdu 1 correspond to quadrants B (left) and A (right).
- The images contained in hdu 4 correspond to quadrants C (left) and D (right).
"""
@classmethod
def from_fits(cls, file_path, quadrant_letter):
"""
Use the input .fits file and quadrant letter to extract the quadrant from the full CCD, perform
the rotations required to give correct arctic clocking and convert the image from units of COUNTS / CPS to
ELECTRONS.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
hdu = fits_hdu_via_quadrant_letter_from(quadrant_letter=quadrant_letter)
array = array_2d_util.numpy_array_2d_via_fits_from(file_path=file_path, hdu=hdu)
return cls.from_ccd(array_electrons=array, quadrant_letter=quadrant_letter)
@classmethod
def from_ccd(
cls,
array_electrons,
quadrant_letter,
header=None,
bias_subtract_via_prescan=False,
bias=None,
):
"""
Using an input array of both quadrants in electrons, use the quadrant letter to extract the quadrant from the
full CCD and perform the rotations required to give correct arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
if quadrant_letter == "A":
array_electrons = array_electrons[0:2068, 0:2072]
roe_corner = (1, 0)
use_flipud = True
if bias is not None:
bias = bias[0:2068, 0:2072]
elif quadrant_letter == "B":
array_electrons = array_electrons[0:2068, 2072:4144]
roe_corner = (1, 1)
use_flipud = True
if bias is not None:
bias = bias[0:2068, 2072:4144]
elif quadrant_letter == "C":
array_electrons = array_electrons[0:2068, 0:2072]
roe_corner = (1, 0)
use_flipud = False
if bias is not None:
bias = bias[0:2068, 0:2072]
elif quadrant_letter == "D":
array_electrons = array_electrons[0:2068, 2072:4144]
roe_corner = (1, 1)
use_flipud = False
if bias is not None:
bias = bias[0:2068, 2072:4144]
else:
raise exc.FrameException(
"Quadrant letter for FrameACS must be A, B, C or D."
)
return cls.quadrant_a(
array_electrons=array_electrons,
header=header,
roe_corner=roe_corner,
use_flipud=use_flipud,
bias_subtract_via_prescan=bias_subtract_via_prescan,
bias=bias,
)
@classmethod
def quadrant_a(
cls,
array_electrons,
roe_corner,
use_flipud,
header=None,
bias_subtract_via_prescan=False,
bias=None,
):
"""
Use an input array of the left quadrant in electrons and perform the rotations required to give correct
arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
array_electrons = layout_util.rotate_array_via_roe_corner_from(
array=array_electrons, roe_corner=roe_corner
)
if use_flipud:
array_electrons = np.flipud(array_electrons)
if bias_subtract_via_prescan:
bias_serial_prescan_value = prescan_fitted_bias_column(
array_electrons[:, 18:24]
)
array_electrons -= bias_serial_prescan_value
header.bias_serial_prescan_column = bias_serial_prescan_value
if bias is not None:
bias = layout_util.rotate_array_via_roe_corner_from(
array=bias, roe_corner=roe_corner
)
if use_flipud:
bias = np.flipud(bias)
array_electrons -= bias
header.bias = Array2DACS.manual_native(array=bias, pixel_scales=0.05)
return cls.manual(array=array_electrons, header=header, pixel_scales=0.05)
@classmethod
def quadrant_b(
cls, array_electrons, header=None, bias_subtract_via_prescan=False, bias=None
):
"""
Use an input array of the right quadrant in electrons and perform the rotations required to give correct
arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
array_electrons = layout_util.rotate_array_via_roe_corner_from(
array=array_electrons, roe_corner=(1, 1)
)
array_electrons = np.flipud(array_electrons)
if bias_subtract_via_prescan:
bias_serial_prescan_value = prescan_fitted_bias_column(
array_electrons[:, 18:24]
)
array_electrons -= bias_serial_prescan_value
header.bias_serial_prescan_column = bias_serial_prescan_value
if bias is not None:
bias = layout_util.rotate_array_via_roe_corner_from(
array=bias, roe_corner=(1, 1)
)
bias = np.flipud(bias)
array_electrons -= bias
header.bias = Array2DACS.manual_native(array=bias, pixel_scales=0.05)
return cls.manual(array=array_electrons, header=header, pixel_scales=0.05)
@classmethod
def quadrant_c(
cls, array_electrons, header=None, bias_subtract_via_prescan=False, bias=None
):
"""
Use an input array of the left quadrant in electrons and perform the rotations required to give correct
arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
array_electrons = layout_util.rotate_array_via_roe_corner_from(
array=array_electrons, roe_corner=(1, 0)
)
if bias_subtract_via_prescan:
bias_serial_prescan_value = prescan_fitted_bias_column(
array_electrons[:, 18:24]
)
array_electrons -= bias_serial_prescan_value
header.bias_serial_prescan_column = bias_serial_prescan_value
if bias is not None:
bias = layout_util.rotate_array_via_roe_corner_from(
array=bias, roe_corner=(1, 0)
)
array_electrons -= bias
header.bias = Array2DACS.manual_native(array=bias, pixel_scales=0.05)
return cls.manual(array=array_electrons, header=header, pixel_scales=0.05)
@classmethod
def quadrant_d(
cls, array_electrons, header=None, bias_subtract_via_prescan=False, bias=None
):
"""
Use an input array of the right quadrant in electrons and perform the rotations required to give correct
arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
"""
array_electrons = layout_util.rotate_array_via_roe_corner_from(
array=array_electrons, roe_corner=(1, 1)
)
if bias_subtract_via_prescan:
bias_serial_prescan_value = prescan_fitted_bias_column(
array_electrons[:, 18:24]
)
array_electrons -= bias_serial_prescan_value
header.bias_serial_prescan_column = bias_serial_prescan_value
if bias is not None:
bias = layout_util.rotate_array_via_roe_corner_from(
array=bias, roe_corner=(1, 1)
)
array_electrons -= bias
header.bias = Array2DACS.manual_native(array=bias, pixel_scales=0.05)
return cls.manual(array=array_electrons, header=header, pixel_scales=0.05)
def update_fits(self, original_file_path, new_file_path):
"""
Output the array to a .fits file.
Parameters
----------
original_file_path : str
The path of the original .fits file whose HDUs are copied before the array is updated.
new_file_path : str
The path the updated file is output to, including the filename and the ``.fits`` extension,
e.g. '/path/to/filename.fits'
"""
new_file_dir = os.path.split(new_file_path)[0]
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
if not os.path.exists(new_file_path):
shutil.copy(original_file_path, new_file_path)
hdulist = fits.open(new_file_path)
hdulist[self.header.hdu].data = self.layout_2d.original_orientation_from(
array=self
)
ext_header = hdulist[4].header
bscale = ext_header["BSCALE"]
os.remove(new_file_path)
hdulist.writeto(new_file_path)
class ImageACS(Array2DACS):
"""
The layout of an ACS array and image is given in `FrameACS`.
This class handles specifically the image of an ACS observation, assuming that it contains specific
header info.
"""
@classmethod
def from_fits(
cls,
file_path,
quadrant_letter,
bias_subtract_via_bias_file=False,
bias_subtract_via_prescan=False,
bias_file_path=None,
use_calibrated_gain=True,
):
"""
Use the input .fits file and quadrant letter to extract the quadrant from the full CCD, perform
the rotations required to give correct arctic clocking and convert the image from units of COUNTS / CPS to
ELECTRONS.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
Also see https://github.com/spacetelescope/hstcal/blob/master/pkg/acs/calacs/acscte/dopcte-gen2.c#L418
Parameters
----------
file_path
The full path of the file that the image is loaded from, including the file name and ``.fits`` extension.
quadrant_letter
The letter of the ACS quadrant the image is extracted from and loaded.
bias_subtract_via_bias_file
If True, the corresponding bias file of the image is loaded (via the name of the file in the fits header).
bias_subtract_via_prescan
If True, the prescan on the image is used to estimate a component of bias that is subtracted from the image.
bias_file_path
If `bias_subtract_via_bias_file=True`, this overwrites the path to the bias file instead of the default
behaviour of using the .fits header.
use_calibrated_gain
If True, the calibrated gain values are used to convert from COUNTS to ELECTRONS.
"""
hdu = fits_hdu_via_quadrant_letter_from(quadrant_letter=quadrant_letter)
header_sci_obj = array_2d_util.header_obj_from(file_path=file_path, hdu=0)
header_hdu_obj = array_2d_util.header_obj_from(file_path=file_path, hdu=hdu)
header = HeaderACS(
header_sci_obj=header_sci_obj,
header_hdu_obj=header_hdu_obj,
hdu=hdu,
quadrant_letter=quadrant_letter,
)
if header.header_sci_obj["TELESCOP"] != "HST":
raise exc.ArrayException(
f"The file {file_path} does not point to a valid HST ACS dataset."
)
if header.header_sci_obj["INSTRUME"] != "ACS":
raise exc.ArrayException(
f"The file {file_path} does not point to a valid HST ACS dataset."
)
array = array_2d_util.numpy_array_2d_via_fits_from(
file_path=file_path, hdu=hdu, do_not_scale_image_data=True
)
array = header.array_original_to_electrons(
array=array, use_calibrated_gain=use_calibrated_gain
)
if bias_subtract_via_bias_file:
if bias_file_path is None:
file_dir = os.path.split(file_path)[0]
bias_file_path = path.join(file_dir, header.bias_file)
bias = array_2d_util.numpy_array_2d_via_fits_from(
file_path=bias_file_path, hdu=hdu, do_not_scale_image_data=True
)
header_sci_obj = array_2d_util.header_obj_from(
file_path=bias_file_path, hdu=0
)
header_hdu_obj = array_2d_util.header_obj_from(
file_path=bias_file_path, hdu=hdu
)
bias_header = HeaderACS(
header_sci_obj=header_sci_obj,
header_hdu_obj=header_hdu_obj,
hdu=hdu,
quadrant_letter=quadrant_letter,
)
if bias_header.original_units != "COUNTS":
raise exc.ArrayException("Cannot use bias frame not in counts.")
bias = bias * bias_header.calibrated_gain
else:
bias = None
return cls.from_ccd(
array_electrons=array,
quadrant_letter=quadrant_letter,
header=header,
bias_subtract_via_prescan=bias_subtract_via_prescan,
bias=bias,
)
class Layout2DACS(Layout2D):
@classmethod
def from_sizes(cls, roe_corner, serial_prescan_size=24, parallel_overscan_size=20):
"""
Use an input array of the left quadrant in electrons and perform the rotations required to give correct
arctic clocking.
See the docstring of the `FrameACS` class for a complete description of the HST FPA, quadrants and
rotations.
"""
parallel_overscan = Region2D(
(2068 - parallel_overscan_size, 2068, serial_prescan_size, 2072)
)
serial_prescan = Region2D((0, 2068, 0, serial_prescan_size))
return Layout2D.rotated_from_roe_corner(
roe_corner=roe_corner,
shape_native=(2068, 2072),
parallel_overscan=parallel_overscan,
serial_prescan=serial_prescan,
)
class HeaderACS(Header):
def __init__(
self,
header_sci_obj,
header_hdu_obj,
quadrant_letter=None,
hdu=None,
bias=None,
bias_serial_prescan_column=None,
):
super().__init__(header_sci_obj=header_sci_obj, header_hdu_obj=header_hdu_obj)
self.bias = bias
self.bias_serial_prescan_column = bias_serial_prescan_column
self.quadrant_letter = quadrant_letter
self.hdu = hdu
@property
def bscale(self):
return self.header_hdu_obj["BSCALE"]
@property
def bzero(self):
return self.header_hdu_obj["BZERO"]
@property
def gain(self):
return self.header_sci_obj["CCDGAIN"]
@property
def calibrated_gain(self):
if round(self.gain) == 1:
calibrated_gain = [0.99989998, 0.97210002, 1.01070000, 1.01800000]
elif round(self.gain) == 2:
calibrated_gain = [2.002, 1.945, 2.028, 1.994]
elif round(self.gain) == 4:
calibrated_gain = [4.011, 3.902, 4.074, 3.996]
else:
raise exc.ArrayException(
"Calibrated gain of ACS file does not round to 1, 2 or 4."
)
if self.quadrant_letter == "A":
return calibrated_gain[0]
elif self.quadrant_letter == "B":
return calibrated_gain[1]
elif self.quadrant_letter == "C":
return calibrated_gain[2]
elif self.quadrant_letter == "D":
return calibrated_gain[3]
@property
def original_units(self):
return self.header_hdu_obj["BUNIT"]
@property
def bias_file(self):
return self.header_sci_obj["BIASFILE"].replace("jref$", "")
def array_eps_to_counts(self, array_eps):
return array_eps_to_counts(
array_eps=array_eps, bscale=self.bscale, bzero=self.bzero
)
def array_original_to_electrons(self, array, use_calibrated_gain):
if self.original_units in "COUNTS":
array = (array * self.bscale) + self.bzero
elif self.original_units in "CPS":
array = (array * self.exposure_time * self.bscale) + self.bzero
if use_calibrated_gain:
return array * self.calibrated_gain
else:
return array * self.gain
def array_electrons_to_original(self, array, use_calibrated_gain):
if use_calibrated_gain:
array /= self.calibrated_gain
else:
array /= self.gain
if self.original_units in "COUNTS":
return (array - self.bzero) / self.bscale
elif self.original_units in "CPS":
return (array - self.bzero) / (self.exposure_time * self.bscale)
def prescan_fitted_bias_column(prescan, n_rows=2048, n_rows_ov=20):
"""
Generate a bias column to be subtracted from the main image by doing a
least squares fit to the serial prescan region.
e.g. image -= prescan_fitted_bias_column(image[18:24])
See <NAME> (2013), S9.3, p460.
Parameters
----------
prescan : [[float]]
The serial prescan part of the image. Should usually cover the full
number of rows but may skip the first few columns of the prescan to
avoid trails.
n_rows : int
The number of rows in the image, excluding overscan.
n_rows_ov : int
The number of overscan rows in the image.
Returns
-------
bias_column : [float]
The fitted bias to be subtracted from all image columns.
"""
n_columns_fit = prescan.shape[1]
# Flatten the multiple fitting columns to a long 1D array
# y = [y_1_1, y_2_1, ..., y_nrow_1, y_1_2, y_2_2, ..., y_nrow_ncolfit]
y = prescan[:-n_rows_ov].T.flatten()
# x = [1, 2, ..., nrow, 1, ..., nrow, 1, ..., nrow, ...]
x = np.tile(np.arange(n_rows), n_columns_fit)
# M = [[1, 1, ..., 1], [x_1, x_2, ..., x_n]].T
M = np.array([np.ones(n_rows * n_columns_fit), x]).T
# Best-fit values for y = M v
v = np.dot(np.linalg.inv(np.dot(M.T, M)), np.dot(M.T, y))
# Map to full image size for easy subtraction
bias_column = v[0] + v[1] * np.arange(n_rows + n_rows_ov)
# plt.figure()
# pixels = np.arange(n_rows + n_rows_ov)
# for i in range(n_columns_fit):
# plt.scatter(pixels, prescan[:, i])
# plt.plot(pixels, bias_column)
# plt.show()
return np.transpose([bias_column])
def output_quadrants_to_fits(
file_path: str,
quadrant_a,
quadrant_b,
quadrant_c,
quadrant_d,
header_a=None,
header_b=None,
header_c=None,
header_d=None,
overwrite: bool = False,
):
file_dir = os.path.split(file_path)[0]
if not os.path.exists(file_dir):
os.makedirs(file_dir)
if overwrite and os.path.exists(file_path):
os.remove(file_path)
array_hdu_1 = np.zeros((2068, 4144))
array_hdu_4 = np.zeros((2068, 4144))
def get_header(quadrant):
try:
return quadrant.header
except AttributeError:
raise exc.ArrayException(
"You must pass in the header of the quadrants to output them to an ACS fits file."
)
header_a = get_header(quadrant_a) if header_a is None else header_a
try:
quadrant_a = copy.copy(np.asarray(quadrant_a.native))
except AttributeError:
quadrant_a = copy.copy(np.asarray(quadrant_a))
quadrant_a = quadrant_convert_to_original(
quadrant=quadrant_a, roe_corner=(1, 0), header=header_a, use_flipud=True
)
array_hdu_4[0:2068, 0:2072] = quadrant_a
header_b = get_header(quadrant_b) if header_b is None else header_b
try:
quadrant_b = copy.copy(np.asarray(quadrant_b.native))
except AttributeError:
quadrant_b = copy.copy(np.asarray(quadrant_b))
quadrant_b = quadrant_convert_to_original(
quadrant=quadrant_b, roe_corner=(1, 1), header=header_b, use_flipud=True
)
array_hdu_4[0:2068, 2072:4144] = quadrant_b
header_c = get_header(quadrant_c) if header_c is None else header_c
try:
quadrant_c = copy.copy(np.asarray(quadrant_c.native))
except AttributeError:
quadrant_c = copy.copy(np.asarray(quadrant_c))
quadrant_c = quadrant_convert_to_original(
quadrant=quadrant_c, roe_corner=(1, 0), header=header_c, use_flipud=False
)
array_hdu_1[0:2068, 0:2072] = quadrant_c
header_d = get_header(quadrant_d) if header_d is None else header_d
try:
quadrant_d = copy.copy(np.asarray(quadrant_d.native))
except AttributeError:
quadrant_d = copy.copy(np.asarray(quadrant_d))
quadrant_d = quadrant_convert_to_original(
quadrant=quadrant_d, roe_corner=(1, 1), header=header_d, use_flipud=False
)
array_hdu_1[0:2068, 2072:4144] = quadrant_d
hdu_list = fits.HDUList()
hdu_list.append(fits.ImageHDU())
hdu_list.append(fits.ImageHDU(array_hdu_1))
hdu_list.append(fits.ImageHDU())
hdu_list.append(fits.ImageHDU())
hdu_list.append(fits.ImageHDU(array_hdu_4))
hdu_list.append(fits.ImageHDU())
def set_header(header):
header.set("cticor", "ARCTIC", "CTI CORRECTION PERFORMED USING ARCTIC")
return header
hdu_list[0].header = set_header(header_a.header_sci_obj)
hdu_list[1].header = set_header(header_c.header_hdu_obj)
hdu_list[4].header = set_header(header_a.header_hdu_obj)
hdu_list.writeto(file_path)
def quadrant_convert_to_original(
quadrant, roe_corner, header, use_flipud=False, use_calibrated_gain=True
):
if header.bias is not None:
quadrant += header.bias.native
if header.bias_serial_prescan_column is not None:
quadrant += header.bias_serial_prescan_column
quadrant = header.array_electrons_to_original(
array=quadrant, use_calibrated_gain=use_calibrated_gain
)
if use_flipud:
quadrant =
|
np.flipud(quadrant)
|
numpy.flipud
|
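A self-contained sketch, on synthetic data rather than HST pixels, of the least-squares step inside `prescan_fitted_bias_column` above: a column of ones stacked with the row index forms the design matrix M, and the normal equations recover the offset and slope of the bias ramp.

import numpy as np

# Synthetic prescan: bias ramp of 2000 + 0.01 * row plus noise, replicated over
# 6 fitting columns (matching the image[:, 18:24] slice used above).
n_rows, n_cols = 2048, 6
rows = np.arange(n_rows)
prescan = 2000.0 + 0.01 * rows[:, None] + np.random.normal(0.0, 1.0, (n_rows, n_cols))

y = prescan.T.flatten()                         # flatten column by column
x = np.tile(rows, n_cols)
M = np.array([np.ones(n_rows * n_cols), x]).T   # design matrix [[1, x_i], ...]
v = np.dot(np.linalg.inv(np.dot(M.T, M)), np.dot(M.T, y))
print(v)  # roughly [2000.0, 0.01]: the fitted offset and slope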
import collections
from typing import Any, Dict, List, Tuple, Union
import dm_env
import numpy as np
from dm_env import specs
from gym import Space
from pettingzoo.utils.conversions import ParallelEnv
from pettingzoo.utils.env import AECEnv
# Need to install typing_extensions since we support pre python 3.8
from typing_extensions import TypedDict
from mava import types
SeqTimestepDict = TypedDict(
"SeqTimestepDict",
{"timestep": dm_env.TimeStep, "action": types.Action},
)
def convert_dm_compatible_observations(
observes: Dict[str, np.ndarray],
dones: Dict[str, bool],
action_spaces: Dict[str, Space],
observation_spaces: Dict[str, Space],
env_done: bool,
possible_agents: List,
) -> types.Observation:
"""Convert Parallel observation so it's dm_env compatible.
Args:
observes (Dict[str, np.ndarray]): observations per agent.
dones (Dict[str, bool]): dones per agent.
action_spaces ( Dict[str, Space]): env action spaces.
observation_spaces ( Dict[str, Space]): env observation spaces.
env_done (bool): is env done.
possible_agents (List): possible agents in env.
Returns:
types.Observation: dm compatible observation.
"""
observations: Dict[str, types.OLT] = {}
if observes:
for agent, observation in observes.items():
if isinstance(observation, dict) and "action_mask" in observation:
legals = observation["action_mask"]
observation = observation["observation"]
else:
# TODO Handle legal actions better for continuous envs,
# maybe have min and max for each action and clip the
# agents actions accordingly
legals = np.ones(
action_spaces[agent].shape,
dtype=action_spaces[agent].dtype,
)
observations[agent] = types.OLT(
observation=observation,
legal_actions=legals,
terminal=np.asarray([dones[agent]], dtype=np.float32),
)
# Handle empty observations - some envs return {} at last step.
else:
observations = {
agent: types.OLT(
observation=np.zeros(
observation_spaces[agent].shape,
dtype=observation_spaces[agent].dtype,
),
legal_actions=np.ones(
action_spaces[agent].shape,
dtype=action_spaces[agent].dtype,
),
terminal=np.asarray([env_done], dtype=np.float32),
)
for agent in possible_agents
}
return observations
def generate_zeros_from_spec(spec: specs.Array) -> np.ndarray:
return np.zeros(spec.shape, spec.dtype)
def convert_np_type(dtype: np.dtype, value: Union[int, float]) -> Union[int, float]:
return np.dtype(dtype).type(value)
def parameterized_restart(
reward: types.Reward,
discount: types.Discount,
observation: types.Observation,
) -> dm_env.TimeStep:
"""Returns a `TimeStep` with `step_type` set to `StepType.FIRST`.
Differs from dm_env.restart, since reward and discount can be set to
initial types."""
return dm_env.TimeStep(dm_env.StepType.FIRST, reward, discount, observation)
def parameterized_termination(
reward: types.Reward,
discount: types.Discount,
observation: types.Observation,
) -> dm_env.TimeStep:
"""Returns a `TimeStep` with `step_type` set to `StepType.LAST`."""
return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, observation)
"""Project single timestep to all agents."""
def broadcast_timestep_to_all_agents(
timestep: dm_env.TimeStep, possible_agents: list
) -> dm_env.TimeStep:
parallel_timestep = dm_env.TimeStep(
observation={agent: timestep.observation for agent in possible_agents},
reward={agent: timestep.reward for agent in possible_agents},
discount={agent: timestep.discount for agent in possible_agents},
step_type=timestep.step_type,
)
return parallel_timestep
"""Convert dict of seq timestep and actions to parallel"""
def convert_seq_timestep_and_actions_to_parallel(
timesteps: Dict[str, SeqTimestepDict], possible_agents: list
) -> Tuple[dict, dm_env.TimeStep]:
step_types = [timesteps[agent]["timestep"].step_type for agent in possible_agents]
assert all(
x == step_types[0] for x in step_types
), f"Step types should be identical - {step_types} "
parallel_timestep = dm_env.TimeStep(
observation={
agent: timesteps[agent]["timestep"].observation for agent in possible_agents
},
reward={
agent: timesteps[agent]["timestep"].reward for agent in possible_agents
},
discount={
agent: timesteps[agent]["timestep"].discount for agent in possible_agents
},
step_type=step_types[0],
)
parallel_actions = {agent: timesteps[agent]["action"] for agent in possible_agents}
return parallel_actions, parallel_timestep
def apply_env_wrapper_preprocessers(
environment: Any,
env_preprocess_wrappers: List,
) -> Any:
# Currently only supports PZ envs.
if isinstance(environment, ParallelEnv) or isinstance(environment, AECEnv):
if env_preprocess_wrappers and isinstance(env_preprocess_wrappers, List):
for (env_wrapper, params) in env_preprocess_wrappers:
if params:
environment = env_wrapper(environment, **params)
else:
environment = env_wrapper(environment)
return environment
class RunningStatistics:
"""Helper class to comute running statistics such as
the max, min, mean, variance and standard deviation of
a specific quantity.
"""
# The queue_size is used to estimate a moving mean and variance value.
def __init__(self, label: str, queue_size: int = 100) -> None:
self.queue: collections.deque = collections.deque(maxlen=queue_size)
self._max = -float("inf")
self._min = float("inf")
self._mean = 0.0
self._var = 0.0
self._label = label
self._raw = 0.0
def push(self, x: float) -> None:
self._raw = x
self.queue.append(x)
if x > self._max:
self._max = x
if x < self._min:
self._min = x
if len(self.queue) == 1:
self._mean = x
self._var = 0
else:
self._mean =
|
np.mean(self.queue)
|
numpy.mean
|
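A brief usage sketch, with made-up values, of the `RunningStatistics` helper above: each push updates the max and min immediately, while the mean and variance are estimated over the bounded deque of recent values.

stats = RunningStatistics(label="episode_return", queue_size=100)
for value in [1.0, 3.0, 2.0]:
    stats.push(value)
# After these pushes the max is 3.0, the min is 1.0, and the moving mean is
# taken over the deque holding the last (up to) 100 pushed values.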
from typing import Optional, List
import numpy
from pyrr import Matrix44, Vector3
from vistas.core.bounds import BoundingBox
from vistas.core.graphics.object import Object3D, Intersection
class Ray:
"""
Representation of a ray in 3D space. Rays emit from an origin along a direction. Implementation inspired by mrdoob -
https://github.com/mrdoob/three.js/blob/master/src/math/Ray.js
"""
def __init__(self, origin: Optional[Vector3]=None, direction: Optional[Vector3]=None):
self.origin = origin if origin is not None else Vector3()
self.direction = direction if direction is not None else Vector3()
self.direction.normalize()
def at(self, t):
""" Retrieve a point along the ray. """
return self.direction * t + self.origin
def intersects_bbox(self, bbox: BoundingBox):
return self.intersect_bbox(bbox) is not None
def intersect_bbox(self, bbox: BoundingBox):
invdirx, invdiry, invdirz = 1 / self.direction # Any or all could evaluate to numpy.inf, handled below
if invdirx >= 0:
tmin = (bbox.min_x - self.origin.x) * invdirx
tmax = (bbox.max_x - self.origin.x) * invdirx
else:
tmin = (bbox.max_x - self.origin.x) * invdirx
tmax = (bbox.min_x - self.origin.x) * invdirx
if invdiry >= 0:
tymin = (bbox.min_y - self.origin.y) * invdiry
tymax = (bbox.max_y - self.origin.y) * invdiry
else:
tymin = (bbox.max_y - self.origin.y) * invdiry
tymax = (bbox.min_y - self.origin.y) * invdiry
if tmin > tymax or tymin > tmax:
return None
if tymin > tmin or tmin != tmin: # tmin != tmin is True when tmin is NaN (e.g. from 0 * numpy.inf)
tmin = tymin
if tymax < tmax or tmax != tmax:
tmax = tymax
if invdirz >= 0:
tzmin = (bbox.min_z - self.origin.z) * invdirz
tzmax = (bbox.max_z - self.origin.z) * invdirz
else:
tzmin = (bbox.max_z - self.origin.z) * invdirz
tzmax = (bbox.min_z - self.origin.z) * invdirz
if tmin > tzmax or tzmin > tmax:
return None
if tzmin > tmin or tmin != tmin:
tmin = tzmin
if tzmax < tmax or tmax != tmax:
tmax = tzmax
# Return point closest to the ray on the positive side
if tmax < 0:
return None
return self.at(tmin if tmin >= 0 else tmax)
def intersect_triangles(self, a, b, c):
""" Determine face-level triangle intersections from this ray. """
e1 = b - a
e2 = c - a
direction = numpy.array(self.direction)
origin = numpy.array(self.origin)
eps = numpy.finfo(numpy.float32).eps
pvec = numpy.cross(direction, e2)
det =
|
numpy.sum(e1 * pvec, axis=-1)
|
numpy.sum
|
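The truncated `intersect_triangles` above is the beginning of a Moller-Trumbore style ray/triangle test. Below is a rough, vectorized continuation written as a standalone helper; the name `moller_trumbore` and everything after the `det` line are assumptions about how such a routine typically proceeds, not the library's actual implementation.

import numpy

def moller_trumbore(origin, direction, a, b, c, eps=numpy.finfo(numpy.float32).eps):
    """Return the distance t at which the ray hits each triangle (NaN marks a miss)."""
    e1, e2 = b - a, c - a
    pvec = numpy.cross(direction, e2)
    det = numpy.sum(e1 * pvec, axis=-1)                       # the completion shown above
    det = numpy.where(numpy.abs(det) > eps, det, numpy.nan)   # parallel/degenerate -> NaN
    inv_det = 1.0 / det
    tvec = origin - a
    u = numpy.sum(tvec * pvec, axis=-1) * inv_det
    qvec = numpy.cross(tvec, e1)
    v = numpy.sum(direction * qvec, axis=-1) * inv_det
    t = numpy.sum(e2 * qvec, axis=-1) * inv_det
    hit = (u >= 0) & (v >= 0) & (u + v <= 1) & (t > eps)
    return numpy.where(hit, t, numpy.nan)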
from reproduce.manipulate_dave import add_wn_frame, add_white_noise, add_wn_random
from utils import load_driving_data, load_dave_model
from utils import get_trainable_layers
from utils import save_data, load_data
from utils import generate_adversarial
from utils import preprocess_image
from coverages.idc import ImportanceDrivenCoverage
from coverages.neuron_cov import NeuronCoverage
from coverages.tkn import DeepGaugeLayerLevelCoverage
from coverages.kmn import DeepGaugePercentCoverage
from coverages.ss import SSCover
from coverages.sa import SurpriseAdequacy
import os
import random
import argparse
import datetime
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
__version__ = 0.9
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
def by_indices(outs, indices):
return [[outs[i][0][indices]] for i in range(len(outs))]
def parse_arguments():
"""
Parse the command-line arguments.
:return: a dictionary comprising the command-line arguments
"""
# define the program description
text = 'Coverage Analyzer for DNNs'
# initiate the parser
parser = argparse.ArgumentParser(description=text)
# add new command-line arguments
parser.add_argument("-V", "--version", help="show program version",
action="version", version="DeepFault %f" % __version__)
parser.add_argument("-M", "--model", help="Path to the model to be loaded.\
The specified model will be used.") # , required=True)
# choices=['lenet1','lenet4', 'lenet5'], required=True)
parser.add_argument("-DS", "--dataset", help="The dataset to be used (mnist\
or cifar10).", choices=["mnist", "cifar10"]) # , required=True)
parser.add_argument("-A", "--approach", help="the approach to be employed \
to measure coverage", choices=['idc', 'nc', 'kmnc',
'nbc', 'snac', 'tknc', 'ssc', 'lsa', 'dsa'])
parser.add_argument("-C", "--class", help="the selected class", type=int)
parser.add_argument("-Q", "--quantize", help="quantization granularity for \
combinatorial other_coverage_metrics.", type=int)
parser.add_argument("-L", "--layer", help="the subject layer's index for \
combinatorial cov. NOTE THAT ONLY TRAINABLE LAYERS CAN \
BE SELECTED", type=int)
parser.add_argument("-KS", "--k_sections", help="number of sections used in \
k multisection other_coverage_metrics", type=int)
parser.add_argument("-KN", "--k_neurons", help="number of neurons used in \
top k neuron other_coverage_metrics", type=int)
parser.add_argument("-RN", "--rel_neurons", help="number of neurons considered\
as relevant in combinatorial other_coverage_metrics", type=int)
parser.add_argument("-AT", "--act_threshold", help="a threshold value used\
to consider if a neuron is activated or not.", type=float)
parser.add_argument("-LOG", "--logfile", help="path to log file")
parser.add_argument("-ADV", "--advtype", help="path to log file")
parser.add_argument("-S", "--seed", help="seed t0 random", type=int)
args = parser.parse_args()
return vars(args)
if __name__ == "__main__":
args = parse_arguments()
model_path = args['model'] if args['model'] else 'neural_networks/LeNet5'
dataset = args['dataset'] if args['dataset'] else 'mnist'
approach = args['approach'] if args['approach'] else 'idc'
num_rel_neurons = args['rel_neurons'] if args['rel_neurons'] else 10
act_threshold = args['act_threshold'] if args['act_threshold'] else 0
top_k = args['k_neurons'] if args['k_neurons'] else 3
k_sect = args['k_sections'] if args['k_sections'] else 1000
selected_class = args['class'] if args['class'] is not None else -1 # ALL CLASSES
logfile_name = args['logfile'] if args['logfile'] else 'result.log'
quantization_granularity = args['quantize'] if args['quantize'] else 3
adv_type = args['advtype'] if args['advtype'] else 'fgsm'
seed = args['seed'] if args['seed'] else 1
logfile = open(logfile_name, 'a')
random.seed(seed)
np.random.seed(seed)
####################
# 0) Load Driving Data
X_all = []
X_paths, Ys = load_driving_data()
for xp in X_paths:
X_all.append(preprocess_image(xp)[0])
print("LOAD DONE")
# TODO: Check numbers here
X_train = np.array(X_all)
X_test = np.array(X_all[4000:])
Y_train = np.array(Ys)
Y_test = np.array(Ys[4000:])
####################
# 1) Setup the model
model_name = model_path.split('/')[-1]
model = load_dave_model()
# 2) Load necessary information
trainable_layers = get_trainable_layers(model)
non_trainable_layers = list(set(range(len(model.layers))) - set(trainable_layers))
print('Trainable layers: ' + str(trainable_layers))
print('Non trainable layers: ' + str(non_trainable_layers))
experiment_folder = 'experiments'
# Investigate the penultimate layer
subject_layer = args['layer'] if args['layer'] is not None else -1
subject_layer = trainable_layers[subject_layer]
skip_layers = [0] # SKIP LAYERS FOR NC, KMNC, NBC etc.
for idx, lyr in enumerate(model.layers):
if 'flatten' in lyr.__class__.__name__.lower(): skip_layers.append(idx)
####################
# 3) Analyze Coverages
if approach == 'nc':
fw = open('validation_dave_nc.log', 'a')
nc = NeuronCoverage(model, threshold=.75, skip_layers=skip_layers) # SKIP ONLY INPUT AND FLATTEN LAYERS
coverage, _, _, _, _ = nc.test(X_test)
orig_err = model.evaluate(X_test, Y_test)
nc.set_measure_state(nc.get_measure_state())
maninp = add_wn_frame(X_test, 1, noise_std_dev=0.75)
frame_coverage, _, _, _, _ = nc.test(np.array(maninp))
frame_err = model.evaluate(np.array(maninp), Y_test)
maninp = add_white_noise(X_test, model_path, selected_class,
lrpmethod='simple', relevance_percentile=98,
noise_std_dev=0.75)
rel_coverage, _, _, _, _ = nc.test(np.array(maninp))
rel_err = model.evaluate(
|
np.array(maninp)
|
numpy.array
|
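For reference, a tiny worked example with made-up activations of the `scale` helper defined at the top of the script above: it min-max normalizes an intermediate layer output into [rmin, rmax].

import numpy as np

acts = np.array([-2.0, 0.0, 6.0])                         # hypothetical layer outputs
scaled = (acts - acts.min()) / (acts.max() - acts.min())  # -> [0.0, 0.25, 1.0]
# scale(acts) above returns the same values for the defaults rmax=1, rmin=0.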
#
# author: <NAME> (<EMAIL>)
# last updated: September 24, 2020
#
"""It is utilities for common features."""
import functools
import numpy as np
from bayeso import constants
def validate_types(func: constants.TYPING_CALLABLE) -> constants.TYPING_CALLABLE:
"""
A decorator that validates the number of type annotations declared for typing.
:param func: an original function.
:type func: callable
:returns: a callable decorator.
:rtype: callable
:raises: AssertionError
"""
annos = func.__annotations__
assert len(annos) == func.__code__.co_argcount + 1
# arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
@functools.wraps(func)
def _validate_types(*args, **kwargs):
return func(*args, **kwargs)
return _validate_types
@validate_types
def get_grids(ranges: np.ndarray, num_grids: int) -> np.ndarray:
"""
It returns grids over the given `ranges`, where each dimension has `num_grids` partitions.
:param ranges: ranges. Shape: (d, 2).
:type ranges: numpy.ndarray
:param num_grids: the number of partitions per dimension.
:type num_grids: int.
:returns: grids of given `ranges`. Shape: (`num_grids`:math:`^{\\text{d}}`, d).
:rtype: numpy.ndarray
:raises: AssertionError
"""
assert isinstance(ranges, np.ndarray)
assert isinstance(num_grids, int)
assert len(ranges.shape) == 2
assert ranges.shape[1] == 2
assert (ranges[:, 0] <= ranges[:, 1]).all()
list_grids = []
for range_ in ranges:
list_grids.append(np.linspace(range_[0], range_[1], num_grids))
list_grids_mesh = list(np.meshgrid(*list_grids))
list_grids = []
for elem in list_grids_mesh:
list_grids.append(elem.flatten(order='C'))
arr_grids = np.vstack(tuple(list_grids))
arr_grids = arr_grids.T
return arr_grids
@validate_types
def get_minimum(Y_all: np.ndarray, num_init: int) -> constants.TYPING_TUPLE_THREE_ARRAYS:
"""
It returns accumulated minima at each iteration, their arithmetic means
over rounds, and their standard deviations over rounds, which is widely
used in the Bayesian optimization community.
:param Y_all: historical function values. Shape: (r, t) where r is the
number of Bayesian optimization rounds and t is the number of
iterations including initial points for each round. For example,
if we run 50 iterations with 5 initial examples and repeat this
procedure 3 times, r would be 3 and t would be 55 (= 50 + 5).
:type Y_all: numpy.ndarray
:param num_init: the number of initial points.
:type num_init: int.
:returns: tuple of accumulated minima, their arithmetic means over
rounds, and their standard deviations over rounds.
Shape: ((r, t - `num_init` + 1), (t - `num_init` + 1, ), (t - `num_init` + 1, )).
:rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)
:raises: AssertionError
"""
assert isinstance(Y_all, np.ndarray)
assert isinstance(num_init, int)
assert len(Y_all.shape) == 2
assert Y_all.shape[1] > num_init
list_minima = []
for by in Y_all:
minimum_best = np.inf
list_minima_ = []
for y in by[:num_init]:
if minimum_best > y:
minimum_best = y
list_minima_.append(minimum_best)
for y in by[num_init:]:
if minimum_best > y:
minimum_best = y
list_minima_.append(minimum_best)
list_minima.append(list_minima_)
minima =
|
np.array(list_minima)
|
numpy.array
|
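A short usage sketch of `get_grids` above with a concrete two-dimensional `ranges` array; per its docstring, the output has shape (`num_grids` ** d, d).

import numpy as np

ranges = np.array([[0.0, 1.0], [10.0, 20.0]])  # d = 2 dimensions, each row (min, max)
grids = get_grids(ranges, 3)                   # 3 partitions per dimension
print(grids.shape)                             # (9, 2): all combinations of the 3 x 3 grid points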
import numpy as np
import pandas as pd
from copy import deepcopy
class Epochs(object):
"""Epochs extracted from a Raw instance.
Parameters
----------
raw : instance of `Raw`
Raw data to be epoched.
events : array
Event onsets (in sample indices).
tmin : float | array
Start time before event. If float, all events start at same
time relative to event onset.
tmax : float | array
End time after event. If float, all events start at same
time relative to event onset.
picks : 'gaze' | 'pupil' | None
Data types to include (if None, all data are used).
eyes : 'LEFT' | 'RIGHT' | None
Eye recordings to include (if None, all data are used).
blinks : True | False
Include blinks and re-reference to epochs.
saccades : True | False
Include saccades and re-reference to epochs.
Attributes
----------
info : dict
Recording metadata.
data : array, shape (n_trials, n_eyes, n_channels, n_times)
Recording samples.
times : array, shape (n_times,)
Time vector in seconds. Goes from `tmin` to `tmax`. Time interval
between consecutive time samples is equal to the inverse of the
sampling frequency.
extents : array, shape (n_trials, 2)
Onset and offset of trials.
ch_names : list, shape (n_channels)
Names of data channels.
eye_names : list, shape (n_eyes)
Order of data channels (by eye).
blinks : array, shape (i, 3)
(If included) Detected blinks detailed by their trial, start, and end.
saccades : array, shape (j, 3)
(If included) Detected saccades detailed by their trial, start, and end.
"""
def __init__(self, raw, events, tmin=0, tmax=1, picks=None, eyes=None,
blinks=True, saccades=True):
## Define metadata.
self.info = deepcopy(raw.info)
## Define channels.
if picks is None: ch_names = ('gx','gy','pupil')
elif picks.lower().startswith('g'): ch_names = ('gx','gy')
elif picks.lower().startswith('p'): ch_names = ('pupil',)
else: raise ValueError(f'"{picks}" not valid input for picks.')
self.ch_names = tuple(np.intersect1d(ch_names, raw.ch_names))
ch_ix = np.in1d(raw.ch_names,self.ch_names)
## Define eyes.
if eyes is None: eye_names = deepcopy(raw.eye_names)
elif eyes.lower().startswith('l'): eye_names = ('LEFT',)
elif eyes.lower().startswith('r'): eye_names = ('RIGHT',)
else: raise ValueError(f'"{eyes}" not valid input for eyes.')
self.eye_names = tuple(np.intersect1d(eye_names, raw.eye_names))
eye_ix = np.in1d(raw.eye_names,self.eye_names)
## Define events.
assert np.ndim(events) == 1
if isinstance(tmin, (int, float)): tmin = np.repeat(float(tmin), events.size)
if isinstance(tmax, (int, float)): tmax = np.repeat(float(tmax), events.size)
assert np.size(events) == np.size(tmin) == np.size(tmax)
self.extents = np.column_stack([tmin, tmax])
## Convert times to sampling frequency.
sfreq = self.info['sfreq']
tmin = np.array(tmin * sfreq).astype(int) / sfreq
tmax = np.array(tmax * sfreq).astype(int) / sfreq
self.times = np.arange(tmin.min(), tmax.max(), 1/sfreq)
## Define indices of data relative to raw.
raw_ix = np.column_stack([events + tmin * sfreq, events + tmax * sfreq])
raw_ix = np.rint(raw_ix).astype(int)
## Define indices of data relative to epochs.
epoch_ix = (np.column_stack([tmin,tmax]) - tmin.min()) * sfreq
epoch_ix =
|
np.rint(epoch_ix)
|
numpy.rint
|
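A small worked example, with a hypothetical sampling rate and event list, of the index arithmetic in `Epochs.__init__` above: event onsets in samples plus `tmin`/`tmax` in seconds give the slice of raw samples belonging to each trial.

import numpy as np

sfreq = 500.0                            # hypothetical sampling frequency in Hz
events = np.array([1000, 2500])          # event onsets in samples
tmin, tmax = np.repeat(-0.2, 2), np.repeat(0.8, 2)

raw_ix = np.column_stack([events + tmin * sfreq, events + tmax * sfreq])
raw_ix = np.rint(raw_ix).astype(int)     # [[ 900 1400] [2400 2900]]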
#!/usr/bin/env python
# coding=utf-8
"""
python pred.py Animals model/mobile_Animals_wgt.h5
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.applications.xception import Xception, preprocess_input
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, Activation
from scipy.spatial import distance
from utils import *
import numpy as np
import sys
import os
def main(superclass, model_weight, img_path, model=None):
is_debug = False
classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
classAttrsNums = {'Animals': 123, 'Fruits': 58}
classAttrsNums = {'Animals': 99, 'Fruits': 48}
classAttrsNum = classAttrsNums[superclass]
unknown_labels = {
'Animals': [
'Label_A_02',
'Label_A_05',
'Label_A_08',
'Label_A_14',
'Label_A_20',
'Label_A_29',
'Label_A_31',
'Label_A_35',
'Label_A_39',
'Label_A_41'
],
'Fruits': [
'Label_F_03',
'Label_F_09',
'Label_F_10',
'Label_F_17',
'Label_F_25',
'Label_F_29',
'Label_F_31',
'Label_F_34',
'Label_F_43',
'Label_F_49'
]
}
date = '20180321'
class_attrs_path = '../zsl_a_%s_train_%s/zsl_a_%s_train_annotations_attributes_per_class_%s.txt' % (superclass.lower(), date, superclass.lower(), date)
attrs_list_path = '../zsl_a_%s_train_%s/zsl_a_%s_train_annotations_attribute_list_%s.txt' % (superclass.lower(), date, superclass.lower(), date)
test_dir= '../zsl_a_%s_test_%s/' % (superclass.lower(), date)
pred_path = 'pred_%s.txt' % (superclass)
attr_acc_path = 'attr_pred_acc_%s.txt' % (superclass.lower())
facc = open(attr_acc_path, 'r', encoding='utf-8')
acc = facc.readlines()
facc.close()
acc = [float(row.strip().split(' ')[1]) for row in acc]
if model is None:
is_debug = True
base_model = Xception(include_top=True, weights=None,
input_tensor=None, input_shape=(72,72,3),
pooling=None, classes=classNum[superclass[0]])
output = Dense(classAttrsNum, activation='sigmoid', name='predictions')(base_model.get_layer('avg_pool').output)
model = Model(inputs=base_model.input, outputs=output)
model.load_weights(model_weight)
Y = {}
img = image.load_img(img_path, target_size=(72, 72))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
#x = preprocess_input(x)
x = x/255.0
#print(np.shape(x))
#print(x[0][0][0])
y_pred = model.predict(x)
y_pred = y_pred[0]
y_pred = list(map(lambda x: round(x,2), y_pred))
y_pred = np.array(y_pred)
with open(attrs_list_path, 'r') as f:
attrs_map = []
for line in f.readlines():
tokens = line.split(', ')
attrs_map.append(tokens[-1])
attrs_map = np.array(attrs_map)
if is_debug:
print(attrs_map[np.where(y_pred>0.5)])
date = '20180321'
class_attrs_path = '../zsl_a_%s_train_%s/zsl_a_%s_train_annotations_attributes_per_class_%s.txt' % (superclass.lower(), date, superclass.lower(), date)
to_be_removed_attrs = [3, 16, 18, 20, 21, 27, 36, 45, 48, 49, 50, 51, 52, 53, 54, 55, 56, 60, 66, 96, 106, 107, 108, 119]
to_be_removed_attrs = [2, 5, 18, 21, 23, 25, 29, 33, 45, 46]
fattrs = open(class_attrs_path, 'r', encoding='utf-8')
attrs = fattrs.readlines()
fattrs.close()
label_attrs = {}
label_attrs_list = []
for row in attrs:
pair = row.strip().split(',')
if pair[0] not in unknown_labels[superclass]:
continue
label_attrs[pair[0]] = list(map(lambda x: float(x), pair[1].strip().split(' ')[1:-1]))
label_attrs[pair[0]] = list(np.delete(np.array(label_attrs[pair[0]]), to_be_removed_attrs))
label_attrs_list.append(label_attrs[pair[0]])
attrs_entropy = calc_attrs_entropy(label_attrs)
label_attrs_weight = np.sum(np.array(label_attrs_list),axis=0)
label_attrs_weight = 1 - label_attrs_weight/10.0
#label_attrs_weight[26:29] = 10 # just for fun
#label_attrs_weight[:] = 1
nearest = 0.0
y_label = ''
y_pred = np.array(y_pred)*
|
np.array(acc)
|
numpy.array
|
import Image
import matplotlib.pyplot as plt
import numpy as np
from pygco import cut_simple
def stereo_unaries(img1, img2):
differences = []
max_disp = 8
for disp in np.arange(max_disp):
if disp == 0:
diff = np.sum((img1 - img2) ** 2, axis=2)
else:
diff = np.sum((img1[:, 2 * disp:, :] - img2[:, :-2 * disp, :]) **
2, axis=2)
if disp != max_disp - 1:
diff = diff[:, max_disp - disp - 1:disp - max_disp + 1]
differences.append(diff)
return np.dstack(differences).copy("C")
def potts_example():
img1 = np.asarray(Image.open("scene1.row3.col1.ppm")) / 255.
img2 = np.asarray(Image.open("scene1.row3.col2.ppm")) / 255.
unaries = (stereo_unaries(img1, img2) * 100).astype(np.int32)
n_disps = unaries.shape[2]
newshape = unaries.shape[:2]
potts_cut = cut_simple(unaries, -5 * np.eye(n_disps, dtype=np.int32))
x, y = np.ogrid[:n_disps, :n_disps]
one_d_topology = np.abs(x - y).astype(np.int32).copy("C")
one_d_cut = cut_simple(unaries, 5 * one_d_topology)
plt.subplot(231, xticks=(), yticks=())
plt.imshow(img1)
plt.subplot(232, xticks=(), yticks=())
plt.imshow(img2)
plt.subplot(233, xticks=(), yticks=())
plt.imshow(
|
np.argmin(unaries, axis=2)
|
numpy.argmin
|
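A minimal illustration, with toy numbers, of the `np.argmin(unaries, axis=2)` call being plotted above: for each pixel the disparity with the lowest unary matching cost is picked, which is the uncut baseline the graph-cut results are compared against.

import numpy as np

# Toy unary costs for a 1 x 2 image with 3 candidate disparities.
unaries = np.array([[[5, 1, 9],
                     [2, 7, 0]]])
print(np.argmin(unaries, axis=2))  # [[1 2]]: the cheapest disparity per pixel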
import numpy as np
'''
FVD Tool ConvectionTVDexample
'''
nmax = 500
phi = np.zeros((nmax))
for i in range(nmax):
if (i >= 19) & (i <= 120):
phi[i] = 1
if (i >= 179) & (i <= 400):
phi[i] = np.sin((i * 0.02 + 0.01) * np.pi)
dt = 0.0005
dx = 0.002
tmax = 1000
dphi = np.zeros((nmax))
rp = np.zeros((nmax))
psiplus = np.zeros((nmax))
psiminus = np.zeros((nmax))
flux = np.zeros((nmax))
eps = 1e-10
u = np.zeros((nmax + 1))
u[:] = 0.3
M = np.zeros((nmax+2,nmax+2))
for i in range(2,nmax+1):
M[i,i] = 2000 + 150
M[i,i-1] = - 150
M[0,0] = 1
M[0,1] = 1
M[1,0] = -75
M[1,1] = 2075
M[501,0] = 1
M[0,1] = 1
M[501,1] = -1
M[0,500] = -1
M[501,500] = -1
M[0,501] = -1
M[501,501] = 1
rhst = np.zeros((nmax + 2))
rhs = np.zeros((nmax + 2))
phit = np.zeros((nmax,tmax+1))
phit[:,0] = phi[:]
for t in range(tmax):
rhst[1:nmax+1] = phi[:] / dt
for i in range(nmax-1):
dphi[i] = (phi[i+1] - phi[i])/dx
for i in range(nmax-2):
phiout = (abs(dphi[i+1]) >= eps)*dphi[i+1] + eps*(dphi[i+1] == 0) + eps*(abs(dphi[i+1]) < eps)*np.sign(dphi[i+1])
rp[i] = dphi[i] / phiout
flux[i] = max(0,max(min(2*rp[i],1),min(rp[i],2)))
psiplus[i+1] = 0.5 * flux[i] * (phi[i+2] - phi[i+1])
phiout = (abs(dphi[i]) >= eps)*dphi[i] + eps*(dphi[i] == 0) + eps*(abs(dphi[i]) < eps)*
|
np.sign(dphi[i])
|
numpy.sign
|
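The `flux[i] = max(0, max(min(2*rp[i], 1), min(rp[i], 2)))` line above is the superbee flux limiter applied to the slope ratio rp. A short standalone sketch of the limiter, evaluated at a few sample ratios, shows the clipping behaviour.

import numpy as np

def superbee(r):
    """Superbee limiter: phi(r) = max(0, min(2r, 1), min(r, 2))."""
    return np.maximum(0, np.maximum(np.minimum(2 * r, 1), np.minimum(r, 2)))

for r in [-1.0, 0.25, 0.5, 1.0, 3.0]:
    print(r, superbee(r))  # 0.0, 0.5, 1.0, 1.0, 2.0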
# Ported from the Synchrosqueezing Toolbox, authored by
# <NAME>, <NAME>
# (http://www.math.princeton.edu/~ebrevdo/)
# (https://github.com/ebrevdo/synchrosqueezing/)
import numpy as np
from .utils import est_riskshrink_thresh, p2up, synsq_adm
from .wavelet_transforms import phase_cwt, phase_cwt_num
from .wavelet_transforms import cwt_fwd, synsq_squeeze
def synsq_cwt_fwd(x, t=None, fs=None, nv=32, opts=None):
"""Calculates the synchrosqueezing transform of vector `x`, with samples
taken at times given in vector `t`. Uses `nv` voices. Implements the
algorithm described in Sec. III of [1].
# Arguments:
x: np.ndarray. Vector of signal samples (e.g. x = np.cos(20 * np.pi * t))
t: np.ndarray / None. Vector of times samples are taken
(e.g. np.linspace(0, 1, n)). If None, defaults to np.arange(len(x)).
Overrides `fs` if not None.
fs: float. Sampling frequency of `x`; overridden by `t`, if provided.
nv: int. Number of voices. Recommended 32 or 64 by [1].
opts: dict. Options specifying how synchrosqueezing is computed.
'type': str. type of wavelet. See `wfiltfn` docstring.
'gamma': float / None. Wavelet hard thresholding value. If None,
is estimated automatically.
'difftype': str. 'direct', 'phase', or 'numerical' differentiation.
'numerical' uses MEX differentiation, which is faster and
uses less memory, but may be less accurate.
# Returns:
Tx: Synchrosqueeze-transformed `x`, columns associated w/ `t`
fs: Frequencies associated with rows of `Tx`.
Wx: Wavelet transform of `x` (see `cwt_fwd`)
Wx_scales: scales associated with rows of `Wx`.
w: Phase transform for each element of `Wx`.
# References:
1. <NAME>, <NAME>, <NAME>, and <NAME>,
"The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications",
Signal Processing, 93:1079-1094, 2013.
2. <NAME>, <NAME>, <NAME>, "Synchrosqueezed Wavelet Transforms:
an empirical mode decomposition-like tool",
Applied and Computational Harmonic Analysis, 30(2):243-261, 2011.
"""
def _get_opts(opts):
opts_default = {'type': 'morlet',
'difftype': 'direct',
'gamma': None,
'freqscale': None}
if opts is None:
new_opts = opts_default
else:
new_opts = opts
for opt_name in opts_default:
if opt_name not in new_opts:
new_opts[opt_name] = opts_default[opt_name]
return new_opts
def _wavelet_transform(x, nv, dt, opts):
N = len(x)
N_up, n1, n2 = p2up(N)
if opts['difftype'] == 'direct':
# calculate derivative directly in the wavelet domain
# before taking wavelet transform
opts['rpadded'] = 0
Wx, Wx_scales, dWx, _ = cwt_fwd(x, opts['type'], nv, dt, opts)
w = phase_cwt(Wx, dWx, opts)
elif opts['difftype'] == 'phase':
# take derivative of unwrapped CWT phase
# directly in phase transform
opts['rpadded'] = 0
Wx, Wx_scales, _ = cwt_fwd(x, opts['type'], nv, dt, opts)
w = phase_cwt(Wx, None, opts)
else:
# calculate derivative numerically after calculating wavelet
# transform. This requires less memory and is more accurate
# for lesser `a`.
opts['rpadded'] = 1
Wx, Wx_scales, _ = cwt_fwd(x, opts['type'], nv, dt, opts)
Wx = Wx[:, (n1 - 5 + 1):(n1 + N + 3)]
w = phase_cwt_num(Wx, dt, opts)
return Wx, w, Wx_scales, opts
def _validate_spacing_uniformity(t):
if np.any([(np.diff(t, 2) / (t[-1] - t[0]) > 1e-5)]):
raise Exception("Time vector `t` must be uniformly sampled.")
if t is None:
fs = fs or 1.
# t = np.linspace(0., len(x) / fs, len(x))
t = np.arange(0, len(x)) / fs
else:
_validate_spacing_uniformity(t)
opts = _get_opts(opts)
dt = t[1] - t[0] # sampling period, assuming regular spacing
Wx, w, Wx_scales, opts = _wavelet_transform(x, nv, dt, opts)
if opts['gamma'] is None:
opts['gamma'] = est_riskshrink_thresh(Wx, nv)
# calculate the synchrosqueezed frequency decomposition
opts['transform'] = 'CWT'
Tx, fs = synsq_squeeze(Wx, w, t, nv, opts)
if opts['difftype'] == 'numerical':
Wx = Wx[:, (3 + 1):(- 4)]
w = w[:, (3 + 1):(- 4)]
Tx = Tx[:, (3 + 1):(- 4)]
return Tx, fs, Wx, Wx_scales, w
def synsq_cwt_inv(Tx, fs, opts={}, Cs=None, freqband=None): # TODO Arguments
"""Inverse synchrosqueezing transform of `Tx` with associated frequencies
in `fs` and curve bands in time-frequency plane specified by `Cs` and
`freqband`. This implements Eq. 5 of [1].
# Arguments:
Tx: np.ndarray. Synchrosqueeze-transformed `x` (see `synsq_cwt_fwd`).
fs: np.ndarray. Frequencies associated with rows of Tx.
(see `synsq_cwt_fwd`).
opts: dict. Options (see `synsq_cwt_fwd`):
'type': type of wavelet used in `synsq_cwt_fwd`
other wavelet options ('mu', 's') should also match
those used in `synsq_cwt_fwd`
'Cs': (optional) curve centerpoints
'freqs': (optional) curve bands
# Returns:
x: components of reconstructed signal, and residual error
# Example:
Tx, fs = synsq_cwt_fwd(t, x, 32) # synchrosqueezing
Txf = synsq_filter_pass(Tx,fs, -np.inf, 1) # pass band filter
xf = synsq_cwt_inv(Txf, fs) # filtered signal reconstruction
# References:
1. <NAME>, <NAME>, <NAME>, and <NAME>,
"The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications",
Signal Processing, 93:1079-1094, 2013.
"""
opts = opts or {'type': 'morlet'}
Cs = Cs if Cs is not None else np.ones((Tx.shape[1], 1))
freqband = freqband if freqband is not None else np.ones((1, 1)) * Tx.shape[0]
# Find the admissibility coefficient Cpsi
Css = synsq_adm(opts['type'], opts)
# Invert Tx around curve masks in the time-frequency plane to recover
# individual components; last one is the remaining signal
# Integration over all frequencies recovers original signal
# Factor of 2 is because real parts contain half the energy
x =
|
np.zeros((Cs.shape[0], Cs.shape[1] + 1))
|
numpy.zeros
|
# _*_ coding:utf-8 _*_
import numpy as np
def load_data_set():
"""
Load the data set; a label of 1 marks an abusive posting
@ return posting_list: the data set
@ return class_vectors: the class label vector
"""
posting_list = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
class_vectors = [0, 1, 0, 1, 0, 1]
return posting_list, class_vectors
# Collect all words; each word only needs to be recorded once
def create_vocabulary_list(data_set):
"""
Build the vocabulary
@ param data_set: the data set
@ return vocabulary_set: the vocabulary
"""
vocabulary_set = set([])
for document in data_set:
# take the union of the sets
vocabulary_set = vocabulary_set | set(document)
return list(vocabulary_set)
def words_2_vector(vocabulary_list, input_set):
"""
Text word vector: each vocabulary word is a feature; if the document contains the word the feature is 1, otherwise 0
@ param vocabulary_list: the vocabulary
@ param input_set: the input document
@ return return_vector: the returned vector, marking whether each word of input_set appears in vocabulary_list
"""
return_vector = [0] * len(vocabulary_list)
for word in input_set:
if word in vocabulary_list:
return_vector[vocabulary_list.index( word)] = 1
else:
print("单词: %s 不在词库中!" % word)
return return_vector
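# Added illustrative helper (not part of the original script): a minimal sketch
# showing how the three functions above fit together.
def _words_2_vector_demo():
    posts, _labels = load_data_set()
    vocab = create_vocabulary_list(posts)
    return words_2_vector(vocab, posts[0])  # 0/1 presence flags, length == len(vocab)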
def train_NB(train_matrix, train_category):
"""
Train the naive Bayes model
@ param train_matrix: the training matrix (one word vector per document)
@ param train_category: the class labels
"""
train_document_number = len(train_matrix)
words_number = len(train_matrix[0])
probability_abusive = sum(train_category) / float(train_document_number)
# To keep any single word probability from being 0 (which would zero out the final product), initialize every word count to 1 and the denominator to 2 (Laplace smoothing); a small numeric illustration follows this example.
p0_number =
|
np.ones(words_number)
|
numpy.ones
|
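# Illustrative, standalone sketch (added): the Laplace-smoothed word probabilities
# used in train_NB above -- counts start at 1 and the denominator starts at 2, so
# no word ever gets probability 0. The counts below are hypothetical.
import numpy as np

counts = np.array([3, 0, 1])                   # word counts within one class
smoothed = (counts + 1) / (counts.sum() + 2)   # -> [0.667, 0.167, 0.333]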
DESC='''
Code to calculate results based on candidates/interpretations
'''
# import language_check
import warnings
warnings.filterwarnings("ignore")
import torch
from transformers import AutoModelForSequenceClassification
from datasets import load_dataset
from transformers import AutoTokenizer
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from textattack.shared import AttackedText
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
import pickle
import argparse
import numpy as np
import math
import re
from tqdm import tqdm
from matplotlib import collections
import random
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
def com_scores(atts,enc):
score = 0
for i in range(len(enc)):
score += atts[i]*i
return score/len(enc)
def l2_scores(atts):
return torch.norm(atts)
def diff(x,y):
"""Returns the set of indices for which this and other_attacked_text
have different words."""
indices = set()
w1 = x.words
w2 = y.words
for i in range(min(len(w1), len(w2))):
if w1[i] != w2[i]:
indices.add(i)
return indices
def rank_correlation(int1,int2):
return spearmanr(int1.cpu().numpy().tolist(),int2.cpu().numpy().tolist())
def topk_intersection(int1,int2):
k = int((int1.size()[0])/2)
i1 = torch.argsort(torch.abs(int1),descending=True).cpu().numpy().tolist()[:k]
i2 = torch.argsort(torch.abs(int2),descending=True).cpu().numpy().tolist()[:k]
return len([x for x in i1 if x in i2])/k
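# Added illustrative helper (not part of the original script): sanity-check the two
# similarity measures above on small, hypothetical attribution vectors.
def _interpretation_similarity_demo():
    a = torch.tensor([0.9, 0.1, 0.5, 0.3])
    b = torch.tensor([0.8, 0.2, 0.4, 0.1])
    # Spearman rank correlation of the attributions, and the fraction of shared
    # indices among the top-k (k = len//2) largest-magnitude attributions.
    return rank_correlation(a, b), topk_intersection(a, b)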
def plot_violin_graph_rand(data_to_plot,data_to_plot_rand, xlabel, ylabel,nm,met):
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(9, 6), sharey=True)
# print(data_to_plot)
f = ax1.violinplot(data_to_plot,showmeans=True)
f_rand = ax1.violinplot(data_to_plot_rand,showmeans=True)
labels = []
def add_label(violin, label):
import matplotlib.patches as mpatches
color = violin["bodies"][0].get_facecolor().flatten()
labels.append((mpatches.Patch(color=color), label))
add_label(f,"Metric:"+met)
add_label(f_rand,"Random")
def set_axis_style(ax, labels):
ax.get_xaxis().set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.tick_params(axis='both', which='major', labelsize=14)
ax.set_xlabel(xlabel, fontsize=20)
ax.set_ylabel(ylabel, fontsize=20)
set_axis_style(ax1,["{0:.1f}".format(0.1*(i-1))+"-"+"{0:.1f}".format(0.1*i) for i in range(1,len(data_to_plot)+1)])
plt.legend(*zip(*labels), loc=2)
l = f['cmeans'].get_segments()
lines = [np.array([0,1])]
for i in range(len(l)):
lines.append((l[i][0]+l[i][1])/2)
l_rand = f_rand['cmeans'].get_segments()
lines_rand = [np.array([0,1])]
for i in range(len(l_rand)):
lines_rand.append((l_rand[i][0]+l_rand[i][1])/2)
w = collections.LineCollection([lines])
w_rand = collections.LineCollection([lines_rand],color="tab:orange")
ax1.add_collection(w)
ax1.add_collection(w_rand)
ax1.autoscale()
# ax1.legend(proxies, ['Selection based on ExplainFooler-'+str(metric), 'Random Selection'])
plt.savefig(nm)
def process_string(string):
string = re.sub("( )(\'[(m)(d)(t)(ll)(re)(ve)(s)])", r"\2", string)
string = re.sub("(\d+)( )([,\.])( )(\d+)", r"\1\3\5", string)
# U . S . -> U.S.
string = re.sub("(\w)( )(\.)( )(\w)( )(\.)", r"\1\3\5\7", string)
# reduce left space
string = re.sub("( )([,\.!?:;)])", r"\2", string)
# reduce right space
string = re.sub("([(])( )", r"\1", string)
string = re.sub("s '", "s'", string)
# reduce both space
string = re.sub("(')( )(\S+)( )(')", r"\1\3\5", string)
string = re.sub("(\")( )(\S+)( )(\")", r"\1\3\5", string)
string = re.sub("(\w+) (-+) (\w+)", r"\1\2\3", string)
string = re.sub("(\w+) (/+) (\w+)", r"\1\2\3", string)
# string = re.sub(" ' ", "'", string)
return string
def get_ppl(texts):
ppl_model = GPT2LMHeadModel.from_pretrained('gpt2').cuda()
ppl_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
ppl_model.eval()
eval_loss = 0
nb_eval_steps = 0
with torch.no_grad():
for text in texts:
text = process_string(text)
input_ids = torch.tensor(ppl_tokenizer.encode(text, add_special_tokens=True))
if len(input_ids) < 2:
continue
input_ids = input_ids.cuda()
outputs = ppl_model(input_ids, labels=input_ids)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
# print(eval_loss)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
return perplexity.item()
def main():
parser=argparse.ArgumentParser(description=DESC, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-m","--model",required=True, help="Name of model")
parser.add_argument("-d","--dataset",required=True, help="Name of dataset")
parser.add_argument("-s","--split",required=True, help="Split of dataset")
parser.add_argument("-num","--number",required=False, type=int, default=-1, help="Number of samples from dataset")
parser.add_argument("-mf","--modelfolder",required=False, default='./models/',help="Folder to load models from")
parser.add_argument("-if","--interpretfolder",required=False, default='./interpretations/',help="Folder to store interpretations")
parser.add_argument("-im","--interpretmethod",required=True,help="Interpretation Method (IG/LIME)")
parser.add_argument("-rf","--resultfolder",required=False,default="./results/",help="Folder to store results")
args = parser.parse_args()
global model
global tokenizer
if args.model == "distilbert":
if args.dataset == "sst2":
model = AutoModelForSequenceClassification.from_pretrained(args.modelfolder+"distilbert-base-uncased-SST-2-glue^sst2-2021-01-11-09-08-54-383533").to(device)
tokenizer = AutoTokenizer.from_pretrained(args.modelfolder+"distilbert-base-uncased-SST-2-glue^sst2-2021-01-11-09-08-54-383533")
elif args.dataset == "agnews":
model = AutoModelForSequenceClassification.from_pretrained("textattack/distilbert-base-uncased-ag-news")
tokenizer = AutoTokenizer.from_pretrained("textattack/distilbert-base-uncased-ag-news")
elif args.dataset == "imdb":
model = AutoModelForSequenceClassification.from_pretrained("textattack/distilbert-base-uncased-imdb")
tokenizer = AutoTokenizer.from_pretrained("textattack/distilbert-base-uncased-imdb")
elif args.model == "roberta":
if args.dataset == "sst2":
model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-SST-2")
tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-SST-2")
elif args.dataset == "agnews":
model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-ag-news")
tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-ag-news")
elif args.dataset == "imdb":
model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-imdb")
tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-imdb")
elif args.model == "bert-adv":
if args.dataset == "sst2":
model = AutoModelForSequenceClassification.from_pretrained(args.modelfolder+"bert-sst2-adv")
tokenizer = AutoTokenizer.from_pretrained(args.modelfolder+"bert-sst2-adv")
elif args.dataset == "agnews":
model = AutoModelForSequenceClassification.from_pretrained(args.modelfolder+"bert-ag-adv")
tokenizer = AutoTokenizer.from_pretrained(args.modelfolder+"bert-ag-adv")
elif args.dataset == "imdb":
model = AutoModelForSequenceClassification.from_pretrained(args.modelfolder+"bert-imdb-adv")
tokenizer = AutoTokenizer.from_pretrained(args.modelfolder+"bert-imdb-adv")
model.eval()
interpretation_name = args.interpretfolder+"interpretations-"+args.dataset+"-"+args.model+"-"+args.interpretmethod+'-'+str(args.number)+".pkl"
original_interpretation_name = args.interpretfolder+"original_sentences/original-interpretations-"+args.dataset+"-"+args.model+"-"+args.interpretmethod+'-'+str(args.number)+".pkl"
with open(interpretation_name, 'rb') as f:
interp = pickle.load(f)
with open(original_interpretation_name, 'rb') as f:
orig_interp = pickle.load(f)
if args.number == -1:
args.number = len(interp)
lom_list = []
l2_list = []
lom_list_rand = []
l2_list_rand = []
ids = range(args.number)
for fn,idx in zip(interp,ids):
try:
print("Calculating on %d of %d total sentences" %(idx,len(ids)))
p2s = {}
p2s_rand ={}
t2s = {}
t2s_rand ={}
cms_original = com_scores(orig_interp[idx][1][0],orig_interp[idx][1][1])
for i in range(len(fn)):
try:
r = random.randint(0,len(fn[i])-1)
cms_rand = com_scores(fn[i][r][1][0],fn[i][r][1][1])
for j in range(len(fn[i])):
w = len(diff(AttackedText(orig_interp[idx][0]),AttackedText(fn[i][j][0])))
if w not in p2s_rand:
p2s_rand[w] = (np.linalg.norm(cms_original-cms_rand),fn[i][r])
if w not in t2s_rand:
t2s_rand[w] = (np.linalg.norm(cms_original-cms_rand),fn[i][r])
cms = com_scores(fn[i][j][1][0],fn[i][j][1][1])
if w not in p2s:
p2s[w] = (
|
np.linalg.norm(cms_original-cms)
|
numpy.linalg.norm
|
#Added by <NAME>, University of Heidelberg, <EMAIL>
#Commissioned by Universitätsklinikum Heidelberg, Klinik für Allgemein-, Viszeral- und Transplantationschirurgie
from HyperGuiModules.utility import *
from HyperGuiModules.constants import *
from tkinter import filedialog, messagebox
from PIL import Image
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
class BS:
def __init__(self, bs_frame, listener):
self.root = bs_frame
self.listener = listener
# BOOLS
self.hypergui_crops = False
self.save_tif_bool = False
self.measure_bool = False
self.delete_content = True
# LISTS
self.data_cube_paths = []
# INT-VARS
self.checkbox_value_hypergui_crops = IntVar()
self.save_as_tif_checkbox_value = IntVar()
# IMAGE MODIFIERS
self.stretch_lr = 1
self.stretch_ud = 1
self.stretch_lr_2 = 1
self.stretch_ud_2 = 1
self.shift_ud = 0
self.shift_lr = 0
self.tilt = 0
self.shift_ud_2 = 0
self.shift_lr_2 = 0
self.tilt_2 = 0
self.active_image = "RGB"
self.mode = ""
# IMAGE DATA
self.upimage = None
self.downimage = None
self.original_image_graph = None
self.original_image_data = None
self.original_image_data_ol = None
self.original_image = None
self.image_array = None
# GUI
self.left_image_text = None
self.right_image_text = None
self.shiftLR_text = None
self.entry_shift_lr = None
self.ShiftUD_text = None
self.entry_shift_ud = None
self.tilt_text = None
self.entry_tilt = None
self.stretchLR_text = None
self.entry_stretch_lr = None
self.stretchUD_text = None
self.entry_stretch_ud = None
self.shiftLR_text_2 = None
self.entry_shift_lr_2 = None
self.shiftUD_text_2 = None
self.entry_shift_ud_2 = None
self.tilt_text_2 = None
self.entry_tilt_2 = None
self.stretchLR_text_2 = None
self.entry_stretch_lr_2 = None
self.stretchUD_text_2 = None
self.entry_stretch_ud_2 = None
self.select_data_cube_button = None
self.select_output_dir_button = None
self.render_data_cube_button = None
self.selection_listbox = None
self.data_cube_path_label = None
self.output_dir_label = None
self.delete_button = None
self.data_cube_path_label = None
self.path_label = None
self.rgb_button = None
self.sto2_button = None
self.nir_button = None
self.thi_button = None
self.twi_button = None
self.tli_button = None
self.ohi_button = None
self.instant_save_button = None
# PATHS
self.current_dc_path = None
self.current_dc_path_ol = None
self._init_widget()
# ---------------------------------------------- UPDATER AND GETTERS ----------------------------------------------
def get_selected_data_cube_path(self):
if len(self.selection_listbox.curselection())>0:
index = self.selection_listbox.curselection()[0]
else:
index = self.current_dc_path
return self.data_cube_paths[index]
def get_selected_data_cube_path_ol(self):
if len(self.selection_listbox_ol.curselection())>0:
index = self.selection_listbox_ol.curselection()[0]
else:
index = self.current_dc_path_ol
return self.data_cube_paths[index]
def update_original_image(self, original_image_data, original_image_data_ol):
self.original_image_data = original_image_data
self._build_original_image(self.original_image_data, original_image_data_ol)
# ------------------------------------------------ INITIALIZATION ------------------------------------------------
def _init_widget(self):
self._build_info_label()
self._build_rgb()
self._build_sto2()
self._build_nir()
self._build_thi()
self._build_twi()
self._build_tli()
self._build_ohi()
self._build_original_image(self.original_image_data, self.original_image_data_ol)
self._build_select_superdir_button()
self._build_select_all_subfolders_button()
self._build_selection_box()
self._build_selection_box_overlay()
self._build_next_button()
self._build_reset_button()
self._build_inputs()
self._build_save_img_button()
self._build_subtract_button()
self._build_addition_button()
self._build_multiply_button()
self._build_mean_button()
self._clean_list_button()
self._build_checkbox_hypergui_crops()
self.rgb_button.config(foreground="red")
# ---------------------------------------------- BUILDERS (DISPLAY) -----------------------------------------------
def _build_rgb(self):
self.rgb_button = make_button(self.root, text='RGB', width=3, command=self.__update_to_rgb, row=0, column=6,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_sto2(self):
self.sto2_button = make_button(self.root, text='StO2', width=4, command=self.__update_to_sto2, row=0, column=7,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_nir(self):
self.nir_button = make_button(self.root, text='NIR', width=3, command=self.__update_to_nir, row=0, column=8,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_thi(self):
self.thi_button = make_button(self.root, text='THI', width=3, command=self.__update_to_thi, row=0, column=9,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_twi(self):
self.twi_button = make_button(self.root, text='TWI', width=3, command=self.__update_to_twi, row=0, column=10,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_tli(self):
self.tli_button = make_button(self.root, text='TLI', width=3, command=self.__update_to_tli, row=0, column=11,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
def _build_ohi(self):
self.ohi_button = make_button(self.root, text='OHI', width=3, command=self.__update_to_ohi, row=0, column=12,
columnspan=1, inner_pady=5, outer_padx=(0, 5), outer_pady=(5, 0))
# ----------------------------------------------- BUILDERS (MISC) -----------------------------------------------
def _build_info_label(self):
self.info_label = make_label_button(self.root, text='Baseline Subtraction', command=self.__info, width=8)
def _build_next_button(self):
self.next_button = make_button(self.root, text='Next (wo. saving)', width=12, command=self.__next,
row=26, column=12, rowspan = 3, columnspan=1, inner_pady=5, outer_padx=5,
outer_pady=(10, 15), height= 2)
def _build_reset_button(self):
self.next_button = make_button(self.root, text='Reset', width=9, command=self.__reset,
row=26, column=6, rowspan = 1, columnspan=1, inner_pady=5, outer_padx=5,
outer_pady=(10, 15), height= 1)
def _clean_list_button(self):
self.next_button = make_button(self.root, text='Clean List', width=12, command=self.__trash_list,
row=27, column=0, rowspan = 2, columnspan=1, inner_pady=5, outer_padx=5,
outer_pady=(10, 15), height= 2)
def _build_select_superdir_button(self):
self.select_data_cube_button = make_button(self.root, text="Open OP\nFolder",
command=self.__add_data_cube_dirs, inner_padx=10, inner_pady=10,
outer_padx=15, row=25, rowspan = 1, column=0, width=11, outer_pady=(5, 5))
def _build_save_img_button(self):
self.select_data_cube_button = make_button(self.root, text="Save",
command=self.__save_img, inner_padx=10, inner_pady=10,
outer_padx=15, row=25, rowspan = 1, column=1, columnspan=4, width=11, outer_pady=(5, 5))
def _build_subtract_button(self):
self.subtract_button = make_button(self.root, text="-",
command=self.__subtract, inner_padx=10, inner_pady=10,
outer_padx=5, row=26, rowspan = 1, column=1, width=5, outer_pady=(1, 1))
def _build_addition_button(self):
self.subtract_button = make_button(self.root, text="+",
command=self.__add, inner_padx=10, inner_pady=10,
outer_padx=5, row=27, rowspan = 2, column=1, width=5, outer_pady=(1, 1))
def _build_multiply_button(self):
self.subtract_button = make_button(self.root, text="*",
command=self.__multiply, inner_padx=10, inner_pady=10,
outer_padx=5, row=27, rowspan = 2, column=2, width=5, outer_pady=(1, 1))
def _build_mean_button(self):
self.mean_button = make_button(self.root, text="Mean",
command=self.__mean, inner_padx=10, inner_pady=10,
outer_padx=5, row=26, rowspan = 1, column=2, width=5, outer_pady=(1, 1))
def _build_select_all_subfolders_button(self):
self.select_data_cube_button = make_button(self.root, text="Open Project\nFolder",
command=self.__add_data_cube_subdirs, inner_padx=10, inner_pady=10,
outer_padx=15, row=26, rowspan=1, column=0, width=11, outer_pady=(5, 5))
def _build_checkbox_hypergui_crops(self):
hypergui_crops_label = make_label(self.root, "use hypergui_crops", row=0, column=1, columnspan=2,
outer_padx=(35, 15), outer_pady=(10, 15), inner_padx=10, inner_pady=5, wraplength=140)
hypergui_crops_checkbox = make_checkbox(self.root, text="", row=0, column=1, columnspan=2,
var=self.checkbox_value_hypergui_crops, sticky=NE, inner_padx=0,
inner_pady=0, outer_pady=(10, 0), outer_padx=(0, 20))
hypergui_crops_checkbox.deselect()
hypergui_crops_checkbox.bind('<Button-1>', self.__update_hypergui_crops_checkbox)
def _build_selection_box(self):
self.selection_listbox = make_listbox(self.root, row=1, column=0, rowspan=24, padx=(0, 15), pady=(0, 15), height = 35, width = 32)
self.selection_listbox.bind('<<ListboxSelect>>', self.__update_selected_data_cube)
def _build_selection_box_overlay(self):
self.selection_listbox_ol = make_listbox(self.root, row=1, column=1, columnspan = 4, rowspan=24, padx=(0, 15), pady=(0, 15), height = 35, width = 32)
self.selection_listbox_ol.bind('<<ListboxSelect>>', self.__update_selected_data_cube_ol)
def _build_inputs(self):
self.left_image_text = make_text(self.root, content="Left image", row=27, column=6, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.left_image_text )
self.right_image_text = make_text(self.root, content="Right image", row=28, column=6, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.right_image_text)
self.shiftLR_text = make_text(self.root, content="Shift LR (a, d)", row=26, column=7, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.shiftLR_text)
self.entry_shift_lr = make_entry(self.root, row=28, column=7, width=12, pady=5, columnspan=1, entry = self.entry_shift_lr)
self.entry_shift_lr.bind('<Return>', self.__update_trans)
self.entry_shift_lr.delete(0,END)
self.entry_shift_lr.insert(END, str(int(self.shift_lr)))
self.ShiftUD_text = make_text(self.root, content="Shift UD (w, s)", row=26, column=8, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.ShiftUD_text)
self.entry_shift_ud = make_entry(self.root, row=28, column=8, width=12, pady=5, columnspan=1, entry = self.entry_shift_ud)
self.entry_shift_ud.bind('<Return>', self.__update_trans)
self.entry_shift_ud.delete(0,END)
self.entry_shift_ud.insert(END, str(int(self.shift_ud)))
self.tilt_text = make_text(self.root, content="Tilt (q, e)", row=26, column=9, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.tilt_text)
self.entry_tilt = make_entry(self.root, row=28, column=9, width=12, pady=5, columnspan=1, entry = self.entry_tilt)
self.entry_tilt.bind('<Return>', self.__update_trans)
self.entry_tilt.delete(0,END)
self.entry_tilt.insert(END, str(round(self.tilt, 2)))
self.stretchLR_text = make_text(self.root, content="Stretch LR (y, x)", row=26, column=10, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.stretchLR_text)
self.entry_stretch_lr = make_entry(self.root, row=28, column=10, width=12, pady=5, columnspan=1, entry = self.entry_stretch_lr)
self.entry_stretch_lr.bind('<Return>', self.__update_trans)
self.entry_stretch_lr.delete(0,END)
self.entry_stretch_lr.insert(END, str(float(self.stretch_lr)))
self.stretchUD_text = make_text(self.root, content="Stretch UD (c, v)", row=26, column=11, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.stretchUD_text)
self.entry_stretch_ud = make_entry(self.root, row=28, column=11, width=12, pady=5, columnspan=1, entry = self.entry_stretch_ud)
self.entry_stretch_ud.bind('<Return>', self.__update_trans)
self.entry_stretch_ud.delete(0,END)
self.entry_stretch_ud.insert(END, str(float(self.stretch_ud)))
self.shiftLR_text_2 = make_text(self.root, content="Shift LR (a, d)", row=26, column=7, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.shiftLR_text_2)
self.entry_shift_lr_2 = make_entry(self.root, row=27, column=7, width=12, pady=5, columnspan=1, entry = self.entry_shift_lr_2)
self.entry_shift_lr_2.bind('<Return>', self.__update_trans)
self.entry_shift_lr_2.delete(0,END)
self.entry_shift_lr_2.insert(END, str(int(self.shift_lr_2)))
self.shiftUD_text_2 = make_text(self.root, content="Shift UD (w, s)", row=26, column=8, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text= self.shiftUD_text_2)
self.entry_shift_ud_2 = make_entry(self.root, row=27, column=8, width=12, pady=5, columnspan=1, entry = self.entry_shift_ud_2)
self.entry_shift_ud_2.bind('<Return>', self.__update_trans)
self.entry_shift_ud_2.delete(0,END)
self.entry_shift_ud_2.insert(END, str(int(self.shift_ud_2)))
self.tilt_text_2 = make_text(self.root, content="Tilt (q, e)", row=26, column=9, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.tilt_text_2)
self.entry_tilt_2 = make_entry(self.root, row=27, column=9, width=12, pady=5, columnspan=1, entry = self.entry_tilt_2)
self.entry_tilt_2.bind('<Return>', self.__update_trans)
self.entry_tilt_2.delete(0,END)
self.entry_tilt_2.insert(END, str(round(self.tilt_2, 2)))
self.stretchLR_text_2 = make_text(self.root, content="Stretch LR (y, x)", row=26, column=10, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text = self.stretchLR_text_2)
self.entry_stretch_lr_2 = make_entry(self.root, row=27, column=10, width=12, pady=5, columnspan=1, entry= self.entry_stretch_lr_2)
self.entry_stretch_lr_2.bind('<Return>', self.__update_trans)
self.entry_stretch_lr_2.delete(0,END)
self.entry_stretch_lr_2.insert(END, str(float(self.stretch_lr_2)))
self.stretchUD_text_2 = make_text(self.root, content="Stretch UD (c, v)", row=26, column=11, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5, text= self.stretchUD_text_2)
self.entry_stretch_ud_2 = make_entry(self.root, row=27, column=11, width=12, pady=5, columnspan=1, entry = self.entry_stretch_ud_2)
self.entry_stretch_ud_2.bind('<Return>', self.__update_trans)
self.entry_stretch_ud_2.delete(0,END)
self.entry_stretch_ud_2.insert(END, str(float(self.stretch_ud_2)))
def _build_inputs_slider(self):
self.lower_scale_text = make_text(self.root, content="Shift LR (a, d)", row=26, column=7, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text = make_text(self.root, content="Shift UD (w, s)", row=26, column=8, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text = make_text(self.root, content="Tilt (q, e)", row=26, column=9, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text = make_text(self.root, content="Stretch LR (y, x)", row=26, column=10, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text = make_text(self.root, content="Stretch UD (c, v)", row=26, column=11, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text_2 = make_text(self.root, content="Shift LR (a, d)", row=26, column=7, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text_2 = make_text(self.root, content="Shift UD (w, s)", row=26, column=8, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text_2 = make_text(self.root, content="Tilt (q, e)", row=26, column=9, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text_2 = make_text(self.root, content="Stretch LR (y, x)", row=26, column=10, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.lower_scale_text_2 = make_text(self.root, content="Stretch UD (c, v)", row=26, column=11, columnspan=1, width=12,
bg=tkcolour_from_rgb(BACKGROUND), pady=5)
self.shift_lr_slider = make_slider(self.root, "", row=27, rowspan=1, column=7, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=-320, to=320)
self.shift_lr_slider.set(0)
self.shift_ud_slider = make_slider(self.root, "", row=27, rowspan=1, column=8, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=-240, to=240)
self.shift_ud_slider.set(0)
self.tilt_slider = make_slider(self.root, "", row=27, rowspan=1, column=9, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=360)
self.tilt_slider.set(0)
self.stretch_lr_slider = make_slider(self.root, "", row=27, rowspan=1, column=10, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=2, resolution=0.01)
self.stretch_lr_slider.set(1)
self.stretch_ud_slider = make_slider(self.root, "", row=27, rowspan=1, column=11, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=2, resolution=0.01)
self.stretch_ud_slider.set(1)
self.shift_lr_slider_2 = make_slider(self.root, "", row=28, rowspan=1, column=7, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=-320, to=320)
self.shift_lr_slider_2.set(0)
self.shift_ud_slider_2 = make_slider(self.root, "", row=28, rowspan=1, column=8, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=-240, to=240)
self.shift_ud_slider_2.set(0)
self.tilt_slider_2 = make_slider(self.root, "", row=28, rowspan=1, column=9, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=360)
self.tilt_slider_2.set(0)
self.stretch_lr_slider_2 = make_slider(self.root, "", row=28, rowspan=1, column=10, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=2, resolution=0.01)
self.stretch_lr_slider_2.set(1)
self.stretch_ud_slider_2 = make_slider(self.root, "", row=28, rowspan=1, column=11, command=self.__update_trans_slider, columnspan=1, orient = "horizontal", from_=0, to=2, resolution=0.01)
self.stretch_ud_slider_2.set(1)
# ---------------------------------------------- BUILDERS (IMAGE) -----------------------------------------------
def _build_original_image(self, data, data_ol):
if data is None:
# Placeholder
self.original_image = make_label(self.root, "original image placeholder", row=1, column=6, rowspan=25,
columnspan=10, inner_pady=300, inner_padx=400, outer_padx=(15, 10),
outer_pady=(15, 10))
else:
self.original_image_graph = Figure(figsize=(9, 7))
self.axes = self.original_image_graph.add_subplot(111)
self.original_image_graph.patch.set_facecolor(rgb_to_rgba(BACKGROUND))
self.axes.get_yaxis().set_visible(False)
self.axes.get_xaxis().set_visible(False)
colorImage = Image.fromarray(data)
rotated = colorImage.rotate(self.tilt_2)
data = np.array(rotated)
data = np.roll(data, self.shift_lr_2, axis = 1)
data = np.roll(data, self.shift_ud_2, axis = 0)
colorImage_bool = Image.fromarray(self.bark)
rotated_bool = colorImage_bool.rotate(self.tilt_2)
data_bool = np.array(rotated_bool)
data_bool = np.roll(data_bool, self.shift_lr_2, axis = 1)
data_bool = np.roll(data_bool, self.shift_ud_2, axis = 0)
self.bool = data_bool
self.original_image = self.axes.imshow(data, interpolation='none')
self.downimage = data
colorImage = Image.fromarray(data_ol)
rotated = colorImage.rotate(self.tilt)
data_ol = np.array(rotated)
data_ol = np.roll(data_ol, self.shift_lr, axis = 1)
data_ol = np.roll(data_ol, self.shift_ud, axis = 0)
colorImage_bool = Image.fromarray(self.bark_ol)
rotated_bool = colorImage_bool.rotate(self.tilt)
data_ol_bool = np.array(rotated_bool)
data_ol_bool = np.roll(data_ol_bool, self.shift_lr, axis = 1)
data_ol_bool = np.roll(data_ol_bool, self.shift_ud, axis = 0)
self.ol_bool = data_ol_bool
self.axes.imshow(data_ol, alpha = 0.5)
self.upimage = data_ol
self.original_image_graph.tight_layout()
self.original_image_canvas = FigureCanvasTkAgg(self.original_image_graph, master=self.root)
self.original_image_canvas.draw()
self.original_image_canvas.get_tk_widget().grid(column=6, row=1, columnspan=10, rowspan=25, ipady=0, ipadx=0)
self.original_image_canvas.get_tk_widget().bind('<Key-a>', self.__shift_right)
self.original_image_canvas.get_tk_widget().bind('<Key-d>', self.__shift_left)
self.original_image_canvas.get_tk_widget().bind('<Key-w>', self.__shift_up)
self.original_image_canvas.get_tk_widget().bind('<Key-s>', self.__shift_down)
self.original_image_canvas.get_tk_widget().bind('<Key-q>', self.__tilt_1)
self.original_image_canvas.get_tk_widget().bind('<Key-e>', self.__tilt_2)
self.original_image_canvas.get_tk_widget().bind('<Key-y>', self.__stretch_lr_min)
self.original_image_canvas.get_tk_widget().bind('<Key-x>', self.__stretch_lr_plu)
self.original_image_canvas.get_tk_widget().bind('<Key-c>', self.__stretch_ud_min)
self.original_image_canvas.get_tk_widget().bind('<Key-v>', self.__stretch_ud_plu)
# ----------------------------------------------- UPDATERS (IMAGE) ------------------------------------------------
def __update_to_rgb(self):
self.active_image = "RGB"
self.rgb_button.config(foreground="red")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="black")
self.update_original_image(self.RGB, self.RGB_ol)
def __update_to_sto2(self):
self.active_image = "STO2"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="red")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="black")
self.update_original_image(self.STO2, self.STO2_ol)
def __update_to_nir(self):
self.active_image = "NIR"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="red")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="black")
self.update_original_image(self.NIR, self.NIR_ol)
def __update_to_thi(self):
self.active_image = "THI"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="red")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="black")
self.update_original_image(self.THI,self.THI_ol)
def __update_to_twi(self):
self.active_image = "TWI"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="red")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="black")
self.update_original_image(self.TWI, self.TWI_ol)
def __update_to_tli(self):
self.active_image = "TLI"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="red")
self.ohi_button.config(foreground="black")
self.update_original_image(self.TLI, self.TLI_ol)
def __update_to_ohi(self):
self.active_image = "OHI"
self.rgb_button.config(foreground="black")
self.sto2_button.config(foreground="black")
self.nir_button.config(foreground="black")
self.thi_button.config(foreground="black")
self.twi_button.config(foreground="black")
self.tli_button.config(foreground="black")
self.ohi_button.config(foreground="red")
self.update_original_image(self.OHI, self.OHI_ol)
def __update_img(self):
if self.active_image is "RGB":
self.__update_to_rgb()
elif self.active_image is "STO2":
self.__update_to_sto2()
elif self.active_image is "NIR":
self.__update_to_nir()
elif self.active_image is "TWI":
self.__update_to_twi()
elif self.active_image is "THI":
self.__update_to_thi()
elif self.active_image is "OHI":
self.__update_to_ohi()
elif self.active_image is "TLI":
self.__update_to_tli()
# ------------------------------------------------- UPDATERS AND GETTERS (SELECTION LISTBOX) --------------------------------------------------
def __add_data_cube_dirs(self):
super_dir = self.__get_path_to_dir("Please select folder containing all the data folders.")
sub_dirs = self.__get_sub_folder_paths(super_dir)
for sub_dir in sub_dirs:
if os.path.exists(sub_dir +"/_hypergui_crops/") and self.hypergui_crops:
self.__add_data_cube(sub_dir)
elif not self.hypergui_crops:
self.__add_data_cube(sub_dir)
def __add_data_cube_subdirs(self):
super_dir = self.__get_path_to_dir("Please select folder containing all the OP folders.")
sub_dirs = self.__get_sub_folder_paths(super_dir, True)
for sub_dir in sub_dirs:
if os.path.exists(sub_dir +"/_hypergui_crops/") and self.hypergui_crops:
self.__add_data_cube(sub_dir)
elif not self.hypergui_crops:
self.__add_data_cube(sub_dir)
def __add_data_cube(self, sub_dir):
contents = os.listdir(sub_dir)
dc_path = [sub_dir + "/" + i for i in contents if "SpecCube.dat" in i] # takes first data cube it finds
if len(dc_path) > 0:
dc_path = dc_path[0]
if dc_path in self.data_cube_paths:
messagebox.showerror("Error", "That data has already been added.")
else:
# Add the new data to current class
self.data_cube_paths.append(dc_path)
# Display the data cube
concat_path = os.path.basename(os.path.normpath(dc_path))
self.selection_listbox.insert(END, concat_path)
self.selection_listbox.config(width=32)
self.selection_listbox_ol.insert(END, concat_path)
self.selection_listbox_ol.config(width=32)
def __get_path_to_dir(self, title):
if self.listener.dc_path is not None:
p = os.path.dirname(os.path.dirname(self.listener.dc_path))
path = filedialog.askdirectory(parent=self.root, title=title, initialdir=p)
else:
path = filedialog.askdirectory(parent=self.root, title=title)
return path
@staticmethod
def __get_sub_folder_paths(path_to_main_folder, recursive = False):
sub_folders = sorted(glob.glob(path_to_main_folder+"/**/", recursive = recursive))
return sub_folders
def __update_selected_data_cube(self, event = None):
if self.hypergui_crops:
self.__update_selected_data_cube_hypergui_crops()
else:
self.__update_selected_data_cube_main()
def __update_selected_data_cube_ol(self, event = None):
if self.hypergui_crops:
self.__update_selected_data_cube_ol_hypergui_crops()
else:
self.__update_selected_data_cube_ol_main()
def __update_selected_data_cube_main(self, event=None):
dc_path = self.get_selected_data_cube_path()[0:-33] + "/"
if self.current_dc_path != self.selection_listbox.curselection()[0]:
if len(self.selection_listbox.curselection())>0:
self.current_dc_path = self.selection_listbox.curselection()[0]
if self.current_dc_path_ol is None:
self.selection_listbox_ol.select_set(0)
a = Image.open(glob.glob(dc_path +"*RGB-Image.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 20:660, :3]
else:
a = a[30:510, 3:643, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
self.bark = np.zeros((480, 640,3))
mid_x = int(round(a.shape[0]/2))
mid_y = int(round(a.shape[1]/2))
before_midx = mid_x
before_midy = mid_y
if before_midx>240:
before_midx = 240
if before_midy>320:
before_midy = 320
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
self.bark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = 1
dark = dark.astype("uint8")
self.RGB = dark
self.bark = self.bark.astype("uint8")
if len(glob.glob(dc_path +"*NIR-Perfusion.png"))>0:
a = Image.open(glob.glob(dc_path +"*NIR-Perfusion.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.NIR = dark
else:
self.NIR = np.zeros((480, 640,3)).astype("uint8")
if len(glob.glob(dc_path +"*TWI.png"))>0:
a = Image.open(glob.glob(dc_path +"*TWI.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.TWI = dark
else:
self.TWI = np.zeros((480, 640,3)).astype("uint8")
if len(glob.glob(dc_path +"*THI.png"))>0:
a = Image.open(glob.glob(dc_path +"*THI.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.THI = dark
else:
self.THI = np.zeros((480, 640,3)).astype("uint8")
if len(glob.glob(dc_path +"*Oxygenation.png"))>0:
a = Image.open(glob.glob(dc_path +"*Oxygenation.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.STO2 = dark
else:
self.STO2 = np.zeros((480, 640,3)).astype("uint8")
if len(glob.glob(dc_path +"*TLI.png"))>0:
a = Image.open(glob.glob(dc_path +"*TLI.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.TLI = dark
else:
self.TLI = np.zeros((480, 640,3)).astype("uint8")
if len(glob.glob(dc_path +"*OHI.png"))>0:
a = Image.open(glob.glob(dc_path +"*OHI.png")[0])
a = np.asarray(a)
if a.shape[0] == 550:
a = a[50:530, 50:690, :3]
else:
a = a[26:506, 4:644, :3]
a = Image.fromarray(a)
a = a.resize((int(a.size[0]*self.stretch_lr_2), int(a.size[1]*self.stretch_ud_2)))
a = np.asarray(a)
dark = np.zeros((480, 640,3))
dark[(240-before_midx):(240-before_midx+a.shape[0]), (320-before_midy):(320-before_midy+a.shape[1]), 0:3] = a[(mid_x-before_midx):(mid_x-before_midx+2*before_midx), (mid_y-before_midy):(mid_y-before_midy+2*before_midy), 0:3]
dark = dark.astype("uint8")
self.OHI = dark
else:
self.OHI =
|
np.zeros((480, 640,3))
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 04 13:46:09 2013
@author: <NAME>
A set of classes and methods intended to model the diffusion signal in a range
of environments. Restricted diffusion inside spheres and cylinders can be
modelled, as well as a tortuosity approximation of the extracellular space
2019-09-18: adjusting inputs so that parameters passed are scaled near 1.
"""
import numpy as np
gammap=42.576e6*2*np.pi
class Tortuosity():
"""
Tortuosity(fs=0.5,ft=0.3,acyl=np.r_[5,50],DE=1.7, fibreDir=[1,0,0], AngleDist='random')
Set up as a tortuosity approximation for aligned cylinders (Stanisz et al.,
1997 and Szafer et al., 1995), but there exist formulae for spheres (Stanisz, 2003).
"""
def __init__(self, fs=0.5,ft=0.3,acyl=np.r_[5,50],DE=1.7, fibreDir=[1,0,0], AngleDist='random'):
"""
acyl in um, DE in um^2/ms
"""
self.fs=fs
self.ft=ft
self.acyl=acyl*1e-6
self.DE=DE*1e-9
self.AngleDist=AngleDist
self.Lpar=np.log(2*self.acyl[1]/self.acyl[0]-1)*(self.acyl[0]/self.acyl[1])**2
self.Lperp=(1-self.Lpar)/2
self.ADCEpar=self.DE*(1-self.fs-self.ft)**((self.fs/2+self.ft*self.Lpar/(1-self.Lpar))/(self.fs+self.ft))
self.ADCEperp=self.DE*(1-self.fs-self.ft)**((self.fs/2+self.ft*self.Lperp/(1-self.Lperp))/(self.fs+self.ft))
def GetSig(self,pulseseq):
"""
Function to calculate the diffusion signal for a particular pulse
sequence (see PulseSequences.py module)
(Right now the angle distribution is unused because I've assumed aligned
cylinders)
"""
bVals=pulseseq.GetB()
SigE=np.exp(-bVals*(self.ADCEpar+self.ADCEperp))
return SigE
def GetJac(self,pulseseq):
bVals=pulseseq.GetB()
return -1*bVals*np.exp(-bVals*(self.ADCEpar+self.ADCEperp))
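# Added illustrative helper (not part of the original module): the hindered
# extracellular ADCs are computed in __init__, so no pulse sequence is needed
# to inspect them; the parameter values below are arbitrary.
def _tortuosity_demo():
    tort = Tortuosity(fs=0.5, ft=0.3, acyl=np.r_[5, 50], DE=1.7)
    return tort.ADCEpar, tort.ADCEperp  # parallel / perpendicular ADCs in m^2/s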
class Sphere(object):
"""
Sphere(rad=5,DI=1.1,ApproxType='GPD')
Water motion inside restricted sphere with defined radius
Inputs:
rad - The radius of the cell in um
DI - Intracellular diffusion coefficient for this group of cells in um^2/ms
ApproxType - Approximation used to calculate dephasing by gradients. Choices
are GPD (Gaussian Phase Distribution, default) or SPG (short pulse gradient)
"""
def __init__(self, rad=5,DI=1.1,ApproxType='GPD'):
self.rad=rad*1e-6
self.DI=DI*1e-9
self.ApproxType=ApproxType
def GetSig(self,pulseseq):
"""
Function to calculate the diffusion signal for a particular pulse
sequence (GPD is set up for either TRSE or PGSE; see PulseSequences.py
module). The SPG calculation involves an infinite series, which is
terminated after 60 steps, hard-coded in vec below. The GPD approximation
involves the BesselRoots, which are hard-coded below.
"""
lval=self.rad
Sig=np.ones(pulseseq.gMag.shape)
if self.ApproxType=="SPG":
#Short pulse gradient approximation
for gct,(gval,dval,Dval) in enumerate(zip(pulseseq.gMag,pulseseq.delta,pulseseq.DELTA)):
if gval!=0:
qval=gammap*gval*dval
vec=np.r_[1:60] #accuracy of convergence
qsum=sum(np.exp(-1*vec**2*np.pi*np.pi*self.DI*Dval/lval/lval)*(1-(-1)**vec*np.cos(qval*lval))/((qval*lval)**2-(vec*np.pi)**2)**2)
Sig[gct] = (2*(1-np.cos(qval*lval))/(qval*lval)**2+4*(qval*lval)**2*qsum)
elif self.ApproxType=="GPD":
#GPD approximation for spheres
BesselRoots=np.r_[2.08157597782, 5.94036999057, 9.20584014294, 12.4044450219, 15.5792364104, 18.7426455848, 21.8996964795, 25.052825281,
28.203361004, 31.3520917266, 34.4995149214, 37.6459603231, 40.7916552313, 43.9367614714, 47.0813974122, 50.2256516492,
53.3695918205, 56.5132704622, 59.6567290035, 62.8000005565, 65.9431119047, 69.0860849466, 72.228937762, 75.3716854093,
78.5143405319, 81.656913824, 84.7994143922, 87.9418500397, 91.0842274915, 94.2265525746, 97.3688303629, 100.511065295,
103.653261272, 106.795421733, 109.937549726, 113.079647959, 116.221718846, 119.363764549, 122.505787005, 125.647787961,
128.789768989, 131.931731515, 135.073676829, 138.215606107, 141.357520417, 144.499420737, 147.64130796, 150.783182905,
153.925046323, 157.066898908, 160.208741296, 163.350574075, 166.492397791, 169.634212946, 172.776020008, 175.917819411,
179.059611558, 182.201396824, 185.343175559, 188.484948089, 191.626714721, 194.76847574, 197.910231412, 201.05198199]
Betam=BesselRoots/lval
Yfunc=lambda x: np.exp(-1*self.DI*Betam**2*x)
try: #TRSE pulse sequence is the only one with this format
gDirsvec=pulseseq.gDirs
gMagvec=pulseseq.gMag
del1vec=pulseseq.del1
del2vec=pulseseq.del2
del3vec=pulseseq.del3
t1vec=pulseseq.t1
t2vec=pulseseq.t2
t3vec=pulseseq.t3
for gct,(gDir,gMag,del1,del2,del3,t1,t2,t3) in enumerate(zip(gDirsvec,gMagvec,del1vec,del2vec,del3vec,t1vec,t2vec,t3vec)):
numer=2*self.DI*Betam**2*(del1+del2)-5-(Yfunc(t2-t1)-Yfunc(t3-t1)-Yfunc(t3-t2)-Yfunc(del1)-Yfunc(t2-t1-del1)+
+Yfunc(t3-t1-del1)-2*Yfunc(del2)-2*Yfunc(t2-t1+del2)+2*Yfunc(t2-t1+del2-del1)+2*Yfunc(t3-t2-del2)-
2*Yfunc(del3)+Yfunc(del2+del3)+Yfunc(t2-t1+del2+del3)-Yfunc(t2-t1+del2+del3-del1)-
2*Yfunc(t3-t2+del1-del3)-Yfunc(t3-t1+del2-del3)-Yfunc(del1+del2-del3)+Yfunc(t3-t1+del1+del2-del3)+
Yfunc(t3-t2+del1+del2-del3)-Yfunc(t3-t2-del2-del3)+Yfunc(t3-t2+del1-2*del3))
denom=self.DI**2*Betam**6*(lval**2*Betam**2-2)
Sig[gct]=np.exp(-2*gammap**2*sum(numer/denom)*gMag**2)
except AttributeError: # pulse sequence is PGSE not TRSE
gDirsvec=pulseseq.gDirs
gMagvec=pulseseq.gMag
delvec=pulseseq.delta
DELTA=pulseseq.DELTA
for gct,(gDir,gMag,del1,t1) in enumerate(zip(gDirsvec,gMagvec,delvec,DELTA)):
numer=2*self.DI*del1*(Betam)**2-2+2*Yfunc(del1)+2*Yfunc(t1)-Yfunc(t1-del1)-Yfunc(t1+del1)
denom=self.DI**2*Betam**6*(self.rad**2*Betam**2-2)
Sig[gct]=np.exp(-2*gammap**2*sum(numer/denom)*gMag**2)
return Sig
def GetJac(self,pulseseq):
dp=1e-5
pNames=['rad','DI']
npar=len(pNames)
jac=np.ones([npar,len(pulseseq.ScanPars['gMag'].squeeze())])
# compute numerically: perturb each attribute in place so GetSig sees the change
S0=self.GetSig(pulseseq)
for jct in range(npar):
phold=getattr(self,pNames[jct])
setattr(self,pNames[jct],phold*(1+dp))
S1=self.GetSig(pulseseq)
setattr(self,pNames[jct],phold)
jac[jct,:]=(S1-S0)/(dp*phold)
return jac
class Cylinder(object):
"""
Cylinder(rad=5, length=50, DI=1.1, fibreDir=[1,0,0],ApproxType='GPD')
Water motion inside a cylinder with radius and particular orientation
Inputs:
rad - The radius of the cylinder in um
length - length of cylinder in um (currently unused in signal calculations,
which assume infinitely long cylinders)
DI - Intracellular diffusion coefficient for this cylinder in um^2/ms
fibreDir - direction of main fibre axis (same co-ordinate system as the gradient
directions used in the pulse sequence)
ApproxType - Approximation used to calculate dephasing by gradients. Choices
are GPD (Gaussian Phase Distribution, default) or SPG (short pulse gradient)
"""
def __init__(self, rad=5, length=50, DI=1.1, fibreDir=[1,0,0],ApproxType='GPD'):
self.rad=rad*1e-6
self.length=length*1e-6
self.DI=DI*1e-9
self.fibreDir=fibreDir
self.ApproxType=ApproxType
def GetSig(self,pulseseq,numSteps=60):
"""
Function to calculate the diffusion signal for a pulse sequence (only
set up for PGSE currently, see PulseSequences.py module). The SPG
calculation involves an infinite series, which is terminated after
numSteps steps.
The GPD approximation involves the BesselRoots, which are hard-coded below.
Currently using the assumption that the signal is the product of the
parallel and perpendicular signals (Assaf et al., 2004). The calculation
can be made using different approximations.
"""
lval=self.rad
gradDir=pulseseq.gDirs
gradMag=pulseseq.gMag
delta=pulseseq.delta
Delta=pulseseq.DELTA
DI=self.DI
if type(gradMag) is not np.ndarray:
gradMag=np.r_[gradMag]
if type(Delta) is not np.ndarray:
Delta=Delta*np.ones(gradMag.shape)
if type(delta) is not np.ndarray:
delta=delta*np.ones(gradMag.shape)
Sperp=np.ones(gradMag.shape)
Spar=np.exp(-1*(gammap*gradMag*np.dot(gradDir,self.fibreDir)*delta)**2*(Delta-delta/3)*DI)
sinThetasq=1-np.dot(gradDir,self.fibreDir)**2
if self.ApproxType=="SPG":
#Short pulse gradient approximation
for gct,(gval,dval,Dval) in enumerate(zip(gradMag*np.sqrt(1-np.dot(gradDir,self.fibreDir)**2),delta,Delta)):
if gval!=0:
qval=gammap*gval*dval
vec=np.r_[1:60] #accuracy of convergence
qsum=np.sum(np.exp(-1*vec**2*np.pi*np.pi*DI*Dval/lval/lval)*(1-(-1)**vec*np.cos(qval*lval))/((qval*lval)**2-(vec*np.pi)**2)**2)
Sperp[gct] = (2*(1-np.cos(qval*lval))/(qval*lval)**2+4*(qval*lval)**2*qsum)
elif self.ApproxType=="GPD":
#GPD approximation for cylinders
import scipy.special
for gct,(gval,dval,Dval) in enumerate(zip(gradMag,delta,Delta)):
BesselRoots=scipy.special.jnp_zeros(1,numSteps) #can adjust for better convergence
Betam=BesselRoots/lval
Yfunc=lambda x: np.exp(DI*Betam**2*x)
numer=2*DI*Betam**2*dval-2+2*Yfunc(-1*dval)+2*Yfunc(-1*Dval)-Yfunc(dval-Dval)-Yfunc(-1*(Dval+dval))
denom=DI**2*Betam**6*(lval**2*Betam**2-1)
Sperp[gct]=np.exp(-2*gammap**2*np.sum(numer/denom)*sinThetasq[gct]*gval**2)
return Spar*Sperp
class IsotropicFree(object):
"""
IsotropicFree(ADC=1)
Free diffusion in an isotropic environment with coeffecient ADC
"""
def __init__(self,ADC=1):
self.ADC=ADC*1e-9
def GetSig(self,pulseseq):
bVals=pulseseq.GetB()
return np.exp(-1*bVals*self.ADC)
def GetJac(self,pulseseq):
bVals=pulseseq.GetB()
return -1*bVals*np.exp(-1*bVals*self.ADC)
class Stick(object):
"""
Stick(thetaf,phif,DI=1.1)
Diffusion along a 1D "stick" (infinitessimally think cyclinder) with directions
defined by thetaf (angle from z) in radians and phif (rotation of xy-projection from x-axis)
"""
def __init__(self,thetaf,phif,DI=1.1):
self.DI=DI*1e-9
self.thetaf=thetaf
self.phif=phif
@property
def fibreDir(self):
return np.r_[np.sin(self.thetaf)*np.cos(self.phif),np.sin(self.thetaf)*np.sin(self.phif),np.cos(self.thetaf)]
def GetSig(self,pulseseq):
gFact=np.zeros(pulseseq.gMag.shape)
for gct,gd in enumerate(pulseseq.gDirs):
gFact[gct]=np.dot(pulseseq.gDirs[gct],self.fibreDir)
bVals=pulseseq.GetB()*gFact
return np.exp(-1*bVals*self.DI)
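# Added illustrative helper (not part of the original module): the stick's unit
# direction vector follows the usual spherical-angle convention used above.
def _stick_direction_demo():
    stick = Stick(thetaf=np.pi / 2, phif=0.0, DI=1.1)
    return stick.fibreDir  # ~ [1, 0, 0] up to floating-point error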
class StretchedExp(object):
"""
StretchedExp(ADC=1,alpha=1)
Stretched exponential (phenomenological) diffusion model
ADC - apparent diffusion coefficient in um^2/ms
alpha - exponent
"""
def __init__(self,ADC=1,alpha=1):
self.ADC=ADC*1e-9
self.alpha=alpha
def GetSig(self,pulseseq):
bVals=pulseseq.GetB()
return np.exp(-1*(bVals*self.ADC)**self.alpha)
class Kurtosis(object):
"""
Kurtosis(ADC=1,kurt=0)
Kurtosis (phenomenological) diffusion model
ADC - apparent diffusion coefficient in um^2/ms
kurt - kurtosis parameter. A value of 0 is Gaussian diffusion
"""
def __init__(self,ADC=1,kurt=0):
self.ADC=ADC*1e-9
self.kurt=kurt
def GetSig(self,pulseseq):
bVals=pulseseq.GetB()
return np.exp(-1*bVals*self.ADC+(bVals)**2*self.kurt*self.ADC**2/6)
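# Added illustrative helper (not part of the original module): evaluating the
# kurtosis signal expression S/S0 = exp(-b*D + b^2*K*D^2/6) directly, mirroring
# Kurtosis.GetSig above (b in s/m^2, D in m^2/s; the values are hypothetical).
def _kurtosis_signal_demo():
    b = np.array([0.0, 1.0e9, 2.0e9])
    D, K = 1.0e-9, 0.8
    return np.exp(-b * D + b ** 2 * K * D ** 2 / 6)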
class DiffTensor(object):
"""
DiffTensor(lambda1,lambda2,lambda3,theta,phi,alpha)
lambda 1 - diffusion coefficient along primary direction (defined by theta
and phi below) in um^2/ms
lambda 2 - diffusion coefficient along secondary directions in um^2/ms
lambda 3 - diffusion coefficient along the tertiary direction in um^2/ms
theta - angle of primary diffusion direction relative to z-axis, in radians
phi - angle of primary diffusion direction projection in xy-plane relative to x-axis, in radians
alpha - angle of secondary diffusion direction (third direction is determined by orthogonality constraint)
"""
def __init__(self,lambda1,lambda2,lambda3,theta,phi,alpha):
self.theta=theta
self.phi=phi
self.alpha=alpha
self.lambda1=lambda1*1e-9
self.lambda2=lambda2*1e-9
self.lambda3=lambda3*1e-9
@property
def e1(self):
return np.r_[np.sin(self.theta)*np.cos(self.phi),np.sin(self.theta)*np.sin(self.phi),np.cos(self.theta)]
@property
def e2(self):
# first, find a vector perpendicular to e1. Then rotate by alpha around e1
if (np.dot(self.e1,np.r_[0,1,0])-1)==0:
rotvec=np.dot(np.array([[1,0,0],[0,0,-1],[0,1,0]]),self.e1)
else:
rotvec=np.dot(np.array([[0,0,1],[0,1,0],[-1,0,0]]),self.e1)
RotMat=np.cos(self.alpha)*np.eye(3) + np.sin(self.alpha)*np.array([[0,-1*self.e1[2],self.e1[1]],[self.e1[2],0,-1*self.e1[0]],
[-1*self.e1[1],self.e1[0],0]]) + (1-np.cos(self.alpha))*np.array([[self.e1[0]**2,self.e1[0]*self.e1[1],self.e1[0]*self.e1[2]],
[self.e1[0]*self.e1[1],self.e1[1]**2,self.e1[1]*self.e1[2]],[self.e1[0]*self.e1[2],self.e1[1]*self.e1[2],self.e1[2]**2]])
return np.dot(RotMat,rotvec)
@property
def e3(self):
return np.cross(self.e1,self.e2)/np.linalg.norm(np.cross(self.e1,self.e2))
def GetSig(self,pulseseq):
bVals=pulseseq.GetB()
SigVals=np.zeros(bVals.shape)
for gct,gd in enumerate(pulseseq.gDirs):
SigVals[gct]=np.exp(-1*bVals[gct]*(self.lambda1*np.dot(self.e1,gd)**2+self.lambda2*np.dot(self.e2,gd)**2
+self.lambda3*np.dot(self.e3,gd)**2))
return SigVals
class Zeppelin(object):
"""
Zeppelin(lambda1,lambda2,theta,phi)
Diffusion calculation for spherically symmetric tensor
lambda 1 - diffusion coefficient along primary direction (defined by theta
and phi below) in um^2/ms
lambda 2 - diffusion coefficient along secondary directions in um^2/ms
theta - angle of primary diffusion direction relative to z-axis, in radians
phi - angle of primary diffusion direction projection in xy-plane relative to x-axis, in radians
"""
def __init__(self,lambda1,lambda2,theta,phi):
self.theta=theta
self.phi=phi
self.lambda1=lambda1
self.lambda2=lambda2
@property
def e1(self):
return np.r_[np.sin(self.theta)*np.cos(self.phi),np.sin(self.theta)*np.sin(self.phi),np.cos(self.theta)]
def GetSig(self,pulseseq):
bVals=pulseseq.GetB()
SigVals=np.zeros(bVals.shape)
for gct,gd in enumerate(pulseseq.gDirs):
SigVals[gct]=np.exp(-1*bVals[gct]*self.lambda1*
|
np.dot(self.e1,gd)
|
numpy.dot
|
import numpy as np
from scipy import stats
Multi_N= stats.multivariate_normal
class GaussianMixtureModel():
"""Density estimation with Gaussian Mixture Models (GMM).
You can add new functions if you find it useful, but *do not* change
the names or argument lists of the functions provided.
"""
def __init__(self, X, K):
"""Initialise GMM class.
Arguments:
X -- data, N x D array
K -- number of mixture components, int
"""
self.X = X
self.n = X.shape[0]
self.D = X.shape[1]
self.K = K
def MNormal(self, mu, S, X):
result = np.zeros((self.n,self.K))
for i in range(self.n):
for j in range(self.K):
result[i,j] = Multi_N.pdf(self.X[i],mu[j],S[j], allow_singular=True)
return result
def mixure(self, mu_k,S_k, pi_k,x):
result = []
for i in range(self.K):
#print(i, self.normal(x, mu_k[i], S_k[i]))
result.append(Multi_N.pdf(x, mu_k[i], S_k[i], allow_singular=True))
return (pi_k * np.array(result).reshape(-1,1)).sum()
def log_likelihood(self,mu,S, pi, X):
l = 0
n = X.shape[0]
for i in range(n):
#print(i)
#if i in [27,28,29]:
#print(self.mixure(mu,S, pi,self.X[i]))
l += np.log(self.mixure(mu,S, pi, X[i]))
return l
def E_step(self, mu, S, pi):
"""Compute the E step of the EM algorithm.
Arguments:
mu -- component means, K x D array
S -- component covariances, K x D x D array
pi -- component weights, K x 1 array
Returns:
r_new -- updated component responsibilities, N x K array
"""
#print(mu.shape == (self.K, self.D))
#print(S.shape)
#print(S.shape == (self.K, self.D, self.D))
#print(pi.shape == (self.K, 1))
# Assert that all arguments have the right shape
assert(mu.shape == (self.K, self.D) and S.shape == (self.K, self.D, self.D) and pi.shape == (self.K, 1))
r_new = np.zeros((self.n, self.K))
# Task 1: implement the E step and return updated responsibilities
# Write your code from here...
#r_new = pi[i]*Multi_N.pdf(self.X[i],mu[1],S[i])
r_new = pi.reshape(1,-1)*self.MNormal(mu, S, self.X)
r_new /= r_new.sum(axis = 1).reshape(-1,1)
# ... to here.
assert(r_new.shape == (self.n, self.K))
return r_new
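# Illustrative summary of the E step above: the responsibility of component k for point n is
#     r_nk = pi_k * N(x_n | mu_k, S_k) / sum_j pi_j * N(x_n | mu_j, S_j)
# which is exactly the row normalisation applied to r_new.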
def M_step(self, mu, r):
"""Compute the M step of the EM algorithm.
Arguments:
mu -- previous component means, K x D array
r -- previous component responsibilities, N x K array
Returns:
mu_new -- updated component means, K x D array
S_new -- updated component covariances, K x D x D array
pi_new -- updated component weights, K x 1 array
"""
assert(mu.shape == (self.K, self.D) and r.shape == (self.n, self.K))
mu_new = np.zeros((self.K, self.D))
S_new = np.zeros((self.K, self.D, self.D))
pi_new = np.zeros((self.K, 1))
# Task 2: implement the M step and return updated mixture parameters
# Write your code from here...
#mu_new = np.sum(r[:,i]*self.X, axis=1)/r[:,i].sum()
Nk = np.sum(r, axis=0).reshape(-1,1)
mu_new = (r.T @ self.X)/Nk
#S_new = np.sum(r[:,i]*(self.X[i] - mu[i]).T @ (self.X[i] - mu[i]), axis=1)/r[:,i].sum()
for i in range(self.K):
S_new[i] = r[:,i]*(self.X - mu_new[i]).T @ (self.X - mu_new[i])/r[:,i].sum()
pi_new = Nk/self.n
# ... to here.
assert(mu_new.shape == (self.K, self.D) and S_new.shape == (self.K, self.D, self.D) and pi_new.shape == (self.K, 1))
return mu_new, S_new, pi_new
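# Illustrative summary of the M step above (standard EM updates given responsibilities r):
#     N_k  = sum_n r_nk
#     mu_k = (1/N_k) * sum_n r_nk * x_n
#     S_k  = (1/N_k) * sum_n r_nk * (x_n - mu_k)(x_n - mu_k)^T
#     pi_k = N_k / N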
def train(self, initial_params):
"""Fit a Gaussian Mixture Model (GMM) to the data in matrix X.
Arguments:
initial_params -- dictionary with fields 'mu', 'S', 'pi' and 'K'
Returns:
mu -- component means, K x D array
S -- component covariances, K x D x D array
pi -- component weights, K x 1 array
r -- component responsibilities, N x K array
"""
# Assert that initial_params has all the necessary fields
assert(all([k in initial_params for k in ['mu', 'S', 'pi']]))
mu = np.zeros((self.K, self.D))
S =
|
np.zeros((self.K, self.D, self.D))
|
numpy.zeros
|
"""
MPI-aware read and write PETSc Vec to HDF5
The goal of this module is to save snapshots of a PETSc Vec to HDF5
files, and obviously to read them again later. The obvious way to do
this is parallel HDF5. Unfortunately, distributions of HDF5 and h5py
may be built without support for parallel operation. (In particular,
the conda-forge version doesn't have it.) This is accomplished through
the following kludge:
When a KSFD.TimeSeries is created with name tsname and argument mpiok
True, the runtime environment is checked to find out if parallel HDF5
is enabled (using h5py.get_config().mpi). If so, the data are stored in
an HDF5 file named
'{name}MPI.h5'.format(name=tsname).
Note: there is a serious problem with parallel HDF5: variable length
records can't be written. If you try, you get this exception:
OSError: Can't write data (Parallel IO does not support writing VL
datatypes yet)
Since that makes parallel HDF5 a nonstarter for my purposes, mpiok
defaults to False. You won't get parallel HDF5 unless you specifically
ask for it, and then dealing with the lack of VL records is your
problem.
If not, each process stores the data it owns in a file named
'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)
where comm is the MPI communicator. If run sequentially the data will
all be stored in a file called '{name}s1r0.h5'. It is intended that
the *MPI.h5 file created using parallel HDF5 and the *s1r0.h5 file
created when running sequentially without parallel HDF5
will be the same.
The same procedure is used for finding the filename when opening in
read/write mode ('r+' or 'a').
When opening a TimeSeries for read (mode 'r') TimeSeries checks (in
order) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file, and
finally a *s1r0.h5 file, and opens the first it finds. In this case
the retrieve methods will only return the components of the vector
owned by the local process.
Finally, I will write a simple script to merge all the files of
*s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an
MPI process group of any size will be able to retrieve data written by
a process group of any size.
"""
import h5py, os, re, gc, time
import traceback as tb
import numpy as np
import petsc4py
from mpi4py import MPI
#
# These imports are placed inside a try/except so that this script can
# be executed standalone to check for syntax errors.
#
try:
from .ksfddebug import log
from .ksfdgrid import Grid
except ImportError:
from ksfddebug import log
from ksfdgrid import Grid
def logSERIES(*args, **kwargs):
log(*args, system='SERIES', **kwargs)
class KSFDTimeSeries:
"""
Base class for TimeSeries
KSFDTimeSeries is intended as an abstract base class for reading and
writing time series from KSFD solutions to HDF5 files. It is not
formally defined as an ABC: you can instantiate it if you really
wish, but it is not designed to make that a useful thing to do.
"""
def __init__(
self,
basename,
size=1,
rank=0,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Required parameter:
basename: the prefix of the filename.
Optional keyword parameters:
size=1: Number of MPI processes. This typically corresponds to
comm.size for an MPI communicator comm.
rank=0: Number of the MPI process that created this
file. Typically comm.rank.
mpiok=False: Whether parallel HDF5 should be used to store
all the data from all MPI processes in a single
file.
mode='r+': The file mode for opening the h5py.File.
retries=0: If nonzero, retry failed opens this many times.
retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
size, rank, and mpiok are used mostly to figure out what
filename to use. They need not correspond to the actual
current MPI configuration. For instance, they may correspond
to the config when the time series was created.
"""
self.get_filename(basename, size, rank, mpiok, mode)
self.retries = retries
self.retry_interval = retry_interval
self._size = size
self._rank = rank
self._mode = mode
self._tsf = self.open_with_retry()
_ = self.info # make sure '/info' exists
self.try_to_set('size', self.size)
self.try_to_set('rank', self.rank)
if 'times' in self.tsf:
self.ts = np.array(self.tsf['times'][()])
try:
self.ks = np.array(self.tsf['ks'][()])
except KeyError:
self.ks = np.arange(len(self.ts))
self.order = np.array(self.tsf['order'][()])
else:
self.ts = np.array([], dtype=float)
self.ks =
|
np.array([], dtype=int)
|
numpy.array
|
import numpy as np
import pandas as pd
import pdb
import random
##################################################
#### generate generalization data (interpolation &
#### extrapolation using one-hot vectors as input
##################################################
def gen_hier_onehot(train_size = 2, gen_type = 'int', random_training = False):
target = []
input_batch = []
output_batch = []
# words in one-hot encoding
seventh = [1,0,0,0,0,0,0,0,0,0]
second = [0,1,0,0,0,0,0,0,0,0]
third = [0,0,1,0,0,0,0,0,0,0]
fourth = [0,0,0,1,0,0,0,0,0,0]
fifth = [0,0,0,0,1,0,0,0,0,0]
sixth = [0,0,0,0,0,1,0,0,0,0]
blue = [0,0,0,0,0,0,1,0,0,0]
green = [0,0,0,0,0,0,0,1,0,0]
red = [0,0,0,0,0,0,0,0,1,0]
ball = [0,0,0,0,0,0,0,0,0,1]
# combines words into their category
ordinals = [second, third, fourth, fifth, sixth, seventh] # 'first' not included because all trials must be divergent
colors = [blue, red, green]
colors_nored = [blue, green]
shapes = [ball]
# properties of elements in picture
blue_p = [1,0,0,0]
green_p = [0,1,0,0]
red_p = [0,0,1,0]
ball_p = [0,0,0,1]
# combine properties of elements in picture
colors_p = [blue_p, red_p]
colors_p_nored = [blue_p, green_p]
shapes_p = [ball_p]
# define number of target-present and target-absent trials
targetpresencetrain = ['present','absent'] * int((train_size)/2)
targetpresencetest = ['present','absent'] * int((100)/2)
random.shuffle(targetpresencetrain)
random.shuffle(targetpresencetest)
# define possible target positions for pseudorandom simulation
possibletargpos = [4,5,6,7,8,9]
# training size is input to the function; add 100 test trials
runs = train_size + 100
# generate training and test trials
for runs in range(0,runs):
if runs < train_size:
trial = targetpresencetrain[runs]
else:
trial = targetpresencetest[runs-train_size]
# run the loop until an input-output combination is generated that is new
is_trial_new = False
while is_trial_new == False:
### generate phrase
# pick random properties for target phrase
ordinal = random.choice(ordinals)
shape = random.choice(shapes)
# the properties of possible targets depend on type of generalization test
if gen_type == 'ext':
if runs < train_size:
color = random.choice(colors_nored)
else:
ordinal = [0,0,1,0,0,0,0,0,0,0] # ordinal is third
color = [0,0,0,0,0,0,0,0,1,0] # color is red
elif gen_type == 'int':
if runs < train_size:
color = random.choice(colors)
if ordinal == [0,0,1,0,0,0,0,0,0,0]: # if ordinal is third
color = random.choice(colors_nored) # then color cannot be red
else:
color = [0,0,0,0,0,0,0,0,1,0] # color is red
ordinal = [0,0,1,0,0,0,0,0,0,0] # ordinal is third
# the target item in the picture
item_p = [[],[]]
# save properties of target for creating picture
if (color == blue) == True:
item_p[0] = [1,0,0,0]
if (color == green) == True:
item_p[0] = [0,1,0,0]
if (color == red) == True:
item_p[0] = [0,0,1,0]
if (shape == ball) == True:
item_p[1] = [0,0,0,1]
# get what the ordinal was
for ord in range(9):
if ordinal[ord] == 1:
ordnum = ord + 1
if ordnum == 1:
ordnum = 7 # seventh replaces first
### generate a picture
# run the loop to make sure the trial is divergent:
# the linear interpretation is not the hierarchical interpretation (if linear target is present)
is_picture_divergent = False
while is_picture_divergent == False:
# the properties of possible items in picture depend on type of generalization test
if gen_type == 'ext':
if runs < train_size:
colors_p = [blue_p, green_p]
else:
colors_p = [blue_p, green_p, red_p]
elif gen_type == 'int':
colors_p = [blue_p, green_p, red_p]
# define all elements in the picture
picture = []
for i in range(8): # picture has len = 8
picture.append([random.choice(colors_p),random.choice(shapes_p)])
# uncomment if linear answer must be in the picture
#picture[ordnum-1] = item_p
# count how many items are the same as the target
count = 0
for elements in range(len(picture)):
if picture[elements] == item_p:
count += 1
# make sure trial is divergent on target-present trials:
# run through picture and count how many elements to the left of the target are the same as the target
targetnum = 0
for elements in range(ordnum-1):
if picture[elements] == item_p:
targetnum += 1
if trial == 'present':
# if there are enough items the same as the target (so the hierarchical target is present)
if count >= (ordnum):
# and if the trial is divergent
if targetnum != ordnum-1:
is_picture_divergent = True
elif trial == 'absent':
if count < (ordnum):
is_picture_divergent = True
if trial == 'present':
# now that we know that the hierarchical target is present, find it
targetfound = False
while targetfound == False:
targetnum = 0
# run through all elements in the picture
for elements in range(len(picture)):
# if an element is the same as the target, count it
if picture[elements] == item_p:
targetnum += 1
# if the ordinal of target is reached, save target position and exit the loop
if targetnum == ordnum:
targetposition = elements+1
targetfound = True
elif trial == 'absent':
targetposition = 9
# to generate pseudorandom input-output mappings
if random_training == True:
targetposition = random.choice(possibletargpos)
# save the output vector
output_vec = [0] * 9
output_vec[targetposition-1] = 1
# flatten the picture, which is an array of arrays
pict_flat = [item for sublist in picture for item in sublist]
pict_flat = [item for sublist in pict_flat for item in sublist]
pict_flat = np.array(pict_flat)
# normalize the picture to make sure it has "length" 1 (length defined as Euclidean distance), as all one-hot vectors do
# this ensures that the input to the lstm has the same "net content" across all vectors
pict_norm = pict_flat / np.sqrt(np.sum(pict_flat**2))
# add to the left of the picture 10 zeros to incorporate the length of each word of the phrase
pict_pad = np.pad(pict_norm,(10,0), 'constant')
# add to the right of each word 64 zeros to incorporate the length of the picture
ordinal_pad = np.pad(ordinal,(0,64), 'constant')
color_pad = np.pad(color,(0,64), 'constant')
shape_pad = np.pad(shape,(0,64), 'constant')
# combine all three words and the picture into one flat list
input_vec = [list(ordinal_pad),list(color_pad),list(shape_pad),list(pict_pad)]
input_vec = [s for sublist in input_vec for s in sublist]
# cut the list up into four equal-sized numpy arrays
input_vec = np.array(input_vec)
input_vec = np.reshape(input_vec,newshape=(4,74))
# check whether the input-output combination is new:
# compare it to all previously generated examples in input_batch and exit loop if the generated trial is new
if runs == 0:
is_trial_new = True
else:
## make sure that the input picture is new/hasn't been created before
newness = np.zeros(runs)
for inputs in range(1,runs):
# element-wise comparison; if all are True, the two input_vecs were identical
if (input_vec == input_batch[inputs-1]).all() == True:
newness[inputs] = 1
break # stop looping through more runs
if sum(newness) == 0.0: # if current input_vec is not identical to any of the previous ones, exit the loop
is_trial_new = True
input_batch.append(input_vec)
output_batch.append(output_vec)
# return input and corresponding output for training and test sets
return np.array(input_batch[0:train_size]), np.array(output_batch[0:train_size]), np.array(input_batch[train_size:]), np.array(output_batch[train_size:])
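# Illustrative call (hypothetical values): 100 interpolation training trials plus the 100 test
# trials the function always appends.
# train_in, train_out, test_in, test_out = gen_hier_onehot(train_size=100, gen_type='int')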
######################################################
#### generate generalization data (interpolation &
#### extrapolation using full word embeddings as input
######################################################
def gen_hier_embful(train_size = 2, gen_type = 'int'):
target = []
input_batch = []
output_batch = []
# load full word embeddings
myembeddings = pd.read_csv('word2vec/embeddings.csv', header = 0)
myembeddings = myembeddings['embedding']
# convert the embeddings from list to string of floats
second = np.array(np.matrix(myembeddings[0])).ravel()
third = np.array(np.matrix(myembeddings[1])).ravel()
fourth = np.array(np.matrix(myembeddings[2])).ravel()
fifth = np.array(np.matrix(myembeddings[3])).ravel()
sixth = np.array(np.matrix(myembeddings[4])).ravel()
seventh = np.array(np.matrix(myembeddings[5])).ravel()
blue = np.array(np.matrix(myembeddings[6])).ravel()
green = np.array(np.matrix(myembeddings[7])).ravel()
red = np.array(np.matrix(myembeddings[8])).ravel()
ball = np.array(np.matrix(myembeddings[9])).ravel()
# combines words into their category
ordinals = [second, third, fourth, fifth, sixth, seventh] # 'first' not included because all trials must be divergent
colors = [blue, green, red]
colors_nored = [blue, green]
shapes = [ball]
# words in one-hot encoding; for the picture
blue_p = [1,0,0,0]
green_p = [0,1,0,0]
red_p = [0,0,1,0]
ball_p = [0,0,0,1]
# combine properties of elements in picture
colors_p = [blue_p, green_p, red_p]
colors_p_nored = [blue_p, green_p]
shapes_p = [ball_p]
# define number of target-present and target-absent trials
targetpresencetrain = ['present', 'absent'] * int((train_size)/2)
targetpresencetest = ['present', 'absent'] * int((100)/2)
random.shuffle(targetpresencetrain)
random.shuffle(targetpresencetest)
# training size is input to the function; add 100 test trials
runs = train_size + 100
# generate training and test trials
for runs in range(0,runs):
if runs < train_size:
trial = targetpresencetrain[runs]
else:
trial = targetpresencetest[runs-train_size]
# run the loop until an input-output combination is generated that is new
is_trial_new = False
while is_trial_new == False:
### generate phrase
# pick random properties for target phrase
ordinal = random.choice(ordinals)
shape = random.choice(shapes)
# the properties of possible targets depend on type of generalization test
if gen_type == 'int':
if runs < train_size:
color = random.choice(colors_nored)
else:
color = red # color is red
ordinal = third # ordinal is third
elif gen_type == 'ext':
if runs < train_size:
color = random.choice(colors)
if (ordinal == third).all() == True: # if ordinal is third
color = random.choice(colors_nored) # then color cannot be red
else:
color = red # color is red
ordinal = third # ordinal is third
# the target item in the picture
item_p = [[],[]]
# save properties of target for creating picture
if (color == blue).all() == True:
item_p[0] = [1,0,0,0]
elif (color == green).all() == True:
item_p[0] = [0,1,0,0]
elif (color == red).all() == True:
item_p[0] = [0,0,1,0]
if (shape == ball).all() == True:
item_p[1] = [0,0,0,1]
# save what the ordinal was
if (ordinal == second).all() == True:
ordnum = 2
elif (ordinal == third).all() == True:
ordnum = 3
elif (ordinal == fourth).all() == True:
ordnum = 4
elif (ordinal == fifth).all() == True:
ordnum = 5
elif (ordinal == sixth).all() == True:
ordnum = 6
elif (ordinal == seventh).all() == True:
ordnum = 7
### generate a picture
# run the loop to make sure the hierarchical target is there
# and the trial is divergent
is_picture_divergent = False
while is_picture_divergent == False:
count = 0
# the properties of possible items in picture depend on type of generalization test
if gen_type == 'int':
if runs < train_size:
colors_p = [blue_p, green_p]
else:
colors_p = [blue_p, green_p, red_p]
elif gen_type == 'ext':
colors_p = [blue_p, green_p, red_p]
# define all elements in the picture
picture = []
for i in range(8): # picture has len = 8
picture.append([random.choice(colors_p),random.choice(shapes_p)])
# uncomment if linear answer must be in the picture
#picture[ordnum-1] = item_p
# count how many items are the same as the target
count = 0
for elements in range(len(picture)):
if picture[elements] == item_p:
count += 1
# make sure trial is divergent on target-present trials:
# run through picture and count how many elements to the left of the target are the same as the target
targetnum = 0
for elements in range(ordnum-1):
if picture[elements] == item_p:
targetnum += 1
if trial == 'present':
# if there are enough items the same as the target (so the hierarchical target is present)
if count >= (ordnum):
# and if the trial is divergent
if targetnum != ordnum-1:
is_picture_divergent = True
elif trial == 'absent':
if count < (ordnum):
is_picture_divergent = True
if trial == 'present':
# now that we know that the hierarchical target is present, find it
targetfound = False
while targetfound == False:
targetnum = 0
# run through all elements in the picture
for elements in range(len(picture)):
# if an element is the same as the target, count it
if picture[elements] == item_p:
targetnum += 1
# if the ordinal of target is reached, save target position and exit the loop
if targetnum == ordnum:
targetposition = elements+1
targetfound = True
elif trial == 'absent':
targetposition = 9
# save the output vector
output_vec = [0] * 9
output_vec[targetposition-1] = 1
# flatten the picture, which is an array of arrays
pict_flat = [item for sublist in picture for item in sublist]
pict_flat = [item for sublist in pict_flat for item in sublist]
pict_flat = np.array(pict_flat)
# normalize the picture to make sure it has "length" 1 (length defined as Euclidean distance), as all one-hot vectors do
# this ensures that the input to the lstm has the same "net content" across all vectors
pict_norm = pict_flat / np.sqrt(np.sum(pict_flat**2))
# add to the left of the picture 300 zeros to incorporate the length of each full embedding
pict_pad = np.pad(pict_norm,(300,0), 'constant')
# add to the right of each word 64 zeros to incorporate the length of the picture
ordinal_pad = np.pad(ordinal,(0,64), 'constant')
color_pad = np.pad(color,(0,64), 'constant')
shape_pad = np.pad(shape,(0,64), 'constant')
# combine all three words and the picture into one flat list
input_vec = [list(ordinal_pad),list(color_pad),list(shape_pad),list(pict_pad)]
input_vec = [s for sublist in input_vec for s in sublist]
# cut the list up into four equal-sized numpy arrays
input_vec = np.array(input_vec)
input_vec = np.reshape(input_vec,newshape=(4,364))
# check whether the input-output combination is new:
# compare it to all previously generated examples in input_batch and exit loop if the generated trial is new
if runs == 0:
is_trial_new = True
else:
## make sure that the input picture is new/hasn't been created before
newness = np.zeros(runs)
for inputs in range(1,runs):
# element-wise comparison; if all are True, the two input_vecs were identical
if (input_vec == input_batch[inputs-1]).all() == True:
newness[inputs] = 1
break # stop looping through more runs
if sum(newness) == 0.0: # if current input_vec is not identical to any of the previous ones, exit the loop
is_trial_new = True
input_batch.append(input_vec)
output_batch.append(output_vec)
# return input and corresponding output for training and test sets
return np.array(input_batch[0:train_size]), np.array(output_batch[0:train_size]), np.array(input_batch[train_size:]), np.array(output_batch[train_size:])
##########################################################
#### generate generalization data (interpolation &
#### extrapolation using reduced word embeddings as input
##########################################################
def gen_hier_embred(train_size = 2, gen_type = 'int'):
target = []
input_batch = []
output_batch = []
# load dimensionality-reduced word embeddings
myembeddings = pd.read_csv('word2vec/reduced_embeddings.csv', header = 0)
myembeddings = myembeddings['embedding']
# convert the embeddings from list to string of floats
second = np.array(np.matrix(myembeddings[0])).ravel()
third = np.array(np.matrix(myembeddings[1])).ravel()
fourth = np.array(
|
np.matrix(myembeddings[2])
|
numpy.matrix
|
"""Perform a suite of ringtests for a specific combination of star, galaxy, and PSF structural and
spectral parameters while varying the redshift. Use `python ring_vs_z.py --help` to see a list of
available command line options.
This procedure compares the analytic estimate for chromatic biases to that obtained by simulating
images and fitting their ellipticity either by fitting model parameters with least squares, or
using the Hirata-Seljak-Mandelbaum regaussianization PSF correction algorithm.
"""
import os
from argparse import ArgumentParser
import logging
import lmfit
import galsim
import numpy as np
import _mypath
import chroma
def measure_shear_calib(gparam, gen_target_image, fit_tool, measurer, nring=3):
# This will serve as the function that returns an initial guess of the sheared and rotated
# galaxy parameters.
def get_ring_params(gamma, beta):
return fit_tool.get_ring_params(gparam, beta, galsim.Shear(g1=gamma.real, g2=gamma.imag))
# Do ring test for two values of the complex reduced shear `gamma`, solve for m and c.
gamma0 = 0.0 + 0.0j
gamma0_hat = chroma.ringtest(gamma0, nring, gen_target_image, get_ring_params, measurer,
silent=True)
# c is the same as the estimated reduced shear `gamma_hat` when the input reduced shear
# is (0.0, 0.0)
c = gamma0_hat.real, gamma0_hat.imag
gamma1 = 0.01 + 0.02j
gamma1_hat = chroma.ringtest(gamma1, nring, gen_target_image, get_ring_params, measurer,
silent=True)
# solve for m
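# (Illustrative note) This assumes the usual linear shear-response model
#     gamma_hat_i = (1 + m_i) * gamma_i + c_i,  i = 1, 2
# with c already fixed by the gamma = 0 measurement above, so one nonzero shear determines m.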
m0 = (gamma1_hat.real - c[0])/gamma1.real - 1.0
m1 = (gamma1_hat.imag - c[1])/gamma1.imag - 1.0
m = m0, m1
return m, c
def ring_vs_z(args):
""" Measure shear calibration parameters `m` and `c` as a function of redshift for a specific
combination of star, galaxy, and PSF structural and spectral parameters. Run
`python ring_vs_z.py --help` for a list of available command line options.
"""
dirname = os.path.dirname(args.outfile)
if not os.path.isdir(dirname):
os.mkdir(dirname)
logging.basicConfig(format="%(message)s", level=logging.INFO,
filename=args.outfile,
filemode='w')
logger = logging.getLogger("ring_vs_z")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
# build filter bandpass
bandpass = galsim.Bandpass(args.datadir+args.filter)
# build star SED
star_SED = galsim.SED(args.datadir+args.starspec)
# Thin bandpass and spectra if requested
if args.thin is not None:
star_SED = star_SED.thin(args.thin)
bandpass = bandpass.thin(args.thin)
# Use effective wavelength to set FWHM
PSF_wave = bandpass.effective_wavelength
# scale SEDs
# This probably isn't strictly required, but in general I think it's good to work with numbers
# near one.
star_SED = star_SED.withFlux(1.0, bandpass)
# By default, use args.PSF_FWHM = 0.7. However, override args.PSF_FWHM if
# PSF_r2 is explicitly set.
if args.PSF_r2 is not None:
if args.moffat:
args.PSF_FWHM = args.PSF_r2 / np.sqrt(
2.0 / (8.0*(2.0**(1.0/args.PSF_beta)-1.0)*(args.PSF_beta-2.0)))
elif args.kolmogorov:
# This line is wrong!!! What is the relation b/n FWHM and r^2 for a Kolmogorov
# profile?
args.PSF_FWHM = args.PSF_r2 / np.sqrt(2.0/np.log(256.0))
else: # default is Gaussian
args.PSF_FWHM = args.PSF_r2 / np.sqrt(2.0/np.log(256.0))
# Define the PSF
if args.moffat:
monochromaticPSF = galsim.Moffat(fwhm=args.PSF_FWHM, beta=args.PSF_beta)
elif args.kolmogorov:
monochromaticPSF = galsim.Kolmogorov(lam_over_r0 = args.PSF_FWHM / 0.976)
else:
monochromaticPSF = galsim.Gaussian(fwhm=args.PSF_FWHM)
monochromaticPSF = monochromaticPSF.shear(
g=args.PSF_ellip, beta=args.PSF_phi * galsim.degrees)
if not args.noDCR: #include DCR
PSF = galsim.ChromaticAtmosphere(monochromaticPSF, base_wavelength=PSF_wave,
zenith_angle=args.zenith_angle * galsim.degrees,
parallactic_angle=args.parallactic_angle * galsim.degrees,
alpha=args.alpha)
else: # otherwise just include a powerlaw wavelength dependent FWHM
PSF = galsim.ChromaticObject(monochromaticPSF)
PSF = PSF.dilate(lambda w:(w/PSF_wave)**args.alpha)
# Calculate sqrt(r^2) for PSF here...
# Ignoring corrections due to ellipticity for now.
if args.moffat:
r2_PSF = args.PSF_FWHM * np.sqrt(
2.0 / (8.0*(2.0**(1.0/args.PSF_beta)-1.0)*(args.PSF_beta-2.0)))
elif args.kolmogorov:
# This line is wrong!!! What is the relation b/n FWHM and r^2 for a Kolmogorov profile?
r2_PSF = args.PSF_FWHM * np.sqrt(2.0/np.log(256.0))
else: # default is Gaussian
r2_PSF = args.PSF_FWHM * np.sqrt(2.0/np.log(256.0))
offset = (args.image_x0, args.image_y0)
logger.info('# ')
logger.info('# General settings')
logger.info('# ----------------')
logger.info('# stamp size: {}'.format(args.stamp_size))
logger.info('# pixel scale: {} arcsec/pixel'.format(args.pixel_scale))
logger.info('# ring test angles: {}'.format(args.nring))
logger.info('# ')
logger.info('# Spectra settings')
logger.info('# ----------------')
logger.info('# Data directory: {}'.format(args.datadir))
logger.info('# Filter effective wavelength: {}'.format(PSF_wave))
logger.info('# Filter: {}'.format(args.filter))
logger.info('# Thinning filter by factor: {}'.format(args.thin))
logger.info('# Galaxy SED: {}'.format(args.galspec))
logger.info('# Star SED: {}'.format(args.starspec))
logger.info('# ')
if args.moffat:
logger.info('# Moffat PSF settings')
logger.info('# -------------------')
logger.info('# PSF beta: {}'.format(args.PSF_beta))
else:
logger.info('# Gaussian PSF settings')
logger.info('# ---------------------')
logger.info('# PSF phi: {}'.format(args.PSF_phi))
logger.info('# PSF ellip: {}'.format(args.PSF_ellip))
logger.info('# PSF FWHM: {} arcsec'.format(args.PSF_FWHM))
logger.info('# PSF alpha: {}'.format(args.alpha))
logger.info('# PSF sqrt(r^2): {}'.format(r2_PSF))
if not args.noDCR:
logger.info('# ')
logger.info('# Observation settings')
logger.info('# --------------------')
logger.info('# zenith angle: {} degrees'.format(args.zenith_angle))
logger.info('# ')
logger.info('# Galaxy settings')
logger.info('# ---------------')
logger.info('# Galaxy Sersic index: {}'.format(args.sersic_n))
logger.info('# Galaxy ellipticity: {}'.format(args.gal_ellip))
if args.gal_convFWHM is not None:
logger.info('# Galaxy PSF-convolved FWHM: {:6.3f} arcsec'.format(
args.gal_convFWHM))
elif args.gal_HLR is not None:
logger.info('# Galaxy HLR: {} arcsec'.format(args.gal_HLR))
else:
logger.info('# Galaxy sqrt(r2): {} arcsec'.format(args.gal_r2))
logger.info('# ')
logger.info('# Shear Calibration Results')
logger.info('# -------------------------')
logger.info(('# {:>5s}'+' {:>9s}'*8).format('z', 'anltc m1', 'ring m1', 'anltc m2', 'ring m2',
'anltc c1', 'ring c1', 'anltc c2', 'ring c2'))
zs =
|
np.arange(args.zmin, args.zmax+0.001, args.dz)
|
numpy.arange
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 08:56:18 2019
@author: rdamseh
"""
import os
from VascGraph.Tools.CalcTools import prunG, reduceG,\
getMiddleGraph, rescaleG, \
findNodes, getBranches, fixG, getCoreGraph
from VascGraph.Tools.VisTools import visG
import numpy as np
try:
from mayavi import mlab
except: pass
try:
from matplotlib import pyplot as plt
except: pass
class ValidateNetMets:
def __init__(self, Gr, Ge,
rescale=False,
middle=False,
prune=False,
outputfolder='results',
sigma=[10,20,30,40,50,60]):
self.Gr=Gr.copy()
self.Ge=Ge.copy()
if prune:
self.Gr=prunG(Gr.copy())
self.Ge=prunG(Ge.copy())
#middle graphs
if middle:
self.Gr=getMiddleGraph(self.Gr, middle)
self.Ge=getMiddleGraph(self.Ge, middle)
#rescale graphs
if rescale:
self.Gr=rescaleG(self.Gr)
self.Ge=rescaleG(self.Ge)
#find graphs vertices
self.points_real=np.array(self.Gr.GetNodesPos())
self.points_exp=np.array(self.Ge.GetNodesPos())
#find bifurcations (junction nodes)
self.idNodes_real, self.nodes_real = findNodes(self.Gr)
self.nodes_real=np.array(self.nodes_real)
#
self.idNodes_exp, self.nodes_exp = findNodes(self.Ge)
self.nodes_exp=np.array(self.nodes_exp)
# num of all nodes
self.n_nodes_r=np.shape(self.nodes_real)[0]
self.n_nodes_e=np.shape(self.nodes_exp)[0]
#reduced graphs
self.G_real_reduced=reduceG(self.Gr.copy())
self.G_exp_reduced=reduceG(self.Ge.copy())
# get branches
self.branches1=getBranches(self.Gr)
self.branches2=getBranches(self.Ge)
# branch counts, referenced when scoresG saves its stats file
self.n_branches_r=len(self.branches1)
self.n_branches_e=len(self.branches2)
self.outputfolder=outputfolder
self.sigma=sigma
def vis(self, save=False, name=None, cam=None):
from VascGraph.Tools.VisTools import setCam, createCam
from VascGraph.GraphLab import GraphPlot
def plot(g, color):
gplot=GraphPlot()
gplot.Update(g)
gplot.SetGylphSize(.01)
gplot.SetTubeRadius(2)
gplot.SetTubeColor(color)
gplot.SetTubeRadiusByScale(True)
bgcolor=(1,1,1)
if cam is None:
position = [1194.8393680906522, 1491.5272445674307, -874.4021568391549]
focal_point = [257.15006008258143, 256.92547521800316, 330.6489784843938]
view_angle = 30.0
view_up = [-0.4853531757850406, -0.39346331460859185, -0.7807809646838195]
clipping_range = [940.3721291401878, 3256.3268137240707]
cam=createCam(position=position,
focal_point=focal_point,
view_angle=view_angle,
view_up=view_up,
clipping_range=clipping_range)
# visualize matching
mlab.figure(bgcolor=bgcolor)
plot(self.Gr, color=(.3,.3,.8))
plot(self.Gcore_real, color=(.3,.3,.8))
plot(self.Gcompared_real, color=(.9,.9,.1))
setCam(cam)
if save:
mlab.savefig(name+'_FN.png', size=(1024,1024))
#
mlab.figure(bgcolor=bgcolor)
plot(self.Ge, color=(.3,.3,.8))
plot(self.Gcore_exp, color=(.3,.3,.8))
plot(self.Gcompared_exp, color=(.9,.9,.1))
setCam(cam)
if save:
mlab.savefig(name+'_FP.png', size=(1024,1024))
def matchG(self):
# REAL TO EXP
self.dist1=[]
for idx, i in enumerate(self.nodes_real):
self.dist1.append(np.sum((i-self.nodes_exp)**2, axis=1))
#real nodes with the corresponding exp. ones
self.idx1=np.argmin(self.dist1, axis=1)
self.d1=[i[self.idx1[j]]**.5 for j, i in enumerate(self.dist1)]
self.idNodes_exp_m=np.array(self.idNodes_exp)[self.idx1]
self.nodes_exp_m=self.nodes_exp[self.idx1]
# EXP TO REAL
self.dist2=[]
for idx, i in enumerate(self.nodes_exp):
self.dist2.append(np.sum((i-self.nodes_real)**2, axis=1))
#exp nodes with the corresponding real. ones
self.idx2=np.argmin(self.dist2, axis=1)
self.d2=[i[self.idx2[j]]**.5 for j, i in enumerate(self.dist2)]
self.idNodes_real_m=np.array(self.idNodes_real)[self.idx2]
self.nodes_real_m=self.nodes_real[self.idx2]
def scoresG(self, portion=[.99],
save=False,
foldername=None):
sigma=self.sigma
self.matchG()
if foldername:
pass
else:
foldername=self.outputfolder
def decideThresh(v, portion):
vals,bins=np.histogram(v,bins=1000)
vals=vals.astype(float)/sum(vals)
s=0
thresh=0
for idx, i in enumerate(vals):
s+=i
if s>portion:
thresh=bins[idx]
break
return thresh
# match nodes and get G scores
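# (Illustrative note) Each matched-junction distance d is mapped through 1 - exp(-d**2/(2*sigma**2))
# and averaged, giving the geometric false-negative (GFNR) and false-positive (GFPR) rates below.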
self.GFNR=[]
self.GFPR=[]
for j in portion:
thresh1=decideThresh(self.d1,j)
thresh2=decideThresh(self.d2,j)
g_FNR_=[]
for i in sigma:
v1=np.array(self.d1)
v1=v1*(v1<thresh1)
v2=1-np.exp(-v1**2/(2*i*i))
v3=np.mean(v2); g_FNR_.append(v3)
self.GFNR.append(g_FNR_)
g_FPR_=[]
for i in sigma:
v1=np.array(self.d2)
v1=v1*(v1<thresh2)
v2=1-np.exp(-v1**2/(2*i*i))
v3=np.mean(v2); g_FPR_.append(v3)
self.GFPR.append(g_FPR_)
# ravel lists
self.GFNR=np.ravel(self.GFNR)
self.GFPR=np.ravel(self.GFPR)
if save:
path=os.getcwd()
dirr=path+'/'+foldername
if not os.path.exists(dirr):
os.mkdir(dirr)
np.savetxt(dirr+'/GFNR.txt', self.GFNR)
np.savetxt(dirr+'/GFPR.txt', self.GFPR)
np.savetxt(dirr+'/stats.txt', [self.n_nodes_r,
self.n_nodes_e,
self.n_branches_r,
self.n_branches_e])
def plotDist(self, save=False, foldername=None):
try:
import seaborn as sns
except:
print('To run this function, \'seaborn\' should be installed.')
return
sns.set_style('darkgrid')
if foldername:
pass
else:
foldername=self.outputfolder
plt.figure(figsize=(8.3,5.5))
sns.kdeplot(self.d1,
label=r'$\mathbf{J}_{r}$ $\rightarrow$ $\mathbf{J}_{exp}$',
cut=0, marker='s', markevery=0.05, linewidth=2)
sns.kdeplot(self.d2,
label=r'$\mathbf{J}_{e}$ $\rightarrow$ $\mathbf{J}_{real}$',
cut=0, marker='8', markevery=0.05, linewidth=2)
plt.legend(fontsize=22)
plt.ylabel('Probability', fontsize=20); plt.xlabel('$D$', fontsize=20)
plt.xlim(xmin=0 , xmax=80)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
if save:
path=os.getcwd()
dirr=path+'/'+foldername
if not os.path.exists(dirr):
os.mkdir(dirr)
plt.savefig(dirr+'/dist.eps', format='eps', dpi=1000, transparent=True)
plt.close()
def matchC(self, sigma=10):
############################
# match nodes in both graphs based on distance threshold
############################
# REAL TO EXP
self.matchG()
self.d1C=np.array(self.d1)
self.idx1_pass=np.where(self.d1C<sigma)[0] #to find matched nodes that pass the condition
self.idNodes_real_pass=np.array(self.idNodes_real)[self.idx1_pass]
self.idx1_fail=np.where(self.d1C>sigma)[0] #to find matched nodes that fail the condition
self.idNodes_real_fail=np.array(self.idNodes_real)[self.idx1_fail]
#find mapping1
self.mapping1=[[i,j] for i,j in zip(self.idNodes_real, self.idNodes_exp_m)]
# EXP TO REAL
self.d2C=np.array(self.d2)
self.idx2_pass=np.where(self.d2C<sigma)[0] #to find matched nodes that pass the condition
self.idNodes_exp_pass=
|
np.array(self.idNodes_exp)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 19:37:08 2018
test test
@author: Administrator
"""
import logging
import math
import os
import os.path
import sys
import time
import uuid
#import hashlib
import time
import base64
import os
import sys
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
import openpyxl as xl
import pandas as pd
import sympy as sp
import scipy.optimize as op
from mpl_toolkits.mplot3d import proj3d
from openpyxl.styles import Alignment, Border, Font, PatternFill, Side
from PyQt5.QtWidgets import QTableWidgetItem as Qitem
import ui
import Func
import parameters
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QBrush, QColor
import pyomo.environ as pe
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget,QTabWidget
import Fig
class MainWindow(QTabWidget, ui.Ui_MainWindow):
def __init__(self,parent=None):
super(MainWindow,self).__init__(parent)
self.setWindowTitle("Kinematics Calculation of wiper linkage")
self.resize(1000, 1000)
# window_pale = QtGui.QPalette()
# window_pale.setColor(self.backgroundRole(),QColor(0,0,255))
# self.setPalette(window_pale)
# self.setAutoFillBackground(True)
#self.setupUi(self)
self.WindowInput = MainWindow_Input()
self.WindowOpt = MainWindow_Opt()
self.WindowAlpha = MainWindow_alphaNumeric()
self.WindowExtreme = MainWindow_extreme()
self.WindowTolerance = MainWindow_Tolerance()
self.addTab(self.WindowInput,u"input")
self.addTab(self.WindowOpt,u"optimization")
self.addTab(self.WindowAlpha,u"Output-AlphaNumeric")
self.addTab(self.WindowExtreme,u"Output-ExtremeValues")
self.addTab(self.WindowTolerance,u"Tolerance Calculation")
self.WindowInput.LoadButton.clicked.connect(self.WindowInput.LoadClicked) # load data
self.WindowInput.LoadButton.clicked.connect(self.WindowOpt.OptClicked) # optimization
self.WindowInput.LoadButton.clicked.connect(self.WindowInput.UpdateOptValue) # set items in input sheet
self.WindowInput.LoadButton.clicked.connect(self.WindowOpt.setOptItem) # set items in optimization sheet
self.WindowInput.OutputButton.clicked.connect(self.WindowInput.close)
self.WindowInput.OutputButton.clicked.connect(self.WindowAlpha.show)
self.WindowInput.OutputButton.clicked.connect(self.resetAlpha) # move to output sheet
self.WindowOpt.pushButton.clicked.connect(self.WindowOpt.OptClicked) # opt
self.WindowOpt.pushButton.clicked.connect(self.WindowOpt.setOptItem) # set items in optimization sheet
self.WindowAlpha.TableButton.clicked.connect(self.WindowAlpha.alphaTable) # do alphanumeric calculation
#self.WindowAlpha.plotButton.clicked.connect(lambda: self.WindowAlpha.plot(gs.outKPIall)) # do alphanumeric calculation
self.WindowAlpha.plotButton.clicked.connect(self.WindowAlpha.plot) # do alphanumeric calculation
#self.WindowAlpha.plotButton.clicked.connect(figs.show) # do alphanumeric calculation
# self.WindowAlpha.AnimationButton.clicked.connect(self.WindowAlpha.animate) # do alphanumeric calculation
self.WindowAlpha.ExtremeButton.clicked.connect(self.WindowExtreme.ExtremeTable) # do extremetable calculation
self.WindowAlpha.ExtremeButton.clicked.connect(self.WindowAlpha.close)
self.WindowAlpha.ExtremeButton.clicked.connect(self.WindowExtreme.show) # move to extreme tab
self.WindowAlpha.ExtremeButton.clicked.connect(self.reset2)
self.WindowInput.ToleranceButton.clicked.connect(self.WindowInput.close)
self.WindowInput.ToleranceButton.clicked.connect(self.WindowTolerance.show) # move to tolerance tab
self.WindowInput.ToleranceButton.clicked.connect(self.resetTolerance)
self.WindowTolerance.ButtonCal.clicked.connect(self.WindowTolerance.Tolerance) # do tolerance calculation
self.WindowInput.SaveButton.clicked.connect(self.writeMainCordinate) # write cordinate
self.WindowInput.SaveButton.clicked.connect(self.writeDetailCordinate)
self.WindowInput.SaveButton.clicked.connect(self.WindowInput.updateNewsheet)
self.WindowInput.SaveButton.clicked.connect(self.saveExcel) # save excel
self.WindowInput.SaveButton.clicked.connect(self.writejson) # save excel
def writejson(self):
outputjson ='output.json'
Func.writejson(outputjson,outputs,gs)
def saveExcel(self):
gs.wb.save(gs.excel_out)
print('save completed')
def writeMainCordinate(self):
outM1=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm1,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #Master -UWL,[alpha,beta,NYS_T,NYS_A,NYK_T,NYK_A,C[0][0],C[0][1],C[0][2],Db[0][0],Db[0][1],Db[0][2]]
outM2=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm2,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #Master-OWL [alpha,beta,NYS_T,NYS_A,NYK_T,NYK_A,C[0][0],C[0][1],C[0][2],Db[0][0],Db[0][1],Db[0][2]]
outS3=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm12,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #slave UWL
outS4=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm22,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #slave OWL
if gs.MechanicType =='Center':
outM3=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm12,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #Slave -UWL,
outM4=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm22,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #Slave -OWL,
outS1=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm1,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #Master -UWL,
outS2=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm2,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #Master -OWL,
if gs.Master =='driver side':
array = [gs.A,gs.Ap,gs.B,gs.B2 , gs.F, gs.Fp, gs.E, gs.F2,gs.Fp2,gs.E2,outM1[6:12],outS1[6:12],
outM1[6:12],outS1[6:12],outM2[6:12],outS2[6:12],outM3[6:12],outS3[6:12],outM4[6:12],outS4[6:12]]
else :
array = [gs.A,gs.Ap,gs.B2,gs.B , gs.F2, gs.Fp2, gs.E2, gs.F,gs.Fp,gs.E,outS1[6:12],outM1[6:12],
outS1[6:12],outM1[6:12],outS2[6:12],outM2[6:12],outS3[6:12],outM3[6:12],outS4[6:12],outM4[6:12]]
gs.CordinateList =[y for x in array for y in x]
num = len(gs.CordinateList)
for i in range(num):
Func.write(gs.sheetDesign1,i+1,2,'%.4f'%gs.CordinateList[i])
gs.wb1.save(filename=gs.excel_design1)
else:
if gs.Master =='driver side':
array = [gs.A,gs.Ap,gs.F,gs.Fp,gs.F2,gs.Fp2 ,gs.B, gs.E, gs.B2, gs.E2,outM1[6:12],outS3[6:12],
outM1[6:12],outS3[6:12],outM2[6:12],outS4[6:12]]
else:
array = [gs.A,gs.Ap,gs.F2,gs.Fp2,gs.F,gs.Fp ,gs.B2, gs.E2, gs.B, gs.E,outS3[6:12],outM1[6:12],
outS3[6:12],outM1[6:12],outS4[6:12],outM2[6:12]]
gs.CordinateList =[y for x in array for y in x]
num = len(gs.CordinateList)
for i in range(num):
Func.write(gs.sheetDesign1,i+1,2,'%.4f'%gs.CordinateList[i])
gs.wb1.save(filename=gs.excel_design1)
def writeDetailCordinate (self):
alphaList = np.linspace(gs.xm1,gs.xm1+360,12,endpoint =False)
for alpha in alphaList:
if gs.MechanicType =='Center' :
outM= Func.Output(gs.BC,gs.CD,gs.ED,alpha,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #
outS = Func.Output(gs.BC2,gs.CD2,gs.ED2,alpha,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #
else:
outM= Func.Output(gs.BC, gs.CD, gs.ED, alpha, gs.A, gs.B , gs.E, gs.F, KBEW=gs.KBEW) #
alpha2 =outM[1]+gs.Delta2
outS = Func.Output(gs.BC2,gs.CD2, gs.ED2,alpha2,gs.A2,gs.B2, gs.E2, gs.F2, KBEW=gs.KBEW2) #
if gs.Master =='driver side':
gs.CordinateDetailList.extend(outM[6:12])
gs.CordinateDetailList.extend(outS[6:12])
else:
gs.CordinateDetailList.extend(outS[6:12])
gs.CordinateDetailList.extend(outM[6:12])
num = len(gs.CordinateDetailList)
for i in range(num):
Func.write(gs.sheetDesign2,i+1,2,'%.4f'%gs.CordinateDetailList[i])
gs.wb2.save(filename=gs.excel_design2)
def resetTolerance(self):
self.addTab(self.WindowTolerance, u"Tolerance Calculation")
self.addTab(self.WindowInput,u"input")
self.addTab(self.WindowOpt,u"optimization")
self.addTab(self.WindowAlpha,u"AlphaNumeric")
self.addTab(self.WindowExtreme,u"Extreme values")
def resetAlpha(self):
self.addTab(self.WindowAlpha,u"AlphaNumeric")
self.addTab(self.WindowExtreme,u"Extreme values")
self.addTab(self.WindowTolerance, u"Tolerance Calculation")
self.addTab(self.WindowInput,u"input")
self.addTab(self.WindowOpt,u"optimization")
def reset(self):
self.addTab(self.WindowOpt,u"optimization")
self.addTab(self.WindowAlpha,u"AlphaNumeric")
self.addTab(self.WindowExtreme,u"Extreme values")
self.addTab(self.WindowTolerance, u"Tolerance Calculation")
self.addTab(self.WindowInput,u"input")
def reset2(self):
self.addTab(self.WindowExtreme,u"Extreme values")
self.addTab(self.WindowTolerance, u"Tolerance Calculation")
self.addTab(self.WindowInput,u"input")
self.addTab(self.WindowOpt,u"optimization")
self.addTab(self.WindowAlpha,u"AlphaNumeric")
# =============================================================================
class MainWindow_Tolerance(QMainWindow,ui.Ui_Tolerance):
def __init__(self,parent=None):
super(MainWindow_Tolerance,self).__init__(parent)
self.setupUi(self)
def Tolerance(self):
gs.noCrank = self.comboLink.currentText() #Master,Slave
obj = self.comboObj.currentText() #wipping angle,NYS_T
listToleranceStrall = ["BC", "ED", 'Delta','CD',"F_X", "F_Y", "F_Z","Fp_X", "Fp_Y", "Fp_Z",'FE',"A_X", "A_Y", "A_Z","Ap_X", "Ap_Y", "Ap_Z",'Distance',
"BC2", "ED2", 'Delta2','CD2',"F_X2", "F_Y2", "F_Z2", "Fp_X2", "Fp_Y2", "Fp_Z2",'FE2',"A_X2", "A_Y2", "A_Z2","Ap_X2", "Ap_Y2", "Ap_Z2",'Distance2'] #TBD:DElta,Distance
numTolerance = int(len(listToleranceStrall)/2)
if obj =='outputAngle':
t = 0
gs.index = 1 # to write out
elif obj =='tangent force angle':
t = 1
gs.index =4 # to write out
else:
print('please configure objective function')
if gs.noCrank=='Master':
listToleranceStr = listToleranceStrall[0:numTolerance]
Base = [eval('gs.'+st) for st in listToleranceStr]
Target = Func.Tolerance(Base,gs.xm1,gs.xm2)[t]
gs.no = 2
elif gs.noCrank == 'Slave':
listToleranceStr = listToleranceStrall[numTolerance:]
Base = [eval('gs.'+st) for st in listToleranceStr]
Target = Func.Tolerance(Base,gs.xm12,gs.xm22)[t]
gs.no=3
else:
print('please configure noCrank')
errorPList = [ 'gs.errorP.'+t for t in listToleranceStr]
errorPositive = [eval(t) for t in errorPList]
errorNList = [ 'gs.errorN.'+t for t in listToleranceStr]
errorNegative = [eval(t) for t in errorNList]
ToleranceValue = Base.copy() # to be changed
if gs.noCrank=='Master': #master
for i in range(numTolerance):
Func.updateTolerance(ToleranceValue, gs.w2ErrorList, gs.w2List, gs.kpiList, i, t,Target, errorPositive[i],
errorNegative[i], gs.xm1, gs.xm2)
arrayi = ToleranceValue.copy()
print(ToleranceValue)
for i in range(numTolerance):
arrayi[i] = Base[i]
Func.updateTolerance(arrayi, gs.w2ErrorList, gs.w2List, gs.kpiList, i, t,Target, errorPositive[i],
errorNegative[i], gs.xm1, gs.xm2)
elif gs.noCrank == 'Slave':
for i in range(numTolerance):
Func.updateTolerance(ToleranceValue, gs.w2ErrorList, gs.w2List, gs.kpiList, i, t,Target,
errorPositive[i],
errorNegative[i], gs.xm12, gs.xm22)
arrayi = ToleranceValue.copy()
for i in range(numTolerance):
arrayi[i] = Base[i]
Func.updateTolerance(arrayi,gs.w2ErrorList, gs.w2List, gs.kpiList, i, t,Target,
errorPositive[i],
errorNegative[i], gs.xm12, gs.xm22)
gs.Parameter = np.tile(listToleranceStr, 2)
#gs.w2ErrorList=np.array(gs.w2ErrorList).cumsum()
# start write to ui/excel
self.tableTolerance.setRowCount(2*numTolerance)
for i in range(2*numTolerance):
self.tableTolerance.setItem(i, 0, Qitem(str(gs.no)))
self.tableTolerance.setItem(i, 1, Qitem(gs.Parameter[i]))
self.tableTolerance.setItem(i, 2, Qitem('%.4f'%gs.kpiList[i]))
self.tableTolerance.setItem(i, 3, Qitem('%.4f'%gs.w2List[i]))
self.tableTolerance.setItem(i, 4, Qitem('%.4f'%gs.w2ErrorList[i]))
Func.write(gs.sheet6,3,4,gs.no)
Func.write(gs.sheet6,3,6,gs.index)
startColumn = 2
startRow = 10
for i in range(2*numTolerance):
Func.write(gs.sheet6, startRow + i, startColumn, gs.no) # TBD:N2
Func.write(gs.sheet6, startRow + i, startColumn + 1, gs.Parameter[i]) # parameter
Func.write(gs.sheet6, startRow + i, startColumn + 2, '%.4f'%gs.kpiList[i])
Func.write(gs.sheet6, startRow + i, startColumn + 3, '%.4f'%gs.w2List[i])
Func.write(gs.sheet6, startRow + i, startColumn + 4, '%.4f'%gs.w2ErrorList[i])
print('Tolerance calculation completed')
class MainWindow_extreme(QMainWindow,ui.Ui_extreme):
def __init__(self,parent=None):
super(MainWindow_extreme,self).__init__(parent)
self.setupUi(self)
def ExtremeTable(self):
if gs.MechanicType =='Center':
gs.UWL1 = gs.xm2+90
gs.UWL2 = gs.xm2+90
else:
gs.UWL1 = gs.xm2 + 90
gs.UWL2 = gs.xm22 + 90 #TBD
if gs.DriveType =='Standard':
gs.Park = gs.xm1+90
else:
gs.Park = gs.xm1+90 #TBD
self.tableExtreme1.setItem(0, 2, Qitem('%.2f' % gs.Park))
self.tableExtreme1.setItem(0, 3, Qitem('%.2f' % gs.UWL1))
self.tableExtreme1_2.setItem(0, 2, Qitem('%.2f' % gs.Park))
self.tableExtreme1_2.setItem(0, 3, Qitem('%.2f' % gs.UWL2))
self.tableExtreme1.setItem(0,0,Qitem('%.2f'%gs.w2Target))
self.tableExtreme1.setItem(0,1,Qitem('%.2f'%gs.w2cal))
self.tableExtreme1_2.setItem(0,0,Qitem('%.2f'%gs.w3Target))
self.tableExtreme1_2.setItem(0,1,Qitem('%.2f'%gs.w3cal))
for i in range(10):
self.tableExtreme2.setItem(0,i,Qitem('%.2f'%gs.maxArray[i]))
self.tableExtreme2.setItem(1,i,Qitem('%.2f'%gs.minArray[i]))
self.tableExtreme2_2.setItem(0,i,Qitem('%.2f'%gs.maxArray2[i]))
self.tableExtreme2_2.setItem(1,i,Qitem('%.2f'%gs.minArray2[i]))
for i in range(4):
self.tableExtreme3.setItem(0,6+i,Qitem('%.2f'%gs.maxArray[10+i]))
self.tableExtreme3.setItem(1,6+i,Qitem('%.2f'%gs.minArray[10+i]))
self.tableExtreme3_2.setItem(0,6+i,Qitem('%.2f'%gs.maxArray2[10+i]))
self.tableExtreme3_2.setItem(1,6+i,Qitem('%.2f'%gs.minArray2[10+i]))
startRow = [10, 15, 27, 32]
startCol = [3, 9, 3, 9]
Func.write(gs.sheet5, 3, 6, gs.w2Target)
Func.write(gs.sheet5, 4, 6, gs.w2cal)
Func.write(gs.sheet5, 20, 6, gs.w3Target)
Func.write(gs.sheet5, 21, 6, gs.w3cal)
Func.write(gs.sheet5, 3, 11, '%.2f' % gs.Park)
Func.write(gs.sheet5, 4, 11, '%.2f' % gs.UWL1)
Func.write(gs.sheet5, 20, 11, '%.2f' % gs.Park)
Func.write(gs.sheet5, 21, 11, '%.2f' % gs.UWL2)
for i in range(10):
Func.write(gs.sheet5, startRow[0], startCol[0] + i, gs.maxArray[i])
Func.write(gs.sheet5, startRow[0] + 1, startCol[0] + i, gs.minArray[i])
Func.write(gs.sheet5, startRow[2], startCol[2] + i, gs.maxArray2[i])
Func.write(gs.sheet5, startRow[2] + 1, startCol[2] + i, gs.minArray2[i])
for i in range(4):
Func.write(gs.sheet5, startRow[1], startCol[1] + i, gs.maxArray[10 + i])
Func.write(gs.sheet5, startRow[1] + 1, startCol[1] + i, gs.minArray[10 + i])
Func.write(gs.sheet5, startRow[3], startCol[3] + i, gs.maxArray2[10 + i])
Func.write(gs.sheet5, startRow[3] + 1, startCol[3] + i, gs.minArray2[10 + i])
# write alphanumeric
print('extreme values complete')
class MainWindow_alphaNumeric(QMainWindow,ui.Ui_alphaNumeric):
def __init__(self,parent=None):
super(MainWindow_alphaNumeric,self).__init__(parent)
self.setupUi(self)
@staticmethod
def animate():
number=gs.num
Atemp=np.array([gs.A]*number).T
Btemp=np.array([gs.B]*number).T
Etemp=np.array([gs.E]*number).T
Ftemp=np.array([gs.F]*number).T
A2temp=np.array([gs.A2]*number).T
B2temp=np.array([gs.B2]*number).T
E2temp=np.array([gs.E2]*number).T
F2temp=np.array([gs.F2]*number).T
Ctemp= np.array((gs.outCordinateall['Cx'],gs.outCordinateall['Cy'],gs.outCordinateall['Cz']))
Dtemp= np.array((gs.outCordinateall['Dx'],gs.outCordinateall['Dy'],gs.outCordinateall['Dz']))
C2temp= np.array((gs.outCordinateall['Cx2'],gs.outCordinateall['Cy2'],gs.outCordinateall['Cz2']))
D2temp= np.array((gs.outCordinateall['Dx2'],gs.outCordinateall['Dy2'],gs.outCordinateall['Dz2']))
dataAB=np.r_[Atemp,Btemp]
dataAB2=np.r_[A2temp,B2temp]
dataEF=np.r_[Etemp,Ftemp]
dataEF2=np.r_[E2temp,F2temp]
dataCD =np.r_[Ctemp,Dtemp]
dataCD2=np.r_[C2temp,D2temp]
dataBC=np.r_[Btemp,Ctemp]
dataDE=np.r_[Dtemp,Etemp]
dataBC2=np.r_[B2temp,C2temp]
dataDE2=np.r_[D2temp,E2temp]
dataCD2=np.r_[C2temp,D2temp]
data=np.ones((10,6,number))
data[0]=dataAB
data[1]=dataBC
data[2]=dataCD
data[3]=dataDE
data[4]=dataEF
data[5]=dataAB2
data[6]=dataBC2
data[7]=dataCD2
data[8]=dataDE2
data[9]=dataEF2
#TBD
# initial point
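# (Illustrative note) orthogonal_proj below replaces matplotlib's default perspective projection
# with a near-orthographic one by overriding proj3d.persp_transformation, a common mpl_toolkits workaround.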
def orthogonal_proj(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0, -0.0001,zback]])
proj3d.persp_transformation = orthogonal_proj
fig2 = plt.figure()
ax2 = p3.Axes3D(fig2)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_zlabel('z')
Label=['AB','BC','CD','DE','EF','AB2','BC2','CD2','DE2','EF2']
colors ='bgcmkbgcmk'
linestyles=['-','-','-','-','-','--','--','--','--','--']
cordinateMin = np.min(data[0],axis=1)
cordinateMax = np.max(data[0],axis=1)
#gs.data =data
for item in data:
cordinateMaxNew = np.max(item,axis=1)
cordinateMinNew = np.min(item,axis=1)
for i in range(6):
if cordinateMaxNew[i] > cordinateMax[i]:
cordinateMax[i] = cordinateMaxNew[i]
if cordinateMinNew[i] < cordinateMin[i]:
cordinateMin[i] = cordinateMinNew[i]
xmin = min (cordinateMin[0],cordinateMin[3])
ymin = min (cordinateMin[1],cordinateMin[4])
zmin = min (cordinateMin[2],cordinateMin[5])
xmax = max (cordinateMax[0],cordinateMax[3])
ymax = max (cordinateMax[1],cordinateMax[4])
zmax = max (cordinateMax[2],cordinateMax[5])
xxrange = xmax-xmin
yrange = ymax-ymin
zrange = zmax-zmin
ab=gs.B-gs.A
lines = [ax2.plot([data[i][0,0],data[i][3,0]],[data[i][1,0],data[i][4,0]],[data[i][2,0],data[i][5,0]],label=Label[i],color=colors[i],linestyle=linestyles[i])[0] for i in range(10)]
gs.ab = [ab[0]/xxrange,ab[1]/yrange,ab[2]/zrange] #scale axis equal
az=-90-Func.degree(math.atan(gs.ab[0]/gs.ab[1]))
el=-np.sign(ab[1])*Func.degree(math.atan(gs.ab[2]/math.sqrt(gs.ab[0]**2+gs.ab[1]**2)))
ax2.set_xlim3d(xmin,xmax)
ax2.set_ylim3d(ymin,ymax)
ax2.set_zlim3d(zmin,zmax)
ax2.set_title('initial position')
#ax2.axis('scaled')
ax2.view_init(azim=az, elev=el)
# Attaching 3D axis to the figure
gs.pause =False
def onClick(event):
gs.pause ^= True
def update_lines ( num, dataLines, lines, ax):
if not gs.pause:
for line, data in zip(lines, dataLines):
# NOTE: there is no .set_data() for 3 dim data.
Cx = data[0, num]
Cy = data[1, num]
Cz = data[2, num]
Dx = data[3, num]
Dy = data[4, num]
Dz = data[5, num]
temp = [[Cx, Dx], [Cy, Dy]]
line.set_data(temp)
line.set_3d_properties([Cz, Dz])
return lines
fig = plt.figure()
ax = p3.Axes3D(fig)
# =============================================================================
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_title('kinematics')
#ax.set_xbound(0.9*xmin,1.1*xmax)
#ax.set_ybound(0.9*ymin,1.1*ymax)
#ax.set_zbound(0.9*zmin,1.1*zmax)
# =============================================================================
#ax.axis('equal')
ax.view_init(azim=az,elev=el)
# =============================================================================
txt=['A','B','C','D','E','F','A2','B2','C2','D2','E2','F2']
# =============================================================================
# for i in range(10):
# ax.text(data[i][0][0],data[i][1][0],data[i][2][0],txt[i])
# =============================================================================
# Creating the Animation object
Label=['AB','BC','CD','DE','EF','AB2','BC2','CD2','DE2','EF2']
lines = [ax.plot([data[i][0,0],data[i][3,0]],[data[i][1,0],data[i][4,0]],[data[i][2,0],data[i][5,0]],label=Label[i],color=colors[i],linestyle=linestyles[i])[0] for i in range(10)]
#start of each line
#fig.canvas.mpl_connect('button_press_event', onClick)
gs.line_ani=animation.FuncAnimation(fig, update_lines, number, fargs=(data, lines,ax),interval=int(3600/gs.num), blit=True)
#==============================================================================
# plt.rcParams['animation.ffmpeg_path']='G:\\wai\\ffmpeg\\bin\\ffmpeg.exe'
#==============================================================================
plt.legend()
plt.show()
#return gs.line_ani
def plot(self):
self.figs = Fig.ApplicationWindow(gs)
self.figs.show( )
def getextreme(self):
n = len(gs.outKPIall.columns)
assert (n%2==1)
n1 = int((n+1)/2) # master part
for i in range(1,n1):
listT = Func.getMax(gs.outKPIall,i)
gs.maxArray.append(listT[0])
gs.maxArray.append(listT[1])
listT = Func.getMin(gs.outKPIall,i)
gs.minArray.append(listT[0])
gs.minArray.append(listT[1])
for i in range(n1,n):
listT = Func.getMax(gs.outKPIall,i)
gs.maxArray2.append(listT[0])
gs.maxArray2.append(listT[1])
listT = Func.getMin(gs.outKPIall,i)
gs.minArray2.append(listT[0])
gs.minArray2.append(listT[1])
gs.w2cal=gs.maxArray[1]-gs.minArray[1]
gs.w3cal=gs.maxArray2[1]-gs.minArray2[1]
print('w2=%.2f'%gs.w2cal+'\tw3=%.2f'%gs.w3cal)
return [gs.maxArray,gs.minArray,gs.maxArray2,gs.minArray2]
#outCordinate.to_excel('outputCordinate.xlsx')
#Range=[1.2,50,8,90,8]# 90 TBD
#write to excel
def getoutall(self): # alpha calculation ,return outkpi,outcordinate dataframe
print('-----------------------start map calculation-------------------------')
output=[]
output2=[]
# =============================================================================
alpha0=gs.xm1
gs.step=int(self.Textstep.text())
if gs.DriveType=='Standard':
gs.num =int(abs(360/gs.step))
else:
gs.num=math.ceil(abs(gs.Alfa/gs.step))+1
alphaList = [alpha0 +gs.step * x for x in range(gs.num-1)]
alphaList.append(alpha0+gs.Alfa)
for alpha in alphaList:
if gs.MechanicType =='Center':
outM= Func.Output(gs.BC,gs.CD,gs.ED,alpha,gs.A,gs.B,gs.E,gs.F,KBEW= gs.KBEW) #
outS = Func.Output(gs.BC2,gs.CD2,gs.ED2,alpha,gs.A2,gs.B2,gs.E2,gs.F2,KBEW = gs.KBEW2) #
else:
outM= Func.Output(gs.BC,gs.CD,gs.ED,alpha,gs.A,gs.B,gs.E,gs.F,KBEW = gs.KBEW) #
alpha2 =outM[1]+gs.Delta2
outS = Func.Output(gs.BC2,gs.CD2,gs.ED2,alpha2,gs.A2,gs.B2,gs.E2,gs.F2,KBEW = gs.KBEW2) #
output.append(outM)
output2.append(outS)
beta_sList=[]
beta_ssList=[]
beta_s2List=[]
beta_ss2List=[]
out = pd.DataFrame(output)
out2= pd.DataFrame(output2)
out.columns =['alpha','beta','NYS_T','NYS_A','NYK_T','NYK_A','Cx','Cy','Cz','Dx','Dy','Dz']
out2.columns=['alpha2','beta2','NYS_T2','NYS_A2','NYK_T2','NYK_A2','Cx2','Cy2','Cz2','Dx2','Dy2','Dz2']
series1=pd.Series([gs.zeroAngle]*gs.num)
series2=pd.Series([out['beta'][0]]*gs.num)
series3=pd.Series([out2['alpha2'][0]]*gs.num)
series4=pd.Series([out2['beta2'][0]]*gs.num)
out['alpha']=out['alpha'].sub(series1,axis=0)
out['beta'] = out['beta'].sub(series2,axis=0)
out2['alpha2'] = out2['alpha2'].sub(series3,axis=0)
out2['beta2'] = out2['beta2'].sub(series4,axis=0)
for i in range(gs.num-1):
beta_s =(out['beta'][i+1]-out['beta'][i])/(alphaList[i+1]-alphaList[i])
beta_sList.append(beta_s)
beta_s2 =(out2['beta2'][i+1]-out2['beta2'][i])/(alphaList[i+1]-alphaList[i])
beta_s2List.append(beta_s2)
beta_sList.append(0) # fill with 0
beta_s2List.append(0) # fill with 0
for i in range(gs.num-1):
beta_ss =(beta_sList[i+1]-beta_sList[i])/(alphaList[i+1]-alphaList[i])
beta_ssList.append(beta_ss)
beta_ss2 =(beta_s2List[i+1]-beta_s2List[i])/(alphaList[i+1]-alphaList[i])
beta_ss2List.append(beta_ss2)
beta_ssList.append(0) # fill with 0
beta_ss2List.append(0) # fill with 0
out['beta_s']=beta_sList
out['beta_ss']=beta_ssList
out2['beta_s2']=beta_s2List
out2['beta_ss2']=beta_ss2List
#outKPIall=pd.concat([outKPI,outKPI2],axis=1)
outall = pd.concat([out,out2],axis=1)
outall = outall.astype('float64')
cols =['alpha','beta','beta_s','beta_ss','NYS_T','NYS_A','NYK_T','NYK_A']
cols2=['beta2','beta_s2','beta_ss2','NYS_T2','NYS_A2','NYK_T2','NYK_A2']
colskpi = cols+cols2
colsCordinate=['Cx','Cy','Cz','Dx','Dy','Dz']
colsCordinate2=['Cx2','Cy2','Cz2','Dx2','Dy2','Dz2']
colsCordinateall = colsCordinate + colsCordinate2
# =============================================================================
gs.outKPIall = outall.loc[:,colskpi]
gs.outCordinateall = outall.loc[:,colsCordinateall]
# outKPI['NYS_T']=outKPI['NYS_T'].astype('float64')
# outKPI['NYS_A']=outKPI['NYS_A'].astype('float64')
# outKPI['NYK_T']=outKPI['NYK_T'].astype('float64')
# outKPI['NYK_A']=outKPI['NYK_A'].astype('float64')
# outKPI2['NYS_T2']=outKPI2['NYS_T2'].astype('float64')
# outKPI2['NYS_A2']=outKPI2['NYS_A2'].astype('float64')
# outKPI2['NYK_T2']=outKPI2['NYK_T2'].astype('float64')
# outKPI2['NYK_A2']=outKPI2['NYK_A2'].astype('float64')
return [gs.outKPIall, gs.outCordinateall]
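# The beta_s / beta_ss columns built above are one-sided (forward) finite
# differences of the transfer function,
#   beta_s[i]  = (beta[i+1] - beta[i])     / (alpha[i+1] - alpha[i])
#   beta_ss[i] = (beta_s[i+1] - beta_s[i]) / (alpha[i+1] - alpha[i]),
# with the last element padded with 0 so every column keeps gs.num entries.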
def writealphaNumeric(self):
startRow = 10
startCol = 1
self.tableAlfa.setRowCount(gs.num)
for i in range(gs.num):
for j in range(gs.outKPIall.shape[1]):
self.tableAlfa.setItem(i,j,Qitem('%.2f'%gs.outKPIall.iloc[i,j]))
Func.write(gs.sheet4,startRow+i , startCol+j , gs.outKPIall.iloc[i,j])
Func.write(gs.sheet4,3,6,gs.step)
def alphaTable(self):
[gs.outKPIall, gs.outCordinateall] = self.getoutall()
self.writealphaNumeric()
self.getextreme()
#self.plot(gs.outKPIall)
#animation
class MainWindow_Opt(QMainWindow,ui.Ui_Optimization):
def __init__(self,parent=None):
super(MainWindow_Opt,self).__init__(parent)
self.setupUi(self)
def setOptItem(self):
gs.listOpt1 = [[gs.EDb,gs.CDb,gs.xs1b,gs.xs2b,gs.w2optb,gs.M1NYS_Tb,gs.M2NYS_Tb,gs.M1NYS_Tb+gs.M2NYS_Tb]]
gs.listOpt1.append([gs.ED,gs.CD,gs.xs1,gs.xs2,gs.w2opt,gs.M1NYS_T,gs.M2NYS_T,gs.M1NYS_T+gs.M2NYS_T])
gs.listOpt2 = [[gs.ED2b,gs.CD2b,gs.xs12b,gs.xs22b,gs.w3optb,gs.S1NYS_Tb,gs.S2NYS_Tb,gs.S1NYS_Tb+gs.S2NYS_Tb]]
gs.listOpt2.append([gs.ED2,gs.CD2,gs.xs12,gs.xs22,gs.w3opt,gs.S1NYS_T,gs.S2NYS_T,gs.S1NYS_T+gs.S2NYS_T])
if gs.MechanicType =='Center':
gs.parameterOpt = 'ED'
else:
gs.parameterOpt = 'BC'
gs.listOpt2[0][0] = gs.BC2b
gs.listOpt2[1][0] = gs.BC2
for i,item in enumerate(gs.listOpt1):
for j,par in enumerate(item):
self.TableOpt.setItem(i,j,Qitem('%.4f'%par))
self.TableOpt.setItem(2,1,Qitem('%.2f'%gs.w2opt))
self.TableOpt.setItem(2,6,Qitem('%.2f'%gs.M1NYS_T))
self.TableOpt.setItem(2,7,Qitem('%.2f'%gs.M2NYS_T))
for i,item in enumerate(gs.listOpt2):
for j,par in enumerate(item):
self.TableOpt2.setItem(i,j,Qitem('%.4f'%par))
self.TableOpt2.setItem(2,1,Qitem('%.2f'%gs.w3opt))
self.TableOpt2.setItem(2,6,Qitem('%.2f'%gs.S1NYS_T))
self.TableOpt2.setItem(2,7,Qitem('%.2f'%gs.S2NYS_T))
self.textEdit_3.setText(gs.parameterOpt)
self.TextW2.setText('%.4f'%gs.w2Target)
self.TextW3.setText('%.4f'%gs.w3Target)
@staticmethod
def rounding2():
gs.CD2=round(gs.CD2b,1)
if gs.MechanicType =='Center':
gs.ED2= round(gs.ED2b,1)
else:
gs.BC2=round(gs.BC2b,1)
gs.xm12 = gs.xm12b
gs.xm22 = gs.xm22b
gs.Ra_Rb2=float(gs.BC2/gs.CD2)
outS3=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm12,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #slave UWL
outS4=Func.Output(gs.BC2,gs.CD2,gs.ED2,gs.xm22,gs.A2,gs.B2,gs.E2,gs.F2,KBEW=gs.KBEW2) #slave OWL
#[alpha,beta,NYS_T,NYS_A,NYK_T,NYK_A,C[0][0],C[0][1],C[0][2],D[0][0],D[0][1],D[0][2]]
gs.xs12 = outS3[1]
gs.xs22 = outS4[1]
gs.w3opt=Func.angleDiff(gs.xs22,gs.xs12)
gs.S1NYS_T=outS3[2]
gs.S2NYS_T=outS4[2]
@staticmethod
def rounding():
gs.CD=round(gs.CDb,1)
gs.ED=round(gs.EDb,1)
gs.xm1 = gs.xm1b
gs.xm2 = gs.xm2b
if gs.DriveType=='Reversing':
gs.Alfa = round(gs.Alfab,1)
gs.offsetAngle = round(gs.offsetAngleb,1)
[equal1,equal2,NYK_T,NYS_T,Dbx,Dby,Dbz] = Func.OutputSymbol (gs.A,gs.B,gs.E,gs.F)
Equal1inline = repr(equal1[0]).replace('xCrank','gs.ED').replace('xLink','gs.CD').replace('xm','angle[0]').replace('xs','angle[1]').replace('sin','sp.sin').replace('cos','sp.cos').replace('sqrt','sp.sqrt').replace('BC','gs.BC') # link length equation for master link
Equal2inline = repr(equal2).replace('xCrank','gs.ED').replace('xLink','gs.CD').replace('xm','angle[0]').replace('xs','angle[1]').replace('sin','sp.sin').replace('cos','sp.cos').replace('sqrt','sp.sqrt').replace('BC','gs.BC')
EqualInline = [Equal1inline,Equal2inline]
def Finline(angle):
F1 = eval(Equal1inline)
F2 = 1000*(eval(Equal2inline)**2-1)
return([F1,F2])
if gs.KBEW =='+x':
[xim1,xis1]= op.fsolve(Finline,[0,0])
else :
[xim1,xis1] = op.fsolve(Finline,[math.pi,math.pi])
#[xim2,xis2]= op.fsolve(Finline,[math.pi,math.pi])
#gs.xm1 = gs.xm3+gs.offsetAngle
gs.xm1 = gs.xm1b
gs.xm2 = gs.xm1+gs.Alfa
else:
gs.offsetAngle = 0
gs.Alfa = 360
gs.Ra_Rb = float(gs.BC/gs.CD)
outM1=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm1,gs.A,gs.B,gs.E,gs.F,KBEW=gs.KBEW) #Master -UWL,[alpha,beta,NYS_T,NYS_A,NYK_T,NYK_A,C[0][0],C[0][1],C[0][2],Db[0][0],Db[0][1],Db[0][2]]
outM2=Func.Output(gs.BC,gs.CD,gs.ED,gs.xm2,gs.A,gs.B,gs.E,gs.F,KBEW= gs.KBEW) #Master-OWL [alpha,beta,NYS_T,NYS_A,NYK_T,NYK_A,C[0][0],C[0][1],C[0][2],Db[0][0],Db[0][1],Db[0][2]]
gs.xs1 = outM1[1]
gs.xs2 = outM2[1]
# cxs1 = math.cos(Func.rad(gs.xs1))
# cxs2 = math.cos(Func.rad(gs.xs2))
# sxs1 = math.sin(Func.rad(gs.xs1))
# sxs2 = math.sin(Func.rad(gs.xs2))
gs.w2opt = Func.wipAngle(gs.xs2,gs.xs1,gs.KBEW)
gs.M1NYS_T=float(outM1[2])
gs.M2NYS_T=float(outM2[2])
gs.M1NYK_T=float(outM1[4])
gs.M2NYK_T=float(outM2[4])
def OptClicked(self):
ab = gs.B-gs.A
ab2 = gs.B2-gs.A2
BC = gs.BC
Distance = np.linalg.norm(ab)
gs.Distance = np.sign(ab[2])*Distance
Distance2 =
|
np.linalg.norm(ab2)
|
numpy.linalg.norm
|
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import pandas as pd
MMI = {'z0': np.arange(0, 10),
'z1': np.arange(1, 11),
'rgb0': [(255, 255, 255),
(255, 255, 255),
(191, 204, 255),
(160, 230, 255),
(128, 255, 255),
(122, 255, 147),
(255, 255, 0),
(255, 200, 0),
(255, 145, 0),
(255, 0, 0)],
'rgb1': [(255, 255, 255),
(191, 204, 255),
(160, 230, 255),
(128, 255, 255),
(122, 255, 147),
(255, 255, 0),
(255, 200, 0),
(255, 145, 0),
(255, 0, 0),
(200, 0, 0)],
'nan_color': (0, 0, 0, 0),
'resolution': 0.1}
POP = {'z0': [0, 4, 49, 99, 499, 999, 4999, 9999],
'z1': [4, 49, 99, 499, 999, 4999, 9999, 50000],
'rgb0': [(255, 255, 255),
(191, 191, 191),
(159, 159, 159),
(127, 127, 127),
(95, 95, 95),
(63, 63, 63),
(31, 31, 31),
(0, 0, 0)],
'rgb1': [(255, 255, 255),
(191, 191, 191),
(159, 159, 159),
(127, 127, 127),
(95, 95, 95),
(63, 63, 63),
(31, 31, 31),
(0, 0, 0)],
'nan_color': (0, 0, 0, 0),
'resolution': 1.0}
TOPO = {'z0': [-100, 0, 50, 350, 1000, 1800, 2300, 2600, 4000, 9000, 9100],
'z1': [0, 50, 350, 1000, 1800, 2300, 2600, 4000, 9000, 9200],
'rgb0': [(195, 255, 193),
(110, 135, 80),
(120, 160, 90),
(230, 220, 110),
(210, 170, 80),
(195, 140, 100),
(100, 80, 70),
(60, 60, 60),
(255, 255, 255),
(255, 255, 255),
(255, 128, 0)],
'rgb1': [(110, 135, 80),
(120, 160, 90),
(230, 220, 110),
(210, 170, 80),
(195, 140, 100),
(100, 80, 70),
(60, 60, 60),
(255, 255, 255),
(255, 255, 255),
(255, 128, 0),
(255, 0, 0)],
'nan_color': (128, 128, 128, 0),
'resolution': 1.0}
PALETTES = {'mmi': MMI,
'pop': POP,
'shaketopo': TOPO}
DEFAULT_NCOLORS = 256
class ColorPalette(object):
def __init__(self, name, z0, z1, rgb0, rgb1, resolution=None, nan_color=0, is_log=False):
"""Construct a DataColorMap from input Z values and RGB specs.
Args:
name: Name of colormap.
z0: Sequence of z0 values.
z1: Sequence of z1 values.
rgb0: Sequence of RGB triplets (values between 0-255).
rgb1: Sequence of RGB triplets (values between 0-255).
resolution: Desired Resolution of the data values in data units.
For example, the preset population color map has a resolution
of 1.0, meaning that we want to be able to distinguish between
color values associated with a difference of 1 person. This
sets the number of colors to be:
`max(256,int((max(z1)-min(z0))/resolution))`
nan_color: Either 0 or RGBA quadruplet (A is for Alpha, where 0 is
transparent, and 255 is opaque.)
"""
# validate that lengths are all identical
if not (len(z0) == len(z1) == len(rgb0) == len(rgb1)):
raise Exception('Lengths of input sequences to ColorPalette() '
'must be identical.')
self._is_log = is_log
z0 = np.array(z0)
z1 = np.array(z1)
self._vmin = z0.min()
self._vmax = z1.max()
if isinstance(nan_color, int):
nan_color = [nan_color] * 4
self.nan_color = np.array(nan_color) / 255.0
# Change the z values to be between 0 and 1
adj_z0 = (z0 - self._vmin) / (self._vmax - self._vmin)
# should this be z0 - vmin?
adj_z1 = (z1 - self._vmin) / (self._vmax - self._vmin)
# loop over the sequences, and construct a dictionary of red, green,
# blue tuples
B = -.999 * 255
# this will mark the y0 value in the first row (isn't used)
E = .999 * 255
# this will mark the y1 value in the last row (isn't used)
# if we add dummy rows to our rgb sequences, we can do one simple loop
# through.
rgb0_t = rgb0.copy()
rgb1_t = rgb1.copy()
# append a dummy row to the end of RGB0
rgb0_t.append((E, E, E))
# prepend a dummy row to the beginning of RGB1
rgb1_t.insert(0, (B, B, B))
# Make the column of x values have the same length as the rgb sequences
x = np.append(adj_z0, adj_z1[-1])
cdict = {'red': [],
'green': [],
'blue': []
}
for i in range(0, len(x)):
red0 = rgb1_t[i][0] / 255.0
red1 = rgb0_t[i][0] / 255.0
green0 = rgb1_t[i][1] / 255.0
green1 = rgb0_t[i][1] / 255.0
blue0 = rgb1_t[i][2] / 255.0
blue1 = rgb0_t[i][2] / 255.0
cdict['red'].append((x[i], red0, red1))
cdict['green'].append((x[i], green0, green1))
cdict['blue'].append((x[i], blue0, blue1))
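# Each cdict row follows matplotlib's LinearSegmentedColormap convention
# (x, y0, y1): y0 is the colour used just below x (rgb1 of the previous
# interval) and y1 the colour used from x upwards (rgb0 of the next interval).
# The dummy B/E rows only fill the unused y0 of the first row and y1 of the
# last row.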
self._cdict = cdict.copy()
# choose the number of colors to store the colormap
# if we choose too low, then there may not be enough colors to
# accurately capture the resolution of our data.
# this isn't perfect
numcolors = DEFAULT_NCOLORS
if resolution is not None:
ncolors_tmp = np.ceil((self._vmax - self._vmin) / resolution)
numcolors = int(max(DEFAULT_NCOLORS, ncolors_tmp))
self._cmap = LinearSegmentedColormap(name, cdict, N=numcolors)
self._cmap.set_bad(self.nan_color)
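# Worked example of the numcolors choice above: the 'pop' preset spans
# z0.min() = 0 to z1.max() = 50000 at resolution 1.0, so
# numcolors = max(256, ceil(50000/1.0)) = 50000, while the 'mmi' preset spans
# 0 to 10 at resolution 0.1 and keeps the default max(256, 100) = 256.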
@classmethod
def fromPreset(cls, preset):
"""Construct a ColorPalette from one of several preset color maps.
Args:
preset: String to represent one of the preset color maps (see
getPresets()).
Returns:
ColorPalette object.
"""
if preset not in PALETTES:
raise Exception('Preset %s not in list of supported presets.'
% preset)
z0 = PALETTES[preset]['z0'].copy()
z1 = PALETTES[preset]['z1'].copy()
rgb0 = PALETTES[preset]['rgb0'].copy()
rgb1 = PALETTES[preset]['rgb1'].copy()
nan_color = PALETTES[preset]['nan_color']
resolution = None
if 'resolution' in PALETTES[preset]:
resolution = PALETTES[preset]['resolution']
return cls(preset, z0=z0, z1=z1, rgb0=rgb0, rgb1=rgb1,
nan_color=nan_color, resolution=resolution)
@classmethod
def getPresets(cls):
"""Get list of preset color palettes.
Returns:
List of strings which can be used with fromPreset() to create a
ColorPalette.
"""
return list(PALETTES.keys())
@classmethod
def fromFile(cls, filename):
"""Load a ColorPalette from a file.
ColorPalette files should be formatted as below:
--------------------------------------------
#This file is a test file for ColorPalette.
#Lines beginning with pound signs are comments.
#Lines beginning with pound signs followed by a "$" are variable
#definition lines.
#For example, the following line defines a variable called nan_color.
#$nan_color: 0,0,0,0
#$name: test
#$resolution: 0.01
Z0 R0 G0 B0 Z1 R1 G1 B1
0 0 0 0 1 85 85 85
1 85 85 85 2 170 170 170
2 170 170 170 3 255 255 255
--------------------------------------------
These files contain all the information needed to assign colors to any
data value. The data values are in the Z0/Z1 columns, the colors
(0-255) are in the RX/GX/BX columns. In the sample file above, a data
value of 0.5 would be assigned the color (42.5/255,42.5/255,42.5/255).
Args:
filename: String file name pointing to a file formatted as above.
Returns:
ColorPalette object.
"""
nan_color = (0, 0, 0, 0)
name = 'generic'
resolution = None
f = open(filename, 'rt')
for line in f.readlines():
tline = line.strip()
if tline.startswith('#$nan_color'):
parts = tline[2:].split(':')
value = parts[1].split(',')
colortuple = tuple([int(xpi) for xpi in value])
nan_color = colortuple
elif tline.startswith('#$name'):
parts = tline[2:].split(':')
name = parts[1].strip()
elif tline.startswith('#$resolution'):
parts = tline[2:].split(':')
resolution = float(parts[1].strip())
f.close()
df = pd.read_csv(filename, comment='#', sep=r'\s+', header=0)
rgb0 = list(zip(df.R0, df.G0, df.B0))
rgb1 = list(zip(df.R1, df.G1, df.B1))
return cls(name=name, z0=df.Z0, z1=df.Z1, rgb0=rgb0, rgb1=rgb1,
nan_color=nan_color, resolution=resolution)
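# Usage sketch (the file name is hypothetical): given a text file in the format
# documented above,
#   palette = ColorPalette.fromFile('test_palette.txt')
#   palette.vmin, palette.vmax   # -> 0.0 and 3.0 for the sample table above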
@classmethod
def fromColorMap(cls, name, z0, z1, cmap, resolution=None, nan_color=0, is_log=False):
"""Construct a ColorPalette from ranges of Z values and a Matplotlib Colormap.
Args:
name (str): Name of Colormap.
z0 (sequence): Sequence of z0 values.
z1 (sequence): Sequence of z1 values.
cmap (Colormap): Matplotlib Colormap object.
resolution (float): Desired Resolution of the data values in data units.
For example, the preset population color map has a resolution
of 1.0, meaning that we want to be able to distinguish between
color values associated with a difference of 1 person. This
sets the number of colors to be:
`max(256,int((max(z1)-min(z0))/resolution))`
nan_color (0 or 4 sequence): Either 0 or RGBA quadruplet (A is for Alpha, where 0 is
transparent, and 255 is opaque.)
"""
# use the whole dynamic range of the colormap
if len(z0) != len(z1):
raise Exception('Lengths of input sequences to '
'ColorPalette.fromColorMap() must be identical.')
zmin = np.min(z0)
zmax = np.max(z1)
rgb0 = []
rgb1 = []
for zbottom, ztop in zip(z0, z1):
znorm0 = (zbottom - zmin) / (zmax - zmin)
rgb_bottom = np.round(np.array(cmap(znorm0)[0:3]) * 255)
rgb0.append(rgb_bottom.tolist())
znorm1 = (ztop - zmin) / (zmax - zmin)
rgb_top = np.round(np.array(cmap(znorm1)[0:3]) * 255)
rgb1.append(rgb_top.tolist())
return cls(name, z0, z1, rgb0, rgb1, resolution=resolution, nan_color=nan_color, is_log=is_log)
@property
def vmin(self):
"""Property accessor for vmin.
Returns:
Minimum data value for ColorPalette.
"""
return self._vmin
@vmin.setter
def vmin(self, value):
"""Property setter for vmin.
Args:
value: Float data value to which vmin should be set.
"""
self._vmin = value
@property
def vmax(self):
"""Property accessor for vmax.
Returns:
Maximum data value for ColorPalette.
"""
return self._vmax
@vmax.setter
def vmax(self, value):
"""Property setter for vmax.
Args:
value: Float data value to which vmax should be set.
"""
self._vmax = value
@property
def cmap(self):
"""
Property accessor for the Matplotlib colormap contained within the
ColorPalette object.
Returns:
Matplotlib colormap object.
"""
return self._cmap
def getDataColor(self, value, color_format='mlab'):
"""Get the RGB color associated with a given data value.
Args:
value: Data value for which color should be retrieved.
color_format: Output format for color specification. Choices are:
- 'mlab' Return a 4 element tuple of (R,G,B,A) with float
values between 0 and 1.
- '255' Return a 4 element tuple of (R,G,B,A) with integer
values between 0 and 255.
- 'hex' Return an HTML-style hex color specification (#RRGGBB).
- 'array' Return an RGBA array of the same 1 or 2D dimensions as value.
Returns:
The color value associated with the input data value.
Raises:
AttributeError when color_format is not recognized.
"""
if self._is_log:
value =
|
np.log(value)
|
numpy.log
|
"""After analysis with WiFeS, this suite of routines can extract a star optimally
and calculate its radial velocity.
WARNING - this code is still not properly documented or complete. Any contributions
welcome!
example lines of code...
Executing from the code directory, e.g. with Margaret's output directory:
rv_process_dir('PROCESSED_DATA_DIRECTORY', outdir='/priv/mulga1/mstream/wifes/wifes/tools')
fn = 'T2m3wr-20140617.144009-0167.p11.fits'
flux,sig,wave = read_and_find_star_p11(fn)
"""
from __future__ import print_function
try:
import pyfits
except ImportError:
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.optimize as op
import pdb
import glob
import pickle
#from readcol import readcol
from scipy.interpolate import InterpolatedUnivariateSpline
from astropy.modeling import models, fitting
from os.path import exists
plt.ion()
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return array[idx]
def onclick(event):
global ix, iy
ix, iy = event.xdata, event.ydata
# print 'x = %d, y = %d'%(
# ix, iy)
# assign global variable to access outside of function
global coords
coords.append((ix, iy))
# Disconnect after 2 clicks
if len(coords) == 2:
fig.canvas.mpl_disconnect(cid)
plt.close(1)
return
coords = []
def read_and_find_star_p11(fn, manual_click=False, npix=7, subtract_sky=True,sky_rad=2):
"""Read in a cube and find the star.
Return a postage stamp around the star and the coordinates
within the stamp
NB This didn't really work, as the details of flux calibration don't easily
enable optimal extraction.
This function should probably be REMOVED.
"""
a = pyfits.open(fn)
#Assume Stellar mode if the flux is zero in any of the first columns
if a[0].data[0,0,1]==0:
flux = a[0].data[:,:,13:]
sig = a[1].data[:,:,13:]
else:
flux = a[0].data
sig = a[1].data
image = np.median(flux,axis=0)
maxpx = np.unravel_index(np.argmax(image[1:-1,1:-1]),image[1:-1,1:-1].shape)
maxpx = (maxpx[0]+1,maxpx[1]+1)
plt.clf()
plt.imshow(image,interpolation='nearest', vmin=0)
plt.plot(maxpx[1],maxpx[0],'rx')
if subtract_sky:
xy = np.meshgrid(range(image.shape[1]),range(image.shape[0]))
dist = np.sqrt((xy[0]-maxpx[1])**2.0 + (xy[1]-maxpx[0])**2.0)
sky = np.where( (xy[0] > 0) & (xy[1] > 0) &
(xy[0] < image.shape[1]-1) & (xy[1] < image.shape[0]-1) &
(dist > sky_rad) & (dist < image.shape[1]))
for i in range(flux.shape[0]):
flux[i,:,:] -= np.median(flux[i,sky[0],sky[1]])
ymin = np.min([np.max([maxpx[0]-3,0]),image.shape[0]-npix])
xmin = np.min([np.max([maxpx[1]-3,0]),image.shape[1]-npix])
flux_stamp = flux[:,ymin:ymin+npix,xmin:xmin+npix]
sig_stamp = sig[:,ymin:ymin+npix,xmin:xmin+npix]
wave = a[0].header['CRVAL3'] + np.arange(flux.shape[0])*a[0].header['CDELT3']
return flux_stamp,sig_stamp,wave
def read_and_find_star_p08(fn, manual_click=False, npix=7, subtract_sky=True,
sky_rad=2, fig_fn='', fig_title=None,
do_median_subtraction=False, arm='',min_slit_i=0,):
"""Read in a cube and find the star.
Return a postage stamp around the star and the wavelength scale
NB This didn't really work, as the details of flux calibration don't easily
enable optimal extraction.
Note: This may give unexpected results when more than a single star is
within the IFU.
Parameters
----------
fn: string
filename
npix: int
Number of pixels to extract
"""
a = pyfits.open(fn)
Obj_name = a[0].header['OBJNAME']
Obs_date = a[0].header['DATE-OBS'].split('T')[0]
RA = a[0].header['RA']
DEC = a[0].header['DEC']
# Determine the spectrograph mode
# ccd_sec has form [x_min:x_max, y_min:y_max]
y_min = int(a[0].header["CCDSEC"].split(",")[-1].split(":")[0])
# Using Full Frame
if y_min == 1:
flux = np.array([a[i].data for i in range(1,26)])
# Stellar mode (i.e. half frame)
else:
flux = np.array([a[i].data for i in range(1,13)])
wave = a[1].header['CRVAL1'] + np.arange(flux.shape[2])*a[1].header['CDELT1']
image = np.median(flux,axis=2)
if do_median_subtraction:
image = np.log10(image)
image -= np.median(image)
#!!! 1->7 is a HACK - because WiFeS seems to often fail on the edge pixels !!!
plt.clf()
global fig
fig = plt.figure(1)
plt.imshow(image,interpolation='nearest')
# Set title
if fig_title is not None:
plt.title(fig_title)
if manual_click == True:
global coords
# Call click func
global cid
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show(1)
maxpx = (int(round(np.min([coords[0][1], coords[1][1]]))),
int(round(np.min([coords[0][0], coords[1][0]]))))
coords = []
else:
maxpx = np.unravel_index(np.argmax(image[:,10:-10]),image[:,10:-10].shape)
maxpx = (maxpx[0],maxpx[1]+10)
# Plotting image
plt.close("all")
fig, axes = plt.subplots(2,2)
ax_im, ax_y, ax_x, _ = axes.flatten()
_.set_visible(False)
im_cmap = ax_im.imshow(image,interpolation='nearest')
cb = fig.colorbar(im_cmap, ax=ax_im, fraction=0.0155, pad=0.0)
ax_im.plot(maxpx[1],maxpx[0],'wx')
fig.suptitle(str(Obj_name) + '_' + str(Obs_date) + '_(' + str(RA) + ','
+ str(DEC) + ')_' + arm)
# Plotting X and Y distributions
ax_y.plot(np.log10(np.sum(image[:,min_slit_i:], axis=1)),
np.arange(image.shape[0]), "r.-")
ax_y.set_ylim(image.shape[0],0)
#ax_y.set_xscale('log')
ax_x.plot(np.arange(image.shape[1]), np.log10(np.sum(image, axis=0)), ".-")
#ax_x.set_yscale('log')
# Set aspect the same
asp_im = np.abs(float(np.diff(ax_im.get_xlim())[0]) / np.diff(ax_im.get_ylim())[0])
asp_x = float(np.diff(ax_x.get_xlim())[0]) / np.diff(ax_x.get_ylim())[0]
asp_y = float(np.diff(ax_y.get_xlim())[0]) / -np.diff(ax_y.get_ylim())[0]
ax_x.set_aspect(asp_x/asp_im)
ax_y.set_aspect(asp_y/asp_im)
ax_x.set_xlabel('x pixel')
ax_x.set_ylabel(r'$\log_{10}$(x counts)')
ax_im.set_ylabel('y pixel')
ax_y.set_xlabel(r'$\log_{10}$(y counts)')
cb.ax.tick_params(labelsize="xx-small")
ax_im.tick_params(axis='both', which='major', labelsize="xx-small")
ax_x.tick_params(axis='both', which='major', labelsize="xx-small")
ax_y.tick_params(axis='both', which='major', labelsize="xx-small")
# Plot sum along y axis
#ax_y.plot(np.sum(maxpx[0], axis=0), np.arange(maxpx.shape[0]), ".-")
#ax_x.plot(np.arange(maxpx.shape[1]), np.sum(maxpx[0], axis=0), ".-")
# Sky Subtraction
if subtract_sky:
xy = np.meshgrid(range(image.shape[1]),range(image.shape[0]))
dist = np.sqrt((xy[0]-maxpx[1])**2.0 + (xy[1]-maxpx[0])**2.0)
sky = np.where( (xy[0] > 0) & (xy[1] > 0) &
(xy[0] < image.shape[1]-1) & (xy[1] < image.shape[0]-1) &
(dist > sky_rad) & (dist < image.shape[1]))
for i in range(flux.shape[2]):
flux[:,:,i] -= np.median(flux[sky[0],sky[1],i])
ymin = np.min([np.max([maxpx[0]-npix//2,0]),image.shape[0]-npix])
xmin = np.min([np.max([maxpx[1]-npix//2,0]),image.shape[1]-npix])
flux_stamp = flux[ymin:ymin+npix,xmin:xmin+npix,:]
# Offset mins so plotted lines are at edge of pixels
xminp = xmin - 0.5
yminp = ymin - 0.5
# Plot vertical bounds
ax_im.plot([xminp, xminp], [yminp+npix, yminp], c="r")
ax_im.plot([xminp+npix, xminp+npix], [yminp+npix, yminp], c="r")
# Plot horizontal bounds
ax_im.plot([xminp, xminp+npix], [yminp+npix, yminp+npix], c="r")
ax_im.plot([xminp, xminp+npix], [yminp, yminp], c="r")
if len(fig_fn)>0:
#plt.gcf().set_size_inches(5*asp_im, 5/asp_im)
plt.savefig(fig_fn, bbox_inches='tight')
return flux_stamp,wave
def weighted_extract_spectrum(flux_stamp_in, readout_var=None):
"""Optimally extract the spectrum based on a constant weighting
Based on a p08 file axis ordering, but transposes axes
as required.
Readout variance is roughly 11 in the p08 extracted spectra
Parameters
----------
flux_stamp: numpy array
nx x ny x nwave IFU image as a function of wavelength
readout_var: float (optional)
Readout variance in extracted spectrum in DN.
TODO:
1) Look for and remove bad pix/cosmic rays.
2) Remove dodgy constant for readout_var.
"""
if flux_stamp_in.shape[0]>flux_stamp_in.shape[1]:
flux_stamp = np.transpose(flux_stamp_in, (1,2,0))
else:
flux_stamp = flux_stamp_in
#Find the readout variance roughly if it isn't given.
if readout_var is None:
rsdev = 1.4826/np.sqrt(2)*np.nanmedian(np.abs(flux_stamp[0,0,1:]-flux_stamp[0,0,:-1]))
readout_var = rsdev**2
#Find the median flux over all wavelengths, limiting to be >0
flux_med = np.maximum(np.median(flux_stamp,axis=2),0)
pixel_var = flux_med + readout_var
weights = flux_med/pixel_var
n_spaxels = np.prod(weights.shape)
#Form a weighted average, then multiply by n_spaxels to get a sum
spectrum = n_spaxels * np.array(
[np.sum(flux_stamp[:,:,i]*weights)/np.sum(weights) for i in range(flux_stamp.shape[2])])
#Old calculation of sigma. Lets be a little more readable!
sig = np.array([np.sqrt(np.sum((np.maximum(flux_stamp[:,:,i],0)+readout_var)*weights**2)) for i in range(flux_stamp.shape[2])])
#The variance of each pixel is flux_stamp + readout_var, with flux_stamp being an estimate
#of flux per pixel, which should not be less than zero.
#var = [np.sum((np.maximum(flux_stamp[:,:,i],0)+readout_var)*weights**2)/np.sum(weights)**2 for i in range(flux_stamp.shape[2])]
#sig = n_spaxels * np.sqrt(np.array(var))
return spectrum,sig
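# Minimal usage sketch for a reduced p08 cube (file name hypothetical, mirroring
# the p11 example in the module docstring):
#   flux_stamp, wave = read_and_find_star_p08('T2m3wr-20140617.144009-0167.p08.fits')
#   spectrum, sig = weighted_extract_spectrum(flux_stamp)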
def conv_ambre_spect(ambre_dir,ambre_conv_dir):
"""Take all the AMBRE spectra from a directory, convolve and re-sample
by a factor of 10, then save to a new directory"""
infns = glob.glob(ambre_dir + '/*fits')
for infn in infns:
data = pyfits.getdata(infn)
data = np.convolve(data,np.ones(10)/10., 'same')
conv_data = data[10*np.arange(90000,dtype='int')].astype('float32')
ix_start = infn.rfind('/') + 1
ix_end = infn.rfind('.')
outfn = infn[ix_start:ix_end] + 'conv.fits'
pyfits.writeto(ambre_conv_dir + '/' + outfn,conv_data, clobber=True)
def conv_phoenix_spect(pho_dir,pho_conv_dir):
"""
Take all phoenix spectra from a directory, convolve to 0.1A,
then save to a new directory
Currently resampling onto a wavelength grid of 0.1A also, from
3000 to 12000A to match AMBRE spectra
also mostly matching filenames
"""
infns = glob.glob(pho_dir + '/*.fits')
for ii,infn in enumerate(infns):
data = pyfits.getdata(infn)
wav = pyfits.getdata('WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')
##go from vacuum to air wavelengths
wav = wav/(1.0+2.735182E-4+131.4182/wav**2+2.76249e8/wav**4)
cdata = np.convolve(data,np.ones(10)/10.0,'same')
intwav = 0.1*np.arange(90000)+3000.0
icdata = np.interp(intwav,wav,cdata)
n1 = infn.split('/')[-1].split('lte')[1].split('-')
n2 = 'g'+n1[1]
n1 = 'p'+n1[0]
outname = pho_conv_dir+'/'+n1 + ':'+n2+':m0.0:t01:z+0.00:a+0.00.PHOENIXconv.fits'
pyfits.writeto(outname,icdata,clobber=True)
print('convolving '+ str(ii+1) +' out of ' + str(len(infns)))
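# The wavelength conversion above is the standard vacuum-to-air refraction
# formula. Rough check (illustrative arithmetic only): at 6000 A vacuum the
# correction factor is ~1 + 2.77e-4, giving an air wavelength of ~5998.3 A,
# i.e. the familiar ~1.7 A vacuum-air offset in the red.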
def make_wifes_p08_template(fn, out_dir,rv=0.0):
"""From a p08 file, create a template spectrum for future cross-correlation.
The template is interpolated onto a 0.1 Angstrom grid (to match higher resolution
templates.
Parameters
----------
fn: string
p08 fits filename (full path)
out_dir: string
Output directory
rv: float, optional
Radial velocity in km/s removed (together with the header heliocentric correction) when shifting the template to rest; default 0.0
"""
flux_stamp,wave = read_and_find_star_p08(fn)
heliocentric_correction = pyfits.getheader(fn)['RADVEL']
star = pyfits.getheader(fn)['OBJECT']
spectrum,sig = weighted_extract_spectrum(flux_stamp)
dell_template = 0.1
wave_template=np.arange(90000)*dell_template + 3000
spectrum_interp = np.interp(wave_template,wave*(1 - (rv - heliocentric_correction)/2.998e5),spectrum)
outfn = out_dir + star + ':' + fn.split('/')[-1]
pyfits.writeto(outfn,spectrum_interp,clobber=True)
def rv_fit_mlnlike(shift,modft,data,errors,gaussian_offset):
"""Return minus the logarithm of the likelihood of the model fitting the data
Parameters
----------
shift: float
Shift in pixels
modft: array-like
Real numpy Fourier transform of the model spectrum.
data: array-like
spectral data.
errors: array-like
uncertainties in spectral data
gaussian_offset: float
Offset to Gaussian uncertainty distribution
"""
shifted_mod = np.fft.irfft(modft * np.exp(-2j * np.pi * np.arange(len(modft))/len(data) * shift))
return -np.sum(np.log(np.exp(-(data - shifted_mod)**2/2.0/errors**2) + gaussian_offset))
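# Small self-contained sketch (the helper name is hypothetical and nothing in
# this module calls it): the phase ramp used in rv_fit_mlnlike is the discrete
# Fourier shift theorem, so an integer shift reproduces np.roll exactly, while
# a non-integer shift interpolates between pixels (which is what the sub-pixel
# RV fit relies on).
def _check_fourier_shift(spec, shift):
    """Return True if the Fourier-space shift matches np.roll for an integer shift."""
    modft = np.fft.rfft(spec)
    phase = np.exp(-2j * np.pi * np.arange(len(modft)) / len(spec) * shift)
    shifted = np.fft.irfft(modft * phase, n=len(spec))
    return np.allclose(shifted, np.roll(spec, shift))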
def rv_shift_binary(shift1, shift2, alpha, modft1, modft2):
"""Shift two templates and add them, to model a binary star"""
data_len = (len(modft1)-1)*2
shifted_mod1 = np.fft.irfft(modft1 * np.exp(-2j * np.pi * np.arange(len(modft1))/data_len * shift1))
shifted_mod2 = np.fft.irfft(modft2 * np.exp(-2j * np.pi * np.arange(len(modft2))/data_len * shift2))
return (shifted_mod1 + alpha*shifted_mod2)/(1.0 + alpha)
def make_fake_binary(spect,wave,sig, template_fns, flux_ratio, rv0, rv1):
"""Make a fake binary in order to test todcor etc!"""
# (wave_log, spect_int, sig_int, template_ints) = \
# interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns)
wave_templates = []
spect_templates = []
for template_fn in template_fns:
dd = np.loadtxt(template_fn)
wave_templates.append(dd[:,0])
spect_templates.append(dd[:,1])
wave_templates = np.array(wave_templates)
spect_templates = np.array(spect_templates)
c_light = 3e5
fake_binary = np.interp(wave_templates[0]*(1 - rv0/c_light),wave_templates[0], spect_templates[0]) + \
np.interp(wave_templates[0]*(1 - rv1/c_light),wave_templates[1], spect_templates[1])*flux_ratio
#fake_binary = np.interp(wave_log*(1 - rv0/c_light),wave_log, template_ints[0]) + \
# np.interp(wave_log*(1 - rv1/c_light),wave_log, template_ints[1])*flux_ratio
#Un-continuum-subtract
#binspect = fake_binary + 1
#return binspect, wave_log, np.ones(len(binspect))*0.01
return fake_binary, wave_templates[0], np.ones(len(wave_templates[0]))*0.01
def interpolate_spectra_onto_log_grid(spect,wave,sig, template_dir,bad_intervals=[],\
smooth_distance=201,convolve_template=True, nwave_log=int(1e4), \
subtract_smoothed=True, interp_k=1):
"""Interpolate both the target and template spectra onto a common wavelength grid"""
#Create our logarithmic wavelength scale with the same min and max wavelengths as the
#target spectrum, and nwave_log wavelengths.
wave_log = np.min(wave)*np.exp( np.log(np.max(wave)/np.min(wave))/\
nwave_log*np.arange(nwave_log))
#Interpolate the target spectrum onto this scale
#spect_int = np.interp(wave_log,wave,spect)
#sig_int = np.interp(wave_log,wave,sig)
spl = InterpolatedUnivariateSpline(wave, spect, k=interp_k)
spect_int = spl(wave_log)
spl = InterpolatedUnivariateSpline(wave, sig, k=interp_k)
sig_int = spl(wave_log)
#Normalise
sig_int /= np.median(spect_int)
spect_int /= np.median(spect_int)
#Remove bad intervals
for interval in bad_intervals:
wlo = np.where(wave_log > interval[0])[0]
if len(wlo)==0:
continue
whi = np.where(wave_log > interval[1])[0]
if len(whi)==0:
whi = [len(wave_log)-1]
whi = whi[0]
wlo = wlo[0]
spect_int[wlo:whi] = spect_int[wlo] + np.arange(whi-wlo,dtype='float')/(whi-wlo)*(spect_int[whi] - spect_int[wlo])
sig_int[wlo:whi]=1
if subtract_smoothed:
#Subtract smoothed spectrum
spect_int -= spect_int[0] + np.arange(len(spect_int))/(len(spect_int)-1.0)*(spect_int[-1]-spect_int[0])
spect_int -= np.convolve(spect_int,np.ones(smooth_distance)/smooth_distance,'same')
#Now we find the interpolated template spectra, template_ints
template_fns = template_dir
template_ints = np.zeros( (len(template_fns),len(wave_log)) )
for i,template_fn in enumerate(template_fns):
try:
#Try loading a reduced WiFeS file first...
if template_fn.find("p08") >= len(template_fn) - 8:
print('Using raw wifes p08 file')
flux,wave_template=read_and_find_star_p08(template_fn)
spect_template,dummy = weighted_extract_spectrum(flux)
dell_template = np.mean(wave_template[1:]-wave_template[:-1])
#Try loading pickled RV standards
elif template_fn.find('pkl') >= len(template_fn)-4:
print('Using pickled Standards')
template_file = open(template_fn, 'rb')
wave_template, spect_template = pickle.load(template_file)
dell_template = np.mean(wave_template[1:]-wave_template[:-1])
#Next try a template text file (wavelength and flux in 2 columns)
elif template_fn.find('txt') >= len(template_fn)-4:
print('Using text file input')
dd = np.loadtxt(template_fn)
dell_template = np.mean(dd[1:,0]-dd[:-1,0])
wave_template = dd[:,0]
spect_template = dd[:,1]
#Finally try the Ambre convolved spectral format.
elif template_fn.find('fit') >= len(template_fn)-4:
print('Using ambre models (fits with fixed wavelength grid)')
spect_template = pyfits.getdata(template_fn)
dell_template = 0.1
wave_template=np.arange(90000)*dell_template + 3000
else:
print('Invalid rv standard or model file: ' + template_fn)
raise UserWarning
except:
print('Error loading model spectrum')
raise UserWarning
if convolve_template:
#Amount of subsampling in the template
template_subsamp = int((wave[1]-wave[0])/dell_template)
#Make sure it is an odd number to prevent shifting...
template_subsamp = np.maximum((template_subsamp//2)*2 - 1,1)
spect_template = np.convolve(np.convolve(spect_template,np.ones(template_subsamp)/template_subsamp,'same'),\
np.ones(2*template_subsamp+1)/(2*template_subsamp+1),'same')
#Interpolate onto the log wavelength grid.
#template_int = np.interp(wave_log,wave_template,spect_template)
spl = InterpolatedUnivariateSpline(wave_template,spect_template, k=interp_k)
template_int = spl(wave_log)
#Normalise
template_int /= np.median(template_int)
#Remove bad intervals
for interval in bad_intervals:
wlo = np.where(wave_log > interval[0])[0]
if len(wlo)==0:
continue
whi = np.where(wave_log > interval[1])[0]
if len(whi)==0:
whi = [len(wave_log)-1]
whi = whi[0]
wlo = wlo[0]
template_int[wlo:whi] = template_int[wlo] + np.arange(whi-wlo, dtype='float')/(whi-wlo)*(template_int[whi] - template_int[wlo])
if subtract_smoothed:
#Subtract smoothed spectrum
template_int -= template_int[0] + np.arange(len(template_int))/(len(template_int)-1.0)*(template_int[-1]-template_int[0])
template_int -= np.convolve(template_int,np.ones(smooth_distance)/smooth_distance,'same')
template_ints[i,:] = template_int
return wave_log, spect_int, sig_int, template_ints
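# On the log grid built above the velocity increment per pixel is constant:
#   drv = ln(wave_log[1]/wave_log[0]) * c = ln(wave_max/wave_min)/nwave_log * c.
# For illustration only (assumed numbers, not taken from this code): a span of
# 5400-7000 A with nwave_log = 1e4 gives drv ~ ln(7000/5400)/1e4 * 2.998e5
# ~ 7.8 km/s per pixel.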
def calc_rv_template(spect,wave,sig, template_dir,bad_intervals,smooth_distance=101, \
gaussian_offset=1e-4,nwave_log=1e4,oversamp=1,fig_fn='',convolve_template=True,\
starnumber=0, plotit=False, save_figures=False, save_dir='./', heliocentric_correction=0.):
"""Compute a radial velocity based on an best fitting template spectrum.
Teff is estimated at the same time.
Parameters
----------
spect: array-like
The reduced WiFeS spectrum
wave: array-like
The wavelengths corresponding to the reduced WiFeS spectrum
template_dir: string
The directory containing template spectra convolved to 0.1 Angstrom resolution
bad_intervals:
List of wavelength intervals where e.g. telluric absorption is bad.
smooth_distance: float
Distance to smooth for "continuum" correction
oversamp: float
Oversampling of the input wavelength scale. The slit is assumed 2 pixels wide.
gaussian_offset: float
Offset for the likelihood function from a Gaussian normalised to 1.
Returns
-------
rv: float
Radial velocity in km/s
rv_sig: float
Uncertainty in radial velocity (NB assumes good model fit)
temp: int
Temperature of model spectrum used for cross-correlation.
"""
if isinstance(template_dir, list):
template_fns = template_dir
else:
template_fns = glob.glob(template_dir)
#ADD IN HELIOCENTRIC CORRECTION SOMEWHERE:
#Make the Heliocentric correction...
#rv += h['RADVEL']
#Interpolate the target and template spectra.
(wave_log, spect_int, sig_int, template_ints) = interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,bad_intervals=bad_intervals, smooth_distance=smooth_distance,convolve_template=convolve_template, nwave_log=nwave_log)
#Do a cross-correlation to the nearest "spectral pixel" for each template
drv = np.log(wave_log[1]/wave_log[0])*2.998e5
rvs = np.zeros(len(template_fns))
peaks = np.zeros(len(template_fns))
for i,template_fn in enumerate(template_fns):
template_int = template_ints[i]
if save_figures == True:
plt.clf()
plt.plot(wave_log, template_int, label='template')
plt.plot(wave_log, spect_int, label='spectrum')
plt.title('Template no.'+str(i+1))
plt.savefig(save_dir + 'spectrum_vs_template_' + template_fns[i].split('/')[-1].split('.fits')[0] + '.png')
plt.clf()
cor = np.correlate(spect_int,template_int,'same')
##here it's a good idea to limit where the peak Xcorrelation can be, only search for a peak within 100 pixels of rv=0
## that's an RV range of -778 to 778 km/s for the default spacings in the code
peaks[i] = np.max(cor[int(nwave_log/2)-100:int(nwave_log/2)+100])/np.sqrt(np.sum(np.abs(template_int)**2))
rvs[i] = (np.argmax(cor[int(nwave_log/2)-100:int(nwave_log/2)+100])-100)*drv
if starnumber == 0: print('Correlating Template ' + str(i+1)+' out of ' + str(len(template_fns)))
if starnumber >0 : print('Correlating Template ' + str(i+1)+' out of ' + str(len(template_fns)) +' for star '+str(starnumber))
this_rvs = drv*(np.arange(2*smooth_distance)-smooth_distance)
correlation = cor[int(nwave_log/2)-100:int(nwave_log/2)+100]/np.sqrt(np.sum(np.abs(template_int)**2))
best_ind = np.argmax(correlation)
print("best RV for template "+str(i+1)+" is "+str(this_rvs[best_ind+1] + heliocentric_correction))
if save_figures == True:
plt.clf()
plt.plot(this_rvs[1:-1], correlation/np.max(correlation))
plt.title('Correlation_with_template_no.'+str(i+1))
plt.savefig(save_dir + 'Correlation_with_template_no' + str(i+1) + '.png')
plt.clf()
#Find the best cross-correlation.
ix = np.argmax(peaks)
print("BEST TEMPLATE:"+template_fns[ix].split('/')[-1])
#Recompute and plot the best cross-correlation
template_int = template_ints[ix,:]
cor = np.correlate(spect_int,template_int,'same')
plt.clf()
plt.plot(drv*(np.arange(2*smooth_distance)-smooth_distance),
cor[int(nwave_log/2)-smooth_distance:int(nwave_log/2)+smooth_distance])
##store the figure data for later use
outsave = np.array([drv*(np.arange(2*smooth_distance)-smooth_distance),cor[int(nwave_log/2)-smooth_distance:int(nwave_log/2)+smooth_distance]])
saveoutname = fig_fn.split('.png')[0] + "_figdat.pkl"
pickle.dump(outsave,open(saveoutname,"wb"))
plt.xlabel('Velocity (km/s)')
plt.ylabel('X Correlation')
#plt.show()
fn_ix = template_fns[ix].rfind('/')
#Dodgy! Need a better way to find a name for the template.
fn_ix_delta = template_fns[ix][fn_ix:].find(':')
if fn_ix_delta>0:
name = template_fns[ix][fn_ix+1:fn_ix+fn_ix_delta]
name_string=name
#A little messy !!!
if name[0]=='p':
name = name[1:]
name_string = 'T = ' + name + ' K'
else:
name_string = template_fns[ix][fn_ix+1:]
#pdb.set_trace()
#Fit for a precise RV... note that minimize (rather than minimize_scalar) failed more
#often for spectra that were not good matches.
modft = np.fft.rfft(template_int)
#res = op.minimize(rv_fit_mlnlike,rvs[ix]/drv,args=(modft,spect_int,sig_int,gaussian_offset))
#x = res.x[0]
#res = op.minimize_scalar(rv_fit_mlnlike,args=(modft,spect_int,sig_int,gaussian_offset),bounds=((rvs[ix]-1)/drv,(rvs[ix]+1)/drv))
#x = res.x
#fval = res.fun
x,fval,ierr,numfunc = op.fminbound(rv_fit_mlnlike,rvs[ix]/drv-5/drv,rvs[ix]/drv+5/drv,args=(modft,spect_int,sig_int,gaussian_offset),full_output=True)
rv = x*drv
rv += heliocentric_correction
##best model
shifted_mod = np.fft.irfft(modft * np.exp(-2j * np.pi * np.arange(len(modft))/len(spect_int) * x))
#pdb.set_trace()
fplus = rv_fit_mlnlike(x+0.5,modft,spect_int,sig_int,gaussian_offset)
fminus = rv_fit_mlnlike(x-0.5,modft,spect_int,sig_int,gaussian_offset)
hess_inv = 0.5**2/(fplus + fminus - 2*fval)
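# hess_inv above is 1/f''(x), with the curvature estimated by the central
# finite difference f''(x) ~ (f(x+h) + f(x-h) - 2 f(x)) / h**2 for h = 0.5
# pixel; for an approximately Gaussian likelihood, sqrt(1/f'') is the 1-sigma
# uncertainty of the shift in pixels.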
if (hess_inv < 0) | (fplus < fval) | (fminus < fval):
#If you get here, then there is a problem with the input spectrum or fitting.
#raise UserWarning
print("WARNING: Radial velocity fit did not work - trying again with wider range for: " + fig_fn)
x,fval,ierr,numfunc = op.fminbound(rv_fit_mlnlike,rvs[ix]/drv-10/drv,rvs[ix]/drv+10/drv,args=(modft,spect_int,sig_int,gaussian_offset),full_output=True)
rv = x*drv
#print("RV ="+str(rv)+", fval ="+str(fval))
fplus = rv_fit_mlnlike(x+0.5,modft,spect_int,sig_int,gaussian_offset)
#print("fplus ="+str(fplus))
fminus = rv_fit_mlnlike(x-0.5,modft,spect_int,sig_int,gaussian_offset)
#print("fminus ="+str(fminus))
hess_inv = 0.5**2/(fplus + fminus - 2*fval)
#print("hess_inv ="+str(hess_inv))
#import pdb
#pdb.set_trace()
if (hess_inv < 0) | (fplus < fval) | (fminus < fval):
print("WARNING: Radial velocity fit did not work, giving up with NaN uncertainty")
rv_sig = np.sqrt(hess_inv*nwave_log/len(spect)/oversamp)*drv
plt.title('RV, RV_sigma:' + str(rv) + ',' +str(rv_sig))
plt.savefig(save_dir + 'Best_correlation_temp_' + template_fns[ix].split('/')[-1] + '.png')
plt.title(name_string + ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv,rv_sig))
if len(fig_fn) > 0:
plt.savefig(fig_fn)
plt.clf()
plt.plot(wave_log,spect_int)
plt.plot(wave_log,shifted_mod)
plt.xlim([6400.0,6700.0])
plt.title(name_string + ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv,rv_sig))
if len(fig_fn) > 0:
fig_fn_new = fig_fn.split('_xcor.png')[0] + 'fitplot.png'
plt.savefig(fig_fn_new)
#again save the figure data for use later in making nicer plots with IDL
outsave = np.array([wave_log,spect_int,shifted_mod])
saveoutname = fig_fn.split('_xcor.png')[0] + 'fitplot_figdat.pkl'
pickle.dump(outsave,open(saveoutname,"wb"))
# pdb.set_trace()
return rv,rv_sig,template_fns[ix].split('/')[-1]
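# End-to-end sketch (illustrative only: the helper name, glob pattern and empty
# bad_intervals are placeholders, not values used elsewhere in this module):
def _example_rv_pipeline(p08_fn, template_glob='ambre_conv/*.fits'):
    """Extract a spectrum from a p08 cube and cross-correlate it against templates."""
    flux_stamp, wave = read_and_find_star_p08(p08_fn)
    spectrum, sig = weighted_extract_spectrum(flux_stamp)
    return calc_rv_template(spectrum, wave, sig, template_glob, bad_intervals=[])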
def calc_rv_todcor(spect,wave,sig, template_fns,bad_intervals=[],fig_fn='',\
smooth_distance=201,convolve_template=True, alpha=0.3,\
nwave_log=int(1e4),ncor=1000, return_fitted=False,jd=0.0,out_fn='',\
heliocentric_correction=0, plotit=False, window_divisor=20):
"""Compute a radial velocity based on an best fitting template spectrum.
Teff is estimated at the same time.
Parameters
----------
spect: array-like
The reduced WiFeS spectrum
wave: array-like
The wavelengths corresponding to the reduced WiFeS spectrum
template_fns: list of strings
Spectral templates for star 1 and star 2 that can be read in by np.loadtxt
bad_intervals:
List of wavelength intervals where e.g. telluric absorption is bad. For todcor,
these can only be smoothed over.
smooth_distance: float
Distance to smooth for "continuum" correction
Returns
-------
rv1: float
Radial velocity of star 1 in km/s
rv_sig1: float
Uncertainty in radial velocity (NB assumes good model fit)
rv2: float
Radial velocity of star 2 in km/s
rv_sig2: float
Uncertainty in radial velocity (NB assumes good model fit)
corpeak: float
Correlation peak
"""
(wave_log, spect_int, sig_int, template_ints) = \
interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,\
bad_intervals=bad_intervals, smooth_distance=smooth_distance, \
convolve_template=convolve_template, nwave_log=nwave_log)
drv = np.log(wave_log[1]/wave_log[0])*2.998e5
#*** Next (hopefully with two templates only!) we continue and apply the TODCOR algorithm.
window_width = nwave_log//window_divisor
ramp = np.arange(1,window_width+1,dtype=float)/window_width
window = np.ones(nwave_log)
window[:window_width] *= ramp
window[-window_width:] *= ramp[::-1]
template_ints[0] *= window
template_ints[1] *= window
spect_int *= window
norm1 = np.sqrt(np.sum(template_ints[0]**2))
norm2 = np.sqrt(np.sum(template_ints[1]**2))
norm_tgt = np.sqrt(np.sum(spect_int**2))
#pdb.set_trace()
c1 = np.fft.irfft(np.conj(np.fft.rfft(template_ints[0]/norm1))*np.fft.rfft(spect_int/norm_tgt))
c1 = np.roll(c1,ncor//2)[:ncor]
c2 = np.fft.irfft(np.conj(np.fft.rfft(template_ints[1]/norm2))*np.fft.rfft(spect_int/norm_tgt))
c2 = np.roll(c2,ncor//2)[:ncor]
#Unclear which way around this line should be. ix_c12 sign was corrected in order to
#give the right result with simulated data.
c12 = np.fft.irfft(np.fft.rfft(template_ints[1]/norm2)*np.conj(np.fft.rfft(template_ints[0]/norm1)))
c12 = np.roll(c12,ncor//2)[:ncor]
ix = np.arange(ncor).astype(int)
xy = np.meshgrid(ix,ix)
#Correct the flux ratio for the RMS spectral variation. Is this needed???
alpha_norm = alpha * norm2/norm1
ix_c12 = np.minimum(np.maximum(xy[0]-xy[1]+ncor//2,0),ncor-1) #!!!This was the old line !!!
#ix_c12 = np.minimum(np.maximum(xy[1]-xy[0]+ncor//2,0),ncor-1) #XXX New (temporary?) line XXX
todcor = (c1[xy[0]] + alpha_norm*c2[xy[1]])/np.sqrt(1 + 2*alpha_norm*c12[ix_c12] + alpha_norm**2)
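# This appears to be the two-dimensional TODCOR statistic of Zucker & Mazeh
# (1994): R(s1, s2) = [c1(s1) + alpha' c2(s2)] / sqrt(1 + 2 alpha' c12(s2-s1)
# + alpha'**2), with c1, c2 the single-template correlations and c12 the
# template-template correlation computed above.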
print("Max correlation: {0:5.2f}".format(np.max(todcor)))
#print(alpha_norm)
#plt.plot(drv*(np.arange(nwave_log)-nwave_log//2),np.roll(c1,nwave_log//2))
#Figure like TODCOR paper:
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#ax.plot_surface(xy[0],xy[1],todcor)
plt.clf()
plt.imshow(todcor, cmap=cm.gray,interpolation='nearest',extent=[-drv*ncor/2,drv*ncor/2,-drv*ncor/2,drv*ncor/2])
xym = np.unravel_index(np.argmax(todcor), todcor.shape)
old_fit = False
if (old_fit):
hw_fit = 1 #2
if (xym[0]< hw_fit) | (xym[1]< hw_fit) | (xym[0]>= ncor-hw_fit) | (xym[1]>= ncor-hw_fit):
print("Error: TODCOR peak to close to edge!")
raise UserWarning
ix_fit = np.arange(-hw_fit, hw_fit + 1).astype(int)
xy_fit = np.meshgrid(ix_fit,ix_fit)
p_init = models.Gaussian2D(amplitude=np.max(todcor),x_mean=0, y_mean=0,
x_stddev = 50.0/drv, y_stddev = 50.0/drv)
fit_p = fitting.LevMarLSQFitter()
p = fit_p(p_init, xy_fit[0], xy_fit[1], todcor[xym[0]-hw_fit:xym[0]+hw_fit+1,
xym[1]-hw_fit:xym[1]+hw_fit+1])
rv_x = drv*((p.parameters[1] + xym[1]) - ncor//2)
rv_y = drv*((p.parameters[2] + xym[0]) - ncor//2)
else:
pix = todcor[xym[0]-1:xym[0]+2, xym[1]]
xym_frac0 = (pix[2] - pix[0])/(2*pix[1] - pix[0] - pix[2])/2
pix = todcor[xym[0], xym[1]-1:xym[1]+2]
xym_frac1 = (pix[2] - pix[0])/(2*pix[1] - pix[0] - pix[2])/2
rv_x = drv*((xym_frac1 + xym[1]) - ncor//2)
rv_y = drv*((xym_frac0 + xym[0]) - ncor//2)
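# The fractional offsets above are the vertex of a parabola through the three
# points (-1, pix[0]), (0, pix[1]), (+1, pix[2]):
#   x* = (pix[2] - pix[0]) / (2 * (2*pix[1] - pix[0] - pix[2])),
# i.e. standard quadratic peak interpolation to sub-pixel precision.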
model_spect = rv_shift_binary(rv_x/drv, rv_y/drv, alpha, np.fft.rfft(template_ints[0]), np.fft.rfft(template_ints[1]))
if plotit:
(wave_log, spect_int_norm, sig_int, template_int_norm) = \
interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,\
bad_intervals=bad_intervals, smooth_distance=smooth_distance, \
convolve_template=convolve_template, nwave_log=nwave_log, \
subtract_smoothed=False)
model_spect_norm = rv_shift_binary(rv_x/drv, rv_y/drv, alpha, \
np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))
model_spect_prim = rv_shift_binary(rv_x/drv, rv_y/drv, 0, \
np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))
model_spect_sec = rv_shift_binary(rv_x/drv, rv_y/drv, 1e6, \
|
np.fft.rfft(template_int_norm[0])
|
numpy.fft.rfft
|
# -*- coding: utf-8 -*-
"""
12-Class SSVEP EEG Dataset - Classification Using Convolutional Neural Network
User-Independent Training using Complex Spectrum Features (Leave one out)
The following implementation is an asynchronous SSVEP BCI
using Convolutional Neural Network classification for 1 second data length
"""
import numpy as np
from numpy.core.defchararray import array
import numpy.matlib
import scipy.io as sio
# from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
import numpy as np
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.losses import categorical_crossentropy
import ssvep_utils as su
CNN_PARAMS = {
'batch_size': 256,
'epochs': 50,
'droprate': 0.25,
'learning_rate': 0.001,
'lr_decay': 0.0,
'l2_lambda': 0.005,
'momentum': 0.9,
'kernel_f': 10,
'n_ch': 8,
'num_classes': 12}
FFT_PARAMS = {
'resolution': 0.2930,
'start_frequency': 3.0,
'end_frequency': 35.0,
'sampling_rate': 256
}
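# Note on the feature length used below (an assumption about
# su.complex_spectrum_features, whose source is not shown here): keeping FFT
# bins between start_frequency and end_frequency at this resolution gives
# roughly (35 - 3)/0.293 + 1 ~ 110 bins, and stacking real and imaginary parts
# doubles that to the 220-sample feature axis seen in the array shapes below.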
all_acc = np.zeros((10, 1))
sub_num = 10
####################
# Dataset preprocessing
train_data_all = np.zeros((sub_num, 720, 8, 220, 1))
labels_all = np.zeros((sub_num, 720, 12))
for subject in range(0, sub_num):
dataset = sio.loadmat(f'data/s{subject+1}.mat')
eeg = np.array(dataset['eeg'], dtype='float32')
CNN_PARAMS['num_classes'] = eeg.shape[0]
CNN_PARAMS['n_ch'] = eeg.shape[1]
total_trial_len = eeg.shape[2]
num_trials = eeg.shape[3]
sample_rate = 256
filtered_data = su.get_filtered_eeg(eeg, 6, 80, 4, sample_rate)
eeg = []
window_len = 1
shift_len = 1
segmented_data = su.get_segmented_epochs(filtered_data, window_len, shift_len, sample_rate)
filtered_data = []
features_data = su.complex_spectrum_features(segmented_data, FFT_PARAMS)
segmented_data = []
#Combining the features into a matrix of dim [features X channels X classes X trials*segments]
features_data = np.reshape(features_data, (features_data.shape[0], features_data.shape[1],
features_data.shape[2], features_data.shape[3]*features_data.shape[4]))
train_data = features_data[:, :, 0, :].T
#Reshaping the data into dim [classes*trials*segments X channels X features]
for target in range(1, features_data.shape[2]):
train_data = np.vstack([train_data, np.squeeze(features_data[:, :, target, :]).T])
#Finally reshaping the data into dim [classes*trials*segments X channels X features X 1]
train_data = np.reshape(train_data, (train_data.shape[0], train_data.shape[1], train_data.shape[2], 1))
train_data_all[subject, :, :, :, :] = train_data
total_epochs_per_class = features_data.shape[3]
features_data = []
class_labels = np.arange(CNN_PARAMS['num_classes'])
labels = (np.matlib.repmat(class_labels, total_epochs_per_class, 1).T).ravel()
labels = to_categorical(labels)
labels_all[subject, :, :] = labels
########################
# Training and testing
########################
#
# num_folds = 10
# kf = KFold(n_splits=num_folds, shuffle=True)
# kf.get_n_splits(train_data)
# cv_acc = np.zeros((num_folds, 1))
# fold = -1
num_out = 10
loo = LeaveOneOut()
loo.get_n_splits(train_data)
cv_acc =
|
np.zeros((num_out, 1))
|
numpy.zeros
|
#!/usr/bin/env python3
import sys
import numpy as np
import scipy.integrate as spint
import warnings
import os.path
from directdm.run import adm
from directdm.run import rge
from directdm.num.num_input import Num_input
from directdm.match.dim4_gauge_contribution import Higgspenguin
from directdm.num.single_nucleon_form_factors import *
#----------------------------------------------#
# convert dictionaries to lists and vice versa #
#----------------------------------------------#
def dict_to_list(dictionary, order_list):
""" Create a list from dictionary, according to ordering in order_list """
#assert sorted(order_list) == sorted(dictionary.keys())
wc_list = []
for wc_name in order_list:
wc_list.append(dictionary[wc_name])
return wc_list
def list_to_dict(wc_list, order_list):
""" Create a dictionary from a list wc_list, using keys in order_list """
#assert len(order_list) == len(wc_list)
wc_dict = {}
for wc_ind in range(len(order_list)):
wc_dict[order_list[wc_ind]] = wc_list[wc_ind]
return wc_dict
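# Minimal round-trip sketch (names illustrative only):
#   order = ['C51', 'C52']
#   dict_to_list({'C51': 1.2, 'C52': 0.}, order)   -> [1.2, 0.]
#   list_to_dict([1.2, 0.], order)                 -> {'C51': 1.2, 'C52': 0.}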
#---------------------------------------------------#
# Classes for Wilson coefficients at various scales #
#---------------------------------------------------#
class WC_3flavor(object):
def __init__(self, coeff_dict, DM_type, input_dict):
""" Class for Wilson coefficients in 3 flavor QCD x QED plus DM.
The first argument should be a dictionary for the initial conditions
of the 2 + 24 + 4 + 36 + 4 + 48 + 6 + 1 + 12 = 137
dimension-five to dimension-eight three-flavor-QCD Wilson coefficients of the form
{'C51' : value, 'C52' : value, ...}.
An arbitrary number of them can be given; the default values are zero.
The second argument is the DM type; it can take the following values:
"D" (Dirac fermion)
"M" (Majorana fermion)
"C" (Complex scalar)
"R" (Real scalar)
The possible names are (with a hopefully obvious notation):
Dirac fermion: 'C51', 'C52', 'C61u', 'C61d', 'C61s', 'C61e', 'C61mu', 'C61tau',
'C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C71', 'C72', 'C73', 'C74',
'C75u', 'C75d', 'C75s', 'C75e', 'C75mu', 'C75tau',
'C76u', 'C76d', 'C76s', 'C76e', 'C76mu', 'C76tau',
'C77u', 'C77d', 'C77s', 'C77e', 'C77mu', 'C77tau',
'C78u', 'C78d', 'C78s', 'C78e', 'C78mu', 'C78tau',
'C79u', 'C79d', 'C79s', 'C79e', 'C79mu', 'C79tau',
'C710u', 'C710d', 'C710s', 'C710e', 'C710mu', 'C710tau',
'C711', 'C712', 'C713', 'C714',
'C715u', 'C715d', 'C715s', 'C715e', 'C715mu', 'C715tau',
'C716u', 'C716d', 'C716s', 'C716e', 'C716mu', 'C716tau',
'C717u', 'C717d', 'C717s', 'C717e', 'C717mu', 'C717tau',
'C718u', 'C718d', 'C718s', 'C718e', 'C718mu', 'C718tau',
'C719u', 'C719d', 'C719s', 'C719e', 'C719mu', 'C719tau',
'C720u', 'C720d', 'C720s', 'C720e', 'C720mu', 'C720tau',
'C721u', 'C721d', 'C721s', 'C721e', 'C721mu', 'C721tau',
'C722u', 'C722d', 'C722s', 'C722e', 'C722mu', 'C722tau',
'C723u', 'C723d', 'C723s', 'C723e', 'C723mu', 'C723tau',
'C725',
'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s',
'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s'
Majorana fermion: 'C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C71', 'C72', 'C73', 'C74',
'C75u', 'C75d', 'C75s', 'C75e', 'C75mu', 'C75tau',
'C76u', 'C76d', 'C76s', 'C76e', 'C76mu', 'C76tau',
'C77u', 'C77d', 'C77s', 'C77e', 'C77mu', 'C77tau',
'C78u', 'C78d', 'C78s', 'C78e', 'C78mu', 'C78tau',
'C711', 'C712', 'C713', 'C714',
'C715u', 'C715d', 'C715s', 'C715e', 'C715mu', 'C715tau',
'C716u', 'C716d', 'C716s', 'C716e', 'C716mu', 'C716tau',
'C717u', 'C717d', 'C717s', 'C717e', 'C717mu', 'C717tau',
'C718u', 'C718d', 'C718s', 'C718e', 'C718mu', 'C718tau',
'C82u', 'C82d', 'C82s', 'C84u', 'C84d', 'C84s'
Complex Scalar: 'C61u', 'C61d', 'C61s', 'C61e', 'C61mu', 'C61tau',
'C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C65', 'C66', 'C67', 'C68'
'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s',
'C69u', 'C69d', 'C69s', 'C69e', 'C69mu', 'C69tau',
'C610'
Real Scalar: 'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C65', 'C66', 'C67', 'C68',
'C69u', 'C69d', 'C69s', 'C69e', 'C69mu', 'C69tau',
'C610'
(the notation corresponds to the numbering in 1707.06998, 1801.04240).
The Wilson coefficients should be specified in the MS-bar scheme at 2 GeV.
For completeness, the default initial conditions at MZ for the corresponding
leptonic operator Wilson coefficients are defined as the SM values
(note that these operators have vanishing QCD anomalous dimension):
'D63eu', 'D63muu', 'D63tauu', 'D63ed', 'D63mud', 'D63taud', 'D63es', 'D63mus', 'D63taus',
'D62ue', 'D62umu', 'D62utau', 'D62de', 'D62dmu', 'D62dtau', 'D62se', 'D62smu', 'D62stau'
The third argument is a dictionary with all input parameters.
The class has three methods:
run
---
Run the Wilson coefficients from mu = 2 GeV to mu_low [GeV; default 2 GeV], with 3 active quark flavors
cNR
---
Calculate the cNR coefficients as defined in 1308.6288
The class has two mandatory arguments: The DM mass in GeV and the momentum transfer in GeV
The effects of double insertion [arxiv:1801.04240] are included also for leptons;
for couplings to electrons and muons, there are other contributions that are neglected.
If the relevant initial conditions are set to non-zero values, a user warning is issued
upon creation of the class instance.
write_mma
---------
Write an output file that can be loaded into mathematica,
to be used in the DMFormFactor package [1308.6288].
"""
self.DM_type = DM_type
self.sm_lepton_name_list = ['D63eu', 'D63muu', 'D63tauu', 'D63ed', 'D63mud',
'D63taud', 'D63es', 'D63mus', 'D63taus',
'D62ue', 'D62umu', 'D62utau', 'D62de', 'D62dmu',
'D62dtau', 'D62se', 'D62smu', 'D62stau']
if self.DM_type == "D":
self.wc_name_list = ['C51', 'C52', 'C61u', 'C61d', 'C61s', 'C61e', 'C61mu',
'C61tau', 'C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau', 'C64u',
'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C71', 'C72', 'C73', 'C74',
'C75u', 'C75d', 'C75s', 'C75e', 'C75mu', 'C75tau',
'C76u', 'C76d', 'C76s', 'C76e', 'C76mu', 'C76tau',
'C77u', 'C77d', 'C77s', 'C77e', 'C77mu', 'C77tau',
'C78u', 'C78d', 'C78s', 'C78e', 'C78mu', 'C78tau',
'C79u', 'C79d', 'C79s', 'C79e', 'C79mu', 'C79tau',
'C710u', 'C710d', 'C710s', 'C710e', 'C710mu', 'C710tau',
'C711', 'C712', 'C713', 'C714',
'C715u', 'C715d', 'C715s', 'C715e', 'C715mu', 'C715tau',
'C716u', 'C716d', 'C716s', 'C716e', 'C716mu', 'C716tau',
'C717u', 'C717d', 'C717s', 'C717e', 'C717mu', 'C717tau',
'C718u', 'C718d', 'C718s', 'C718e', 'C718mu', 'C718tau',
'C719u', 'C719d', 'C719s', 'C719e', 'C719mu', 'C719tau',
'C720u', 'C720d', 'C720s', 'C720e', 'C720mu', 'C720tau',
'C721u', 'C721d', 'C721s', 'C721e', 'C721mu', 'C721tau',
'C722u', 'C722d', 'C722s', 'C722e', 'C722mu', 'C722tau',
'C723u', 'C723d', 'C723s', 'C723e', 'C723mu', 'C723tau',
'C725']
self.wc8_name_list = ['C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s',
'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s']
if self.DM_type == "M":
self.wc_name_list = ['C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C71', 'C72', 'C73', 'C74',
'C75u', 'C75d', 'C75s', 'C75e', 'C75mu', 'C75tau',
'C76u', 'C76d', 'C76s', 'C76e', 'C76mu', 'C76tau',
'C77u', 'C77d', 'C77s', 'C77e', 'C77mu', 'C77tau',
'C78u', 'C78d', 'C78s', 'C78e', 'C78mu', 'C78tau',
'C711', 'C712', 'C713', 'C714',
'C715u', 'C715d', 'C715s', 'C715e', 'C715mu', 'C715tau',
'C716u', 'C716d', 'C716s', 'C716e', 'C716mu', 'C716tau',
'C717u', 'C717d', 'C717s', 'C717e', 'C717mu', 'C717tau',
'C718u', 'C718d', 'C718s', 'C718e', 'C718mu', 'C718tau',
'C723u', 'C723d', 'C723s', 'C723e', 'C723mu', 'C723tau',
'C725']
self.wc8_name_list = ['C82u', 'C82d', 'C82s', 'C84u', 'C84d', 'C84s']
# The list of indices to be deleted from the QCD/QED ADM because there are fewer operators
del_ind_list = np.r_[np.s_[0:8], np.s_[14:20], np.s_[54:66], np.s_[94:118]]
# The list of indices to be deleted from the dim.8 ADM because there are fewer operators
del_ind_list_dim_8 = np.r_[np.s_[0:3], np.s_[6:9]]
if self.DM_type == "C":
self.wc_name_list = ['C61u', 'C61d', 'C61s', 'C61e', 'C61mu', 'C61tau',
'C62u', 'C62d', 'C62s', 'C62e', 'C62mu', 'C62tau',
'C65', 'C66',
'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C67', 'C68',
'C69u', 'C69d', 'C69s', 'C69e', 'C69mu', 'C69tau',
'C610']
self.wc8_name_list = ['C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s']
# The list of indices to be deleted from the QCD/QED ADM because there are fewer operators
del_ind_list = np.r_[np.s_[0:2], np.s_[8:14], np.s_[20:26], np.s_[27:28], np.s_[29:30],\
np.s_[36:42], np.s_[48:66], np.s_[67:68], np.s_[69:70], np.s_[70:118]]
# The list of indices to be deleted from the dim.8 ADM because there are fewer operators
del_ind_list_dim_8 = np.r_[np.s_[0:3], np.s_[6:9]]
if self.DM_type == "R":
self.wc_name_list = ['C65', 'C66',
'C63u', 'C63d', 'C63s', 'C63e', 'C63mu', 'C63tau',
'C64u', 'C64d', 'C64s', 'C64e', 'C64mu', 'C64tau',
'C67', 'C68',
'C69u', 'C69d', 'C69s', 'C69e', 'C69mu', 'C69tau',
'C610']
self.wc8_name_list = []
# The list of indices to be deleted from the QCD/QED ADM because there are fewer operators
del_ind_list = np.r_[np.s_[0:26], np.s_[27:28], np.s_[29:30], np.s_[36:42],\
np.s_[48:66], np.s_[67:68], np.s_[69:70], np.s_[70:118]]
self.coeff_dict = {}
# Issue a user warning if an unknown key is provided:
for wc_name in coeff_dict.keys():
if wc_name in self.wc_name_list:
pass
elif wc_name in self.wc8_name_list:
pass
else:
warnings.warn('The key ' + wc_name + ' is not a valid key. Typo?')
# Create the dictionary.
for wc_name in self.wc_name_list:
if wc_name in coeff_dict.keys():
self.coeff_dict[wc_name] = coeff_dict[wc_name]
else:
self.coeff_dict[wc_name] = 0.
for wc_name in self.wc8_name_list:
if wc_name in coeff_dict.keys():
self.coeff_dict[wc_name] = coeff_dict[wc_name]
else:
self.coeff_dict[wc_name] = 0.
# The dictionary of input parameters
self.ip = input_dict
# The default values for the SM lepton operators:
# Input for lepton contribution
sw = np.sqrt(self.ip['sw2_MSbar'])
cw = np.sqrt(1-sw**2)
vd = (-1/2 - 2*sw**2*(-1/3))/(2*sw*cw)
vu = (1/2 - 2*sw**2*(2/3))/(2*sw*cw)
ad = -(-1/2)/(2*sw*cw)
au = -(1/2)/(2*sw*cw)
vl = (-1/2 - 2*sw**2*(-1))/(2*sw*cw)
al = -(-1/2)/(2*sw*cw)
self.coeff_dict['D62ue'] = au*al * 4*sw**2*cw**2
self.coeff_dict['D62umu'] = au*al * 4*sw**2*cw**2
self.coeff_dict['D62utau'] = au*al * 4*sw**2*cw**2
self.coeff_dict['D62de'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D62dmu'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D62dtau'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D62se'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D62smu'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D62stau'] = ad*al * 4*sw**2*cw**2
self.coeff_dict['D63eu'] = al*vu * 4*sw**2*cw**2
self.coeff_dict['D63muu'] = al*vu * 4*sw**2*cw**2
self.coeff_dict['D63tauu'] = al*vu * 4*sw**2*cw**2
self.coeff_dict['D63ed'] = al*vd * 4*sw**2*cw**2
self.coeff_dict['D63mud'] = al*vd * 4*sw**2*cw**2
self.coeff_dict['D63taud'] = al*vd * 4*sw**2*cw**2
self.coeff_dict['D63es'] = al*vd * 4*sw**2*cw**2
self.coeff_dict['D63mus'] = al*vd * 4*sw**2*cw**2
self.coeff_dict['D63taus'] = al*vd * 4*sw**2*cw**2
for wc_name in self.sm_lepton_name_list:
if wc_name in coeff_dict.keys():
self.coeff_dict[wc_name] = coeff_dict[wc_name]
else:
pass
# Issue a user warning if certain electron / muon Wilson coefficients are non-zero:
for wc_name in self.coeff_dict.keys():
if DM_type == "D":
for wc_name in ['C63e', 'C63mu', 'C64e', 'C64mu']:
if self.coeff_dict[wc_name] != 0.:
warnings.warn('The RG result for ' + wc_name + ' is incomplete, expect large uncertainties!')
else:
pass
elif DM_type == "M":
for wc_name in ['C64e', 'C64mu']:
if self.coeff_dict[wc_name] != 0.:
warnings.warn('The RG result for ' + wc_name + ' is incomplete, expect large uncertainties!')
else:
pass
elif DM_type == "C":
for wc_name in ['C62e', 'C62mu']:
if self.coeff_dict[wc_name] != 0.:
warnings.warn('The RG result for ' + wc_name + ' is incomplete, expect large uncertainties!')
else:
pass
elif DM_type == "R":
pass
# Create the np.array of coefficients:
self.coeff_list_dm_dim5_dim6_dim7 = np.array(dict_to_list(self.coeff_dict, self.wc_name_list))
self.coeff_list_dm_dim8 = np.array(dict_to_list(self.coeff_dict, self.wc8_name_list))
self.coeff_list_sm_lepton_dim6 = np.array(dict_to_list(self.coeff_dict, self.sm_lepton_name_list))
#---------------------------#
# The anomalous dimensions: #
#---------------------------#
if self.DM_type == "D":
self.gamma_QED = adm.ADM_QED(3)
self.gamma_QED2 = adm.ADM_QED2(3)
self.gamma_QCD = adm.ADM_QCD(3)
self.gamma_QCD2 = adm.ADM_QCD2(3)
self.gamma_QCD_dim8 = adm.ADM_QCD_dim8(3)
if self.DM_type == "M":
self.gamma_QED = np.delete(np.delete(adm.ADM_QED(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QED2 = np.delete(np.delete(adm.ADM_QED2(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QCD = np.delete(np.delete(adm.ADM_QCD(3), del_ind_list, 1), del_ind_list, 2)
self.gamma_QCD2 = np.delete(np.delete(adm.ADM_QCD2(3), del_ind_list, 1), del_ind_list, 2)
self.gamma_QCD_dim8 = np.delete(np.delete(adm.ADM_QCD_dim8(3), del_ind_list_dim_8, 0),\
del_ind_list_dim_8, 1)
if self.DM_type == "C":
self.gamma_QED = np.delete(np.delete(adm.ADM_QED(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QED2 = np.delete(np.delete(adm.ADM_QED2(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QCD = np.delete(np.delete(adm.ADM_QCD(3), del_ind_list, 1), del_ind_list, 2)
self.gamma_QCD2 = np.delete(np.delete(adm.ADM_QCD2(3), del_ind_list, 1), del_ind_list, 2)
self.gamma_QCD_dim8 = np.delete(np.delete(adm.ADM_QCD_dim8(3), del_ind_list_dim_8, 0),\
del_ind_list_dim_8, 1)
if self.DM_type == "R":
self.gamma_QED = np.delete(np.delete(adm.ADM_QED(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QED2 = np.delete(np.delete(adm.ADM_QED2(3), del_ind_list, 0), del_ind_list, 1)
self.gamma_QCD = np.delete(np.delete(adm.ADM_QCD(3), del_ind_list, 1), del_ind_list, 2)
self.gamma_QCD2 = np.delete(np.delete(adm.ADM_QCD2(3), del_ind_list, 1), del_ind_list, 2)
def run(self, mu_low=None):
""" Running of 3-flavor Wilson coefficients
Calculate the running from 2 GeV to mu_low [GeV; default 2 GeV] in the three-flavor theory.
Return a dictionary of Wilson coefficients for the three-flavor Lagrangian
at scale mu_low (this is the default).
"""
if mu_low is None:
mu_low=2
#-------------#
# The running #
#-------------#
alpha_at_mu = 1/self.ip['amtauinv']
as31 = rge.AlphaS(self.ip['asMZ'], self.ip['Mz'])
as31_high = as31.run({'mbmb': self.ip['mb_at_mb'], 'mcmc': self.ip['mc_at_mc']},\
{'mub': self.ip['mb_at_mb'], 'muc': self.ip['mc_at_mc']}, 2, 3, 1)
as31_low = as31.run({'mbmb': self.ip['mb_at_mb'], 'mcmc': self.ip['mc_at_mc']},\
{'mub': self.ip['mb_at_mb'], 'muc': self.ip['mc_at_mc']}, mu_low, 3, 1)
evolve1 = rge.RGE(self.gamma_QCD, 3)
evolve2 = rge.RGE(self.gamma_QCD2, 3)
if self.DM_type == "D" or self.DM_type == "M" or self.DM_type == "C":
evolve8 = rge.RGE([self.gamma_QCD_dim8], 3)
else:
pass
C_at_mu_QCD = np.dot(evolve2.U0_as2(as31_high, as31_low),\
np.dot(evolve1.U0(as31_high, as31_low),\
self.coeff_list_dm_dim5_dim6_dim7))
C_at_mu_QED = np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED)\
* np.log(mu_low/2) * alpha_at_mu/(4*np.pi)\
+ np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED2)\
* np.log(mu_low/2) * (alpha_at_mu/(4*np.pi))**2
if self.DM_type == "D" or self.DM_type == "M" or self.DM_type == "C":
C_dim8_at_mu = np.dot(evolve8.U0(as31_high, as31_low), self.coeff_list_dm_dim8)
else:
pass
# Revert back to dictionary
dict_coeff_mu = list_to_dict(C_at_mu_QCD + C_at_mu_QED, self.wc_name_list)
if self.DM_type == "D" or self.DM_type == "M" or self.DM_type == "C":
dict_dm_dim8 = list_to_dict(C_dim8_at_mu, self.wc8_name_list)
dict_coeff_mu.update(dict_dm_dim8)
dict_sm_lepton_dim6 = list_to_dict(self.coeff_list_sm_lepton_dim6, self.sm_lepton_name_list)
dict_coeff_mu.update(dict_sm_lepton_dim6)
else:
pass
return dict_coeff_mu
def _my_cNR(self, DM_mass, RGE=None, NLO=None, DOUBLE_WEAK=None):
"""Calculate the coefficients of the NR operators, with momentum dependence factored out.
DM_mass is the DM mass in GeV
RGE is a flag to turn RGE running on (True) or off (False). (Default True)
If NLO is set to True, the coherently enhanced NLO terms for Q_9^(7) are added. (Default False)
If DOUBLE_WEAK is set to False, the weak mixing below the weak scale is set to zero. (Default True)
Returns a dictionary of coefficients for the NR Lagrangian,
as in 1308.6288, plus coefficients c13 -- c23, c100, c104 for "spurious" long-distance operators
The possible names are
['cNR1p', 'cNR1n', 'cNR2p', 'cNR2n', 'cNR3p', 'cNR3n', 'cNR4p', 'cNR4n', 'cNR5p', 'cNR5n',
'cNR6p', 'cNR6n', 'cNR7p', 'cNR7n', 'cNR8p', 'cNR8n', 'cNR9p', 'cNR9n', 'cNR10p', 'cNR10n',
'cNR11p', 'cNR11n', 'cNR12p', 'cNR12n', 'cNR13p', 'cNR13n', 'cNR14p', 'cNR14n', 'cNR15p', 'cNR15n',
'cNR16p', 'cNR16n', 'cNR17p', 'cNR17n', 'cNR18p', 'cNR18n', 'cNR19p', 'cNR19n', 'cNR20p', 'cNR20n',
'cNR21p', 'cNR21n', 'cNR22p', 'cNR22n', 'cNR23p', 'cNR23n', 'cNR100p', 'cNR100n', 'cNR104p', 'cNR104n']
"""
if RGE is None:
RGE = True
if NLO is None:
NLO = False
if DOUBLE_WEAK is None:
DOUBLE_WEAK = True
if DOUBLE_WEAK:
wmws = 1.
else:
wmws = 0.
### Input parameters ####
mpi = self.ip['mpi0']
mp = self.ip['mproton']
mn = self.ip['mneutron']
mN = (mp+mn)/2
alpha = 1/self.ip['alowinv']
GF = self.ip['GF']
as_2GeV = rge.AlphaS(self.ip['asMZ'],\
self.ip['Mz']).run({'mbmb': self.ip['mb_at_mb'], 'mcmc': self.ip['mc_at_mc']},\
{'mub': self.ip['mb_at_mb'], 'muc': self.ip['mc_at_mc']}, 2, 3, 1)
gs2_2GeV = 4*np.pi*as_2GeV
# Quark masses at 2GeV
mu = self.ip['mu_at_2GeV']
md = self.ip['md_at_2GeV']
ms = self.ip['ms_at_2GeV']
mtilde = 1/(1/mu + 1/md + 1/ms)
# Lepton masses
me = self.ip['me']
mmu = self.ip['mmu']
mtau = self.ip['mtau']
# Z boson mass
MZ = self.ip['Mz']
### Numerical constants
mproton = self.ip['mproton']
mneutron = self.ip['mneutron']
F1up = F1('u', 'p', self.ip).value_zero_mom()
F1dp = F1('d', 'p', self.ip).value_zero_mom()
F1sp = F1('s', 'p', self.ip).value_zero_mom()
F1un = F1('u', 'n', self.ip).value_zero_mom()
F1dn = F1('d', 'n', self.ip).value_zero_mom()
F1sn = F1('s', 'n', self.ip).value_zero_mom()
F1spslope = F1('s', 'p', self.ip).first_deriv_zero_mom()
F1snslope = F1('s', 'n', self.ip).first_deriv_zero_mom()
F2up = F2('u', 'p', self.ip).value_zero_mom()
F2dp = F2('d', 'p', self.ip).value_zero_mom()
F2sp = F2('s', 'p', self.ip).value_zero_mom()
F2un = F2('u', 'n', self.ip).value_zero_mom()
F2dn = F2('d', 'n', self.ip).value_zero_mom()
F2sn = F2('s', 'n', self.ip).value_zero_mom()
FAup = FA('u', 'p', self.ip).value_zero_mom()
FAdp = FA('d', 'p', self.ip).value_zero_mom()
FAsp = FA('s', 'p', self.ip).value_zero_mom()
FAun = FA('u', 'n', self.ip).value_zero_mom()
FAdn = FA('d', 'n', self.ip).value_zero_mom()
FAsn = FA('s', 'n', self.ip).value_zero_mom()
FPpup_pion = FPprimed('u', 'p', self.ip).value_pion_pole()
FPpdp_pion = FPprimed('d', 'p', self.ip).value_pion_pole()
FPpsp_pion = FPprimed('s', 'p', self.ip).value_pion_pole()
FPpun_pion = FPprimed('u', 'n', self.ip).value_pion_pole()
FPpdn_pion = FPprimed('d', 'n', self.ip).value_pion_pole()
FPpsn_pion = FPprimed('s', 'n', self.ip).value_pion_pole()
FPpup_eta = FPprimed('u', 'p', self.ip).value_eta_pole()
FPpdp_eta = FPprimed('d', 'p', self.ip).value_eta_pole()
FPpsp_eta = FPprimed('s', 'p', self.ip).value_eta_pole()
FPpun_eta = FPprimed('u', 'n', self.ip).value_eta_pole()
FPpdn_eta = FPprimed('d', 'n', self.ip).value_eta_pole()
FPpsn_eta = FPprimed('s', 'n', self.ip).value_eta_pole()
FSup = FS('u', 'p', self.ip).value_zero_mom()
FSdp = FS('d', 'p', self.ip).value_zero_mom()
FSsp = FS('s', 'p', self.ip).value_zero_mom()
FSun = FS('u', 'n', self.ip).value_zero_mom()
FSdn = FS('d', 'n', self.ip).value_zero_mom()
FSsn = FS('s', 'n', self.ip).value_zero_mom()
FPup_pion = FP('u', 'p', self.ip).value_pion_pole()
FPdp_pion = FP('d', 'p', self.ip).value_pion_pole()
FPsp_pion = FP('s', 'p', self.ip).value_pion_pole()
FPun_pion = FP('u', 'n', self.ip).value_pion_pole()
FPdn_pion = FP('d', 'n', self.ip).value_pion_pole()
FPsn_pion = FP('s', 'n', self.ip).value_pion_pole()
FPup_eta = FP('u', 'p', self.ip).value_eta_pole()
FPdp_eta = FP('d', 'p', self.ip).value_eta_pole()
FPsp_eta = FP('s', 'p', self.ip).value_eta_pole()
FPun_eta = FP('u', 'n', self.ip).value_eta_pole()
FPdn_eta = FP('d', 'n', self.ip).value_eta_pole()
FPsn_eta = FP('s', 'n', self.ip).value_eta_pole()
FGp = FG('p', self.ip).value_zero_mom()
FGn = FG('n', self.ip).value_zero_mom()
FGtildep = FGtilde('p', self.ip).value_zero_mom()
FGtilden = FGtilde('n', self.ip).value_zero_mom()
FGtildep_pion = FGtilde('p', self.ip).value_pion_pole()
FGtilden_pion = FGtilde('n', self.ip).value_pion_pole()
FGtildep_eta = FGtilde('p', self.ip).value_eta_pole()
FGtilden_eta = FGtilde('n', self.ip).value_eta_pole()
FT0up = FT0('u', 'p', self.ip).value_zero_mom()
FT0dp = FT0('d', 'p', self.ip).value_zero_mom()
FT0sp = FT0('s', 'p', self.ip).value_zero_mom()
FT0un = FT0('u', 'n', self.ip).value_zero_mom()
FT0dn = FT0('d', 'n', self.ip).value_zero_mom()
FT0sn = FT0('s', 'n', self.ip).value_zero_mom()
FT1up = FT1('u', 'p', self.ip).value_zero_mom()
FT1dp = FT1('d', 'p', self.ip).value_zero_mom()
FT1sp = FT1('s', 'p', self.ip).value_zero_mom()
FT1un = FT1('u', 'n', self.ip).value_zero_mom()
FT1dn = FT1('d', 'n', self.ip).value_zero_mom()
FT1sn = FT1('s', 'n', self.ip).value_zero_mom()
FTW2up = FTwist2('u', 'p', self.ip).value_zero_mom()
FTW2dp = FTwist2('d', 'p', self.ip).value_zero_mom()
FTW2sp = FTwist2('s', 'p', self.ip).value_zero_mom()
FTW2gp = FTwist2('g', 'p', self.ip).value_zero_mom()
FTW2un = FTwist2('u', 'n', self.ip).value_zero_mom()
FTW2dn = FTwist2('d', 'n', self.ip).value_zero_mom()
FTW2sn = FTwist2('s', 'n', self.ip).value_zero_mom()
FTW2gn = FTwist2('g', 'n', self.ip).value_zero_mom()
### The coefficients ###
#
# Note that all dependence on 1/q^2, 1/(m^2-q^2), q^2/(m^2-q^2) is taken care of
# by defining spurious operators.
#
# Therefore, we need to split some of the coefficients
# into the "pion part" etc. with the q-dependence factored out,
# and introduce a few spurious "long-distance" operators.
#
# The coefficients cNR1 -- cNR12 correspond to the operators in 1611.00368 and 1308.6288
#
# Therefore, we define O13 = O6/(mpi^2+q^2);
# O14 = O6/(meta^2+q^2);
# O15 = O6*q^2/(mpi^2+q^2);
# O16 = O6*q^2/(meta^2+q^2);
# O17 = O10/(mpi^2+q^2);
# O18 = O10/(meta^2+q^2);
# O19 = O10*q^2/(mpi^2+q^2);
# O20 = O10*q^2/(meta^2+q^2);
#
# For the dipole interactions, these are the ones that have c2p1, c1N2, c2p2 as coefficients.
# Therefore, we define O21 = O5/q^2;
# O22 = O6/q^2.
# O23 = O11/q^2.
#
# For the tensors, O4 * q^2 appears as a leading contribution.
# Therefore, we define O104 = O4 * q^2
#
# For the tensors, O1 * q^2 appears as a subleading contribution.
# Therefore, we define O100 = O1 * q^2
#
# q^2 is here always the spatial part!!!
#
if RGE:
c3mu_dict = self.run(2)
else:
c3mu_dict = self.coeff_dict
if self.DM_type == "D":
my_cNR_dict = {
'cNR1p' : F1up*(c3mu_dict['C61u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\
+ F1dp*(c3mu_dict['C61d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C81d'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D63eu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D63ed'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D63muu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D63mud'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D63tauu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D63taud'])\
+ FGp*c3mu_dict['C71']\
+ FSup*c3mu_dict['C75u'] + FSdp*c3mu_dict['C75d'] + FSsp*c3mu_dict['C75s']\
- alpha/(2*np.pi*DM_mass)*c3mu_dict['C51']\
+ 2*DM_mass * (F1up*c3mu_dict['C715u'] + F1dp*c3mu_dict['C715d'] + F1sp*c3mu_dict['C715s'])\
+ FTW2up*c3mu_dict['C723u']\
+ FTW2dp*c3mu_dict['C723d']\
+ FTW2sp*c3mu_dict['C723s']\
+ FTW2gp*c3mu_dict['C725'],
'cNR2p' : 0,
'cNR3p' : F2sp * c3mu_dict['C61s'],
'cNR4p' : - 4*( FAup*(c3mu_dict['C64u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\
+ FAdp*(c3mu_dict['C64d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C84d'])\
+ FAsp*(c3mu_dict['C64s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C84s'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62ue'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62de'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62se'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62umu'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62dmu'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62smu'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62utau'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62dtau'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62stau']))\
- 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']\
+ 8*(FT0up*c3mu_dict['C79u'] + FT0dp*c3mu_dict['C79d'] + FT0sp*c3mu_dict['C79s']),
'cNR5p' : - 2*mN * (F1up*c3mu_dict['C719u'] + F1dp*c3mu_dict['C719d'] + F1sp*c3mu_dict['C719s']),
'cNR6p' : mN/DM_mass * FGtildep * c3mu_dict['C74']\
-2*mN*((F1up+F2up)*c3mu_dict['C719u']\
+ (F1dp+F2dp)*c3mu_dict['C719d']\
+ (F1sp+F2sp)*c3mu_dict['C719s'])\
+ mN/DM_mass * F2sp * c3mu_dict['C61s'],
'cNR7p' : - 2*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\
+ FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C83d'])\
+ FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C83s'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62ue'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62de'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62se'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62umu'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62dmu'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62smu'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62utau'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62dtau'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62stau']))\
- 4*DM_mass * (FAup*c3mu_dict['C717u'] + FAdp*c3mu_dict['C717d'] + FAsp*c3mu_dict['C717s']),
'cNR8p' : 2*( F1up*(c3mu_dict['C62u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\
+ F1dp*(c3mu_dict['C62d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C82d'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63eu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63ed'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63muu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mud'])\
+ F1up*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63tauu'])\
+ F1dp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taud'])),
'cNR9p' : 2*( (F1up+F2up)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\
+ (F1dp+F2dp)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C82d'])\
+ (F1sp+F2sp)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C82s'])\
+ (F1up+F2up)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63eu'])\
+ (F1dp+F2dp)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63ed'])\
+ (F1sp+F2sp)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63es'])\
+ (F1up+F2up)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63muu'])\
+ (F1dp+F2dp)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mud'])\
+ (F1sp+F2sp)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mus'])\
+ (F1up+F2up)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63tauu'])\
+ (F1dp+F2dp)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taud'])\
+ (F1sp+F2sp)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taus']))
+ 2*mN*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\
+ FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C83d'])\
+ FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C83s'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62ue'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62de'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62se'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62umu'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62dmu'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62smu'])\
+ FAup*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62utau'])\
+ FAdp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62dtau'])\
+ FAsp*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62stau']))/DM_mass\
- 4*mN * (FAup*c3mu_dict['C721u'] + FAdp*c3mu_dict['C721d'] + FAsp*c3mu_dict['C721s']),
'cNR10p' : FGtildep * c3mu_dict['C73']\
-2*mN/DM_mass * (FT0up*c3mu_dict['C710u']\
+ FT0dp*c3mu_dict['C710d']\
+ FT0sp*c3mu_dict['C710s']),
'cNR11p' : - mN/DM_mass * (FSup*c3mu_dict['C76u']\
+ FSdp*c3mu_dict['C76d']\
+ FSsp*c3mu_dict['C76s'])\
- mN/DM_mass * FGp * c3mu_dict['C72']\
+ 2*((FT0up-FT1up)*c3mu_dict['C710u']\
+ (FT0dp-FT1dp)*c3mu_dict['C710d']\
+ (FT0sp-FT1sp)*c3mu_dict['C710s'])\
- 2*mN * ( F1up*(c3mu_dict['C716u']+c3mu_dict['C720u'])\
+ F1dp*(c3mu_dict['C716d']+c3mu_dict['C720d'])\
+ F1sp*(c3mu_dict['C716s']+c3mu_dict['C720s'])),
'cNR12p' : -8*(FT0up*c3mu_dict['C710u'] + FT0dp*c3mu_dict['C710d'] + FT0sp*c3mu_dict['C710s']),
'cNR13p' : mN/DM_mass * (FPup_pion*c3mu_dict['C78u'] + FPdp_pion*c3mu_dict['C78d'])\
+ FPpup_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\
+ FPpdp_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C84d'])\
+ FPpup_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62ue'])\
+ FPpdp_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62de'])\
+ FPpup_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62umu'])\
+ FPpdp_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62dmu'])\
+ FPpup_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62utau'])\
+ FPpdp_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62dtau']),
'cNR14p' : mN/DM_mass * (FPup_eta*c3mu_dict['C78u']\
+ FPdp_eta*c3mu_dict['C78d']\
+ FPsp_eta*c3mu_dict['C78s'])\
+ FPpup_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\
+ FPpdp_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C84d'])\
+ FPpsp_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C84s'])\
+ FPpup_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62ue'])\
+ FPpdp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62de'])\
+ FPpsp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62se'])\
+ FPpup_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62umu'])\
+ FPpdp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62dmu'])\
+ FPpsp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62smu'])\
+ FPpup_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62utau'])\
+ FPpdp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62dtau'])\
+ FPpsp_eta*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62stau'])\
+ 4*mN * ( FAup*(c3mu_dict['C718u']+c3mu_dict['C722u'])\
+ FAdp*(c3mu_dict['C718d']+c3mu_dict['C722d'])\
+ FAsp*(c3mu_dict['C718s']+c3mu_dict['C722s'])),
'cNR15p' : mN/DM_mass * FGtildep_pion * c3mu_dict['C74'],
'cNR16p' : mN/DM_mass * FGtildep_eta * c3mu_dict['C74'],
'cNR17p' : FPup_pion*c3mu_dict['C77u'] + FPdp_pion*c3mu_dict['C77d'],
'cNR18p' : FPup_eta*c3mu_dict['C77u'] + FPdp_eta*c3mu_dict['C77d'] + FPsp_eta*c3mu_dict['C77s'],
'cNR19p' : FGtildep_pion * c3mu_dict['C73'],
'cNR20p' : FGtildep_eta * c3mu_dict['C73'],
'cNR21p' : mN* (2*alpha/np.pi*c3mu_dict['C51']),
'cNR22p' : -mN**2* (- 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']),
'cNR23p' : mN* (2*alpha/np.pi*c3mu_dict['C52']),
'cNR100p' : ( F1up*c3mu_dict['C719u']\
+ F1dp*c3mu_dict['C719d']\
+ F1sp*c3mu_dict['C719s'])/(2*DM_mass)\
+ (F1spslope - F2sp / mN**2/4) * c3mu_dict['C61s'],
'cNR104p' : 2*((F1up+F2up)*c3mu_dict['C719u']\
+ (F1dp+F2dp)*c3mu_dict['C719d']\
+ (F1sp+F2sp)*c3mu_dict['C719s'])/mN\
- 1/mN/DM_mass * F2sp * c3mu_dict['C61s'],
'cNR1n' : F1un*(c3mu_dict['C61u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\
+ F1dn*(c3mu_dict['C61d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C81d'])\
+ FGn*c3mu_dict['C71']\
+ FSun*c3mu_dict['C75u'] + FSdn*c3mu_dict['C75d'] + FSsn*c3mu_dict['C75s']\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D63eu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D63ed'])\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D63muu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D63mud'])\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D63tauu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D63taud'])\
+ 2*DM_mass * (F1un*c3mu_dict['C715u'] + F1dn*c3mu_dict['C715d'] + F1sn*c3mu_dict['C715s'])\
+ FTW2un*c3mu_dict['C723u']\
+ FTW2dn*c3mu_dict['C723d']\
+ FTW2sn*c3mu_dict['C723s']\
+ FTW2gn*c3mu_dict['C725'],
'cNR2n' : 0,
'cNR3n' : F2sn * c3mu_dict['C61s'],
'cNR4n' : - 4*( FAun*(c3mu_dict['C64u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\
+ FAdn*(c3mu_dict['C64d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C84d'])\
+ FAsn*(c3mu_dict['C64s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C84s'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62ue'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62de'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62se'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62umu'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62dmu'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62smu'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62utau'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62dtau'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62stau']))\
- 2*alpha/np.pi * self.ip['mun']/mN * c3mu_dict['C51']\
+ 8*(FT0un*c3mu_dict['C79u'] + FT0dn*c3mu_dict['C79d'] + FT0sn*c3mu_dict['C79s']),
'cNR5n' : - 2*mN * (F1un*c3mu_dict['C719u'] + F1dn*c3mu_dict['C719d'] + F1sn*c3mu_dict['C719s']),
'cNR6n' : mN/DM_mass * FGtilden * c3mu_dict['C74']\
-2*mN*((F1un+F2un)*c3mu_dict['C719u']\
+ (F1dn+F2dn)*c3mu_dict['C719d']\
+ (F1sn+F2sn)*c3mu_dict['C719s'])\
+ mN/DM_mass * F2sn * c3mu_dict['C61s'],
'cNR7n' : - 2*( FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\
+ FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C83d'])\
+ FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C83s'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62ue'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62de'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62se'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62umu'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62dmu'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62smu'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62utau'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62dtau'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62stau']))\
- 4*DM_mass * (FAun*c3mu_dict['C717u'] + FAdn*c3mu_dict['C717d']+ FAsn*c3mu_dict['C717s']),
'cNR8n' : 2*( F1un*(c3mu_dict['C62u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\
+ F1dn*(c3mu_dict['C62d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C82d'])\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63eu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63ed'])\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63muu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mud'])\
+ F1un*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63tauu'])\
+ F1dn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taud'])),
'cNR9n' : 2*( (F1un+F2un)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\
+ (F1dn+F2dn)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C82d'])\
+ (F1sn+F2sn)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C82s'])\
+ (F1un+F2un)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63eu'])\
+ (F1dn+F2dn)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63ed'])\
+ (F1sn+F2sn)*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D63es'])\
+ (F1un+F2un)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63muu'])\
+ (F1dn+F2dn)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mud'])\
+ (F1sn+F2sn)*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D63mus'])\
+ (F1un+F2un)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63tauu'])\
+ (F1dn+F2dn)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taud'])\
+ (F1sn+F2sn)*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D63taus']))
+ 2*mN*( FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\
+ FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C83d'])\
+ FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*wmws*ms**2 / gs2_2GeV * c3mu_dict['C83s'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62ue'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62de'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C63e'] * c3mu_dict['D62se'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62umu'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62dmu'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C63mu'] * c3mu_dict['D62smu'])\
+ FAun*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62utau'])\
+ FAdn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62dtau'])\
+ FAsn*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C63tau'] * c3mu_dict['D62stau']))/DM_mass\
- 4*mN * (FAun*c3mu_dict['C721u']\
+ FAdn*c3mu_dict['C721d']\
+ FAsn*c3mu_dict['C721s']),
'cNR10n' : FGtilden * c3mu_dict['C73']\
-2*mN/DM_mass * (FT0un*c3mu_dict['C710u']\
+ FT0dn*c3mu_dict['C710d']\
+ FT0sn*c3mu_dict['C710s']),
'cNR11n' : - mN/DM_mass * (FSun*c3mu_dict['C76u']\
+ FSdn*c3mu_dict['C76d']\
+ FSsn*c3mu_dict['C76s'])\
- mN/DM_mass * FGn * c3mu_dict['C72']\
+ 2*((FT0un-FT1un)*c3mu_dict['C710u']\
+ (FT0dn-FT1dn)*c3mu_dict['C710d']\
+ (FT0sn-FT1sn)*c3mu_dict['C710s'])\
- 2*mN * ( F1un*(c3mu_dict['C716u']+c3mu_dict['C720u'])\
+ F1dn*(c3mu_dict['C716d']+c3mu_dict['C720d'])\
+ F1sn*(c3mu_dict['C716s']+c3mu_dict['C720s'])),
'cNR12n' : -8*(FT0un*c3mu_dict['C710u'] + FT0dn*c3mu_dict['C710d'] + FT0sn*c3mu_dict['C710s']),
'cNR13n' : mN/DM_mass * (FPun_pion*c3mu_dict['C78u'] + FPdn_pion*c3mu_dict['C78d'])\
+ FPpun_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*wmws*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\
+ FPpdn_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*wmws*md**2 / gs2_2GeV * c3mu_dict['C84d'])\
+ FPpun_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62ue'])\
+ FPpdn_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * me**2 * np.log(2/MZ)\
* c3mu_dict['C64e'] * c3mu_dict['D62de'])\
+ FPpun_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62umu'])\
+ FPpdn_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mmu**2 * np.log(2/MZ)\
* c3mu_dict['C64mu'] * c3mu_dict['D62dmu'])\
+ FPpun_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 * np.log(2/MZ)\
* c3mu_dict['C64tau'] * c3mu_dict['D62utau'])\
+ FPpdn_pion*(np.sqrt(2)*GF*wmws/np.pi**2 * mtau**2 *
|
np.log(2/MZ)
|
numpy.log
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 15:39:34 2020
@author: dsbrown
"""
import numpy as np
import utils
from scipy.optimize import linprog
from interface import implements, Interface
import sys
#acts as abstract class
class MDP(Interface):
def get_num_actions(self):
pass
def get_reward_dimensionality(self):
pass
def set_reward_fn(self, new_reward):
pass
def get_transition_prob(self, s1,a,s2):
pass
def get_num_states(self):
pass
def get_readable_actions(self, action_num):
pass
def get_state_action_rewards(self):
pass
def uses_linear_approximation(self):
pass
def transform_to_R_sa(self, reward_weights):
#mainly used for BIRL to take hypothesis reward and transform it
#take in representation of reward weights and return vectorized version of R_sa
#R_sa = [R(s0,a0), .., R(sn,a0), ...R(s0,am),..., R(sn,am)]
pass
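# Toy illustration of the layout above (assumed numbers, 2 states and 2 actions):
# if R(s0,a0)=1, R(s1,a0)=2, R(s0,a1)=3, R(s1,a1)=4 then
# R_sa = np.array([1., 2., 3., 4.])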
def get_transition_prob_matrices(self):
#return a list of transition matrices for each action a_0 through a_m
pass
class ChainMDP(implements(MDP)):
#basic MDP class that has two actions (left, right), no terminal states and is a chain mdp with deterministic transitions
def __init__(self, num_states, r_sa, gamma, init_dist):
self.num_actions = 2
self.num_rows = 1
self.num_cols = num_states
self.num_states = num_states
self.gamma = gamma
self.init_dist = init_dist
self.terminals = []
self.r_sa = r_sa
self.init_states = []
for s in range(self.num_states):
if self.init_dist[s] > 0:
self.init_states.append(s)
self.P_left = self.get_transitions(policy="left")
#print("P_left\n",self.P_left)
self.P_right = self.get_transitions(policy="right")
#print("P_right\n",self.P_right)
self.Ps = [self.P_left, self.P_right]
def get_transition_prob_matrices(self):
return self.Ps
def get_num_actions(self):
return self.num_actions
def transform_to_R_sa(self, reward_weights):
#Don't do anything, reward_weights should be r_sa
assert(len(reward_weights) == len(self.r_sa))
return reward_weights
def get_readable_actions(self, action_num):
if action_num == 0:
return "<"
elif action_num == 1:
return ">"
else:
print("error, only two possible actions")
sys.exit()
def get_num_states(self):
return self.num_states
def get_reward_dimensionality(self):
return len(self.r_sa)
def uses_linear_approximation(self):
return False
def set_reward_fn(self, new_reward):
self.r_sa = new_reward
def get_state_action_rewards(self):
return self.r_sa
def get_transition_prob(self, s1,a,s2):
return self.Ps[a][s1][s2]
def get_transitions(self, policy):
P_pi = np.zeros((self.num_states, self.num_states))
if policy == "left": #action 0
#always transition one to left unless already at left border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if c > 0:
P_pi[cnt, cnt - 1] = 1.0
else:
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
elif policy == "right": #action 1
#always transition one to right unless already at right border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if c < self.num_cols - 1:
#transition to next state to right
P_pi[cnt, cnt + 1] = 1.0
else:
#self transition
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
return P_pi
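# Minimal ChainMDP usage sketch (toy values, not part of the original module):
# init_dist = np.array([1., 0., 0.])
# r_sa = np.array([0., 0., 0., 0., 0., 1.])   # [R(s,left) for all s] followed by [R(s,right) for all s]
# chain = ChainMDP(3, r_sa, gamma=0.95, init_dist=init_dist)
# chain.get_transition_prob(0, 1, 1)          # -> 1.0: action "right" moves state 0 to state 1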
class MachineReplacementMDP(ChainMDP):
#machine replacement MDP with two actions (noop, repair): "noop" advances one step along the chain (wrapping to the start), "repair" resets to the first state; no terminal states, deterministic transitions
def __init__(self, num_states, r_sa, gamma, init_dist):
self.num_actions = 2
self.num_rows = 1
self.num_cols = num_states
self.num_states = num_states
self.gamma = gamma
self.init_dist = init_dist
self.terminals = []
self.r_sa = r_sa
self.P_noop = self.get_transitions(policy="noop")
#print("P_left\n",self.P_left)
self.P_repair = self.get_transitions(policy="repair")
#print("P_right\n",self.P_right)
self.Ps = [self.P_noop, self.P_repair]
def get_readable_actions(self, action_num):
if action_num == 0:
return "noop" #no-op
elif action_num == 1:
return "repair" #repair
else:
print("error, only two possible actions")
sys.exit()
def get_transitions(self, policy):
P_pi = np.zeros((self.num_states, self.num_states))
if policy == "noop": #action 0
#always transition to one state farther in chain unless at the last state where you go to the beginning
for c in range(self.num_cols):
if c < self.num_cols - 1:
#continue to the right
P_pi[c, c + 1] = 1.0
else:
#go back to the beginning
P_pi[c,0] = 1.0
elif policy == "repair": #action 1
#always transition back to the first state
for c in range(self.num_cols):
P_pi[c,0] = 1.0
return P_pi
class BasicGridMDP(implements(MDP)):
#basic MDP class that has four actions, possible terminal states and is a grid with deterministic transitions
def __init__(self, num_rows, num_cols, r_s, gamma, init_dist, terminals = [], debug=False):
self.num_actions = 4
self.num_rows = num_rows
self.num_cols = num_cols
self.num_states = num_rows * num_cols
self.gamma = gamma
self.init_dist = init_dist
self.terminals = terminals
self.debug = debug
self.r_s = r_s
self.r_sa = self.transform_to_R_sa(self.r_s)
#print("transformed R(s,a)", self.r_sa)
self.init_states = []
for s in range(self.num_states):
if self.init_dist[s] > 0:
self.init_states.append(s)
self.P_left = self.get_transitions(policy="left")
if self.debug: print("P_left\n",self.P_left)
self.P_right = self.get_transitions(policy="right")
if self.debug: print("P_right\n",self.P_right)
self.P_up = self.get_transitions(policy="up")
if self.debug: print("_up\n",self.P_up)
self.P_down = self.get_transitions(policy="down")
if self.debug: print("P_down\n",self.P_down)
self.Ps = [self.P_left, self.P_right, self.P_up, self.P_down] #actions:0,1,2,3
def get_transition_prob_matrices(self):
return self.Ps
def get_num_actions(self):
return self.num_actions
def get_num_states(self):
return self.num_states
def get_readable_actions(self, action_num):
if action_num == 0:
return "<"
elif action_num == 1:
return ">"
elif action_num == 2:
return "^"
elif action_num == 3:
return "v"
else:
print("error, only four possible actions")
sys.exit()
def get_transition_prob(self, s1,a,s2):
return self.Ps[a][s1][s2]
#Note that I'm using r_s as the reward dim not r_sa!
def get_reward_dimensionality(self):
return len(self.r_s)
#NOTE: the dimensionality still needs to be checked.
def uses_linear_approximation(self):
return False
def get_state_action_rewards(self):
return self.r_sa
#assume new reward is of the form r_s
def set_reward_fn(self, new_reward):
self.r_s = new_reward
#also update r_sa
self.r_sa = self.transform_to_R_sa(self.r_s)
#transform R(s) into R(s,a) for use in LP
def transform_to_R_sa(self, reward_weights):
#assume that reward_weights is r_s
#tile to get r_sa from r
'''input: numpy array R_s, output R_sa'''
#print(len(R_s))
#print(self.num_states)
#just repeat values since R_sa = [R(s1,a1), R(s2,a1),...,R(sn,a1), R(s1,a2), R(s2,a2),..., R(sn,am)]
assert(len(reward_weights) == self.num_states)
return np.tile(reward_weights, self.num_actions)
def get_transitions(self, policy):
P_pi = np.zeros((self.num_states, self.num_states))
if policy == "left": #action 0
#always transition one to left unless already at left border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if cnt not in self.terminals: #no transitions out of terminal
if c > 0:
P_pi[cnt, cnt - 1] = 1.0
else:
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
elif policy == "right": #action 1
#always transition one to right unless already at right border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if cnt not in self.terminals: #no transitions out of terminal
if c < self.num_cols - 1:
#transition to next state to right
P_pi[cnt, cnt + 1] = 1.0
else:
#self transition
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
elif policy == "up": #action 2
#always transition one up unless already at top border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if cnt not in self.terminals: #no transitions out of terminal
if r > 0:
P_pi[cnt, cnt - self.num_cols] = 1.0
else:
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
elif policy == "down": #action 3
#always transition one down unless already at bottom border
cnt = 0
for r in range(self.num_rows):
for c in range(self.num_cols):
if cnt not in self.terminals: #no transitions out of terminal
if r < self.num_rows - 1:
P_pi[cnt, cnt + self.num_cols] = 1.0
else:
P_pi[cnt,cnt] = 1.0
#increment state count
cnt += 1
return P_pi
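# Minimal BasicGridMDP usage sketch (toy 2x2 grid; the values are assumptions for illustration):
# init_dist = np.array([0.25, 0.25, 0.25, 0.25])
# r_s = np.array([0., 0., 0., 1.])            # reward only in the bottom-right state
# grid = BasicGridMDP(2, 2, r_s, gamma=0.95, init_dist=init_dist, terminals=[3])
# grid.get_transition_prob(0, 1, 1)           # -> 1.0: ">" moves state 0 to state 1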
class FeaturizedGridMDP(BasicGridMDP):
def __init__(self,num_rows, num_cols, state_feature_matrix, feature_weights, gamma, init_dist, terminals = [], debug=False):
self.num_actions = 4
self.num_rows = num_rows
self.num_cols = num_cols
self.num_states = num_rows * num_cols
self.gamma = gamma
self.init_dist = init_dist
self.terminals = terminals
self.debug = debug
self.init_states = []
for s in range(self.num_states):
if self.init_dist[s] > 0:
self.init_states.append(s)
self.P_left = self.get_transitions(policy="left")
if self.debug: print("P_left\n",self.P_left)
self.P_right = self.get_transitions(policy="right")
if self.debug: print("P_right\n",self.P_right)
self.P_up = self.get_transitions(policy="up")
if self.debug: print("_up\n",self.P_up)
self.P_down = self.get_transitions(policy="down")
if self.debug: print("P_down\n",self.P_down)
self.Ps = [self.P_left, self.P_right, self.P_up, self.P_down] #actions:0,1,2,3
#figure out reward function
self.state_features = state_feature_matrix
self.feature_weights = feature_weights
r_s = np.dot(self.state_features, self.feature_weights)
#print("r_s", r_s)
self.r_s = r_s
self.r_sa = self.transform_to_R_sa(self.feature_weights)
#print("transformed R(s,a)", self.r_sa)
def get_reward_dimensionality(self):
return len(self.feature_weights)
def uses_linear_approximation(self):
return True
def set_reward_fn(self, new_reward):
#input is the new_reward weights
assert(len(new_reward) == len(self.feature_weights))
#update feature weights
self.feature_weights = new_reward.copy()
#update r_s
self.r_s = np.dot(self.state_features, new_reward)
#update r_sa
self.r_sa = np.tile(self.r_s, self.num_actions)
def transform_to_R_sa(self, reward_weights):
#assumes that inputs are the reward feature weights or state rewards
#returns the vectorized R_sa
#first get R_s
if len(reward_weights) == self.get_reward_dimensionality():
R_s = np.dot(self.state_features, reward_weights)
elif len(reward_weights) == self.num_states:
R_s = reward_weights
else:
print("Error, reward weights should be features or state rewards")
sys.exit()
return np.tile(R_s, self.num_actions)
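# Sketch of the linear feature reward used by this class (toy one-hot features, assumed values):
# phi = np.array([[1., 0.], [0., 1.]])        # state feature matrix for a 1x2 grid
# w = np.array([0., 1.])                      # feature weights
# fmdp = FeaturizedGridMDP(1, 2, phi, w, gamma=0.9, init_dist=np.array([1., 0.]))
# fmdp.get_state_action_rewards()             # -> np.tile(phi.dot(w), 4) = [0., 1., 0., 1., 0., 1., 0., 1.]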
def get_windy_down_grid_transitions(mdp_env, slip_prob):
num_rows, num_cols = mdp_env.num_rows, mdp_env.num_cols
num_states = mdp_env.num_states
terminals = mdp_env.terminals
#action 0 LEFT
P_left = np.zeros((num_states, num_states))
#always transition one to left unless already at left border
prob_slip = slip_prob
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
#check columns
if c == 0:
P_left[cnt,cnt] += 1.0 - prob_slip #self loop if already at left border
if r < num_rows - 1: # check if above last row
P_left[cnt, cnt + num_cols] += prob_slip
else:
P_left[cnt,cnt] += prob_slip
else: #c > 0
P_left[cnt, cnt - 1] = 1.0 - prob_slip
if r < num_rows - 1: # check if above last row
P_left[cnt, cnt - 1 + num_cols] += prob_slip
else:
P_left[cnt,cnt - 1] += prob_slip
#increment state count
cnt += 1
#action 1 RIGHT
P_right = np.zeros((num_states, num_states))
#always transition one to right unless already at right border
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
if c < num_cols - 1:
#transition to next state to right
P_right[cnt, cnt + 1] += 1.0 - prob_slip
if r < num_rows - 1: # check if above last row
P_right[cnt, cnt + 1 + num_cols] += prob_slip
else:
P_right[cnt,cnt + 1] += prob_slip
else: # c == num_cols - 1 (at right edge of world)
#self transition
P_right[cnt,cnt] = 1.0 - prob_slip
if r < num_rows - 1: # check if above last row
P_right[cnt, cnt + num_cols] += prob_slip
else: # bottom right corner
P_right[cnt,cnt] += prob_slip
#increment state count
cnt += 1
#action 2 UP
#Let's say it pushes you left or right with prob_slip / 2
P_up = np.zeros((num_states, num_states))
#always transition one up unless already at top border; the wind may push diagonally left/right
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
if r > 0:
P_up[cnt, cnt - num_cols] = 1.0 - prob_slip
if c == 0:
P_up[cnt, cnt - num_cols] += prob_slip / 2 #go up left and run into wall
P_up[cnt, cnt - num_cols + 1] += prob_slip / 2
elif c == num_cols - 1:
P_up[cnt, cnt - num_cols - 1] += prob_slip / 2 #go up left and run into wall
P_up[cnt, cnt - num_cols] += prob_slip / 2
else:
P_up[cnt, cnt - num_cols - 1] += prob_slip / 2 #go up left
P_up[cnt, cnt - num_cols + 1] += prob_slip / 2 #go up and right
else: #r == 0
P_up[cnt, cnt] = 1.0 - prob_slip
if c == 0:
P_up[cnt, cnt] += prob_slip / 2 #go up left and run into wall
P_up[cnt, cnt + 1] += prob_slip / 2
elif c == num_cols - 1:
P_up[cnt, cnt] += prob_slip / 2 #go up left and run into wall
P_up[cnt, cnt - 1] += prob_slip / 2
else:
P_up[cnt, cnt - 1] += prob_slip / 2 #up is blocked by the top wall, slip left
P_up[cnt, cnt + 1] += prob_slip / 2 #up is blocked by the top wall, slip right
#increment state count
cnt += 1
#action 3 DOWN
P_down = np.zeros((num_states, num_states))
#always transition one down unless already at bottom border; the wind may push diagonally left/right
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
if r < num_rows - 1:
P_down[cnt, cnt + num_cols] = 1.0 - prob_slip
if c == 0:
P_down[cnt, cnt + num_cols] += prob_slip / 2 #go down left and run into wall
P_down[cnt, cnt + num_cols + 1] += prob_slip / 2
elif c == num_cols - 1:
P_down[cnt, cnt + num_cols - 1] += prob_slip / 2 #go down left
P_down[cnt, cnt + num_cols] += prob_slip / 2
else:
P_down[cnt, cnt + num_cols - 1] += prob_slip / 2 #go down left
P_down[cnt, cnt + num_cols + 1] += prob_slip / 2 #go down and right
else: #r == num_rows - 1
P_down[cnt, cnt] = 1.0 - prob_slip
if c == 0:
P_down[cnt, cnt] += prob_slip / 2 #go down left and run into wall
P_down[cnt, cnt + 1] += prob_slip / 2
elif c == num_cols - 1:
P_down[cnt, cnt] += prob_slip / 2 #go down right and run into wall
P_down[cnt, cnt - 1] += prob_slip / 2
else:
P_down[cnt, cnt - 1] += prob_slip / 2 #down blocked by bottom wall, slip left
P_down[cnt, cnt + 1] += prob_slip / 2 #down blocked by bottom wall, slip right
#increment state count
cnt += 1
Ps = [P_left, P_right, P_up, P_down] #actions:0,1,2,3
return Ps
#just assume that every action away from left and right edges has windy prob of pushing down when taking any action
def get_windy_down_const_prob_transitions(mdp_env, slip_prob):
num_rows, num_cols = mdp_env.num_rows, mdp_env.num_cols
num_states = mdp_env.num_states
terminals = mdp_env.terminals
#action 0 LEFT
P_left = np.zeros((num_states, num_states))
#always transition one to left unless already at left border
prob_slip = slip_prob
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
#check columns
if c == 0:
P_left[cnt,cnt] = 1.0
elif c == num_cols - 1:
P_left[cnt,cnt - 1] = 1.0
else: #c > 0 and c < num_cols - 1 so possibly slip
P_left[cnt, cnt - 1] = 1.0 - prob_slip
if r < num_rows - 1: # check if above last row
P_left[cnt, cnt + num_cols] += prob_slip #slip down
else:
P_left[cnt,cnt] += prob_slip
#increment state count
cnt += 1
#action 1 RIGHT
P_right = np.zeros((num_states, num_states))
#always transition one to right unless already at right border
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
if c == 0:
P_right[cnt,cnt+1] = 1.0
elif c < num_cols - 1:
#transition to next state to right or go down instead
P_right[cnt, cnt + 1] += 1.0 - prob_slip
if r < num_rows - 1: # check if above last row
P_right[cnt, cnt + num_cols] += prob_slip
else:
P_right[cnt,cnt] += prob_slip
else: # c == num_cols - 1 (at right edge of world)
#self transition
P_right[cnt,cnt] = 1.0 #just bump into wall on right
#increment state count
cnt += 1
#action 2 UP
#Away from the left/right edges the wind pushes you down with prob_slip instead
P_up = np.zeros((num_states, num_states))
#always transition one up unless already at top border; away from the side edges the wind may push down instead
cnt = 0
for r in range(num_rows):
for c in range(num_cols):
if cnt not in terminals: #no transitions out of terminal
if r > 0:
P_up[cnt, cnt - num_cols] = 1.0 - prob_slip
if c == 0:
P_up[cnt, cnt - num_cols] += prob_slip #go up
elif c == num_cols - 1:
P_up[cnt, cnt - num_cols] += prob_slip #go up
else: #maybe go down instead
if r < num_rows - 1:
P_up[cnt, cnt + num_cols] += prob_slip
else:
P_up[cnt, cnt] += prob_slip
else: #r == 0
P_up[cnt, cnt] = 1.0 - prob_slip
if c == 0: # no slip
P_up[cnt, cnt] += prob_slip
elif c == num_cols - 1: #no slip
P_up[cnt, cnt] += prob_slip
else: #slip down maybe
P_up[cnt, cnt + num_cols] += prob_slip
#increment state count
cnt += 1
#action 3 DOWN
P_down =
|
np.zeros((num_states, num_states))
|
numpy.zeros
|
"""Miscellaneous small functions repeatedly used in tiatoolbox."""
import copy
import json
import os
import pathlib
import zipfile
from typing import Union
import cv2
import numpy as np
import pandas as pd
import requests
import torch
import yaml
from skimage import exposure
from tiatoolbox.utils.exceptions import FileNotSupported
def split_path_name_ext(full_path):
"""Split path of a file to directory path, file name and extensions.
Args:
full_path (str or pathlib.Path):
Path to a file.
Returns:
tuple:
Three parts of the input file path:
- :py:obj:`pathlib.Path` - Parent directory path
- :py:obj:`str` - File name
- :py:obj:`list(str)` - File extensions
Examples:
>>> from tiatoolbox import utils
>>> dir_path, file_name, extensions =
... utils.misc.split_path_name_ext(full_path)
"""
input_path = pathlib.Path(full_path)
return input_path.parent.absolute(), input_path.name, input_path.suffixes
def grab_files_from_dir(input_path, file_types=("*.jpg", "*.png", "*.tif")):
"""Grab file paths specified by file extensions.
Args:
input_path (str or pathlib.Path):
Path to the directory where files
need to be searched.
file_types (str or tuple(str)):
File types (extensions) to be searched.
Returns:
list:
File paths as a python list. It has been sorted to ensure
the same ordering across platforms.
Examples:
>>> from tiatoolbox import utils
>>> file_types = ("*.ndpi", "*.svs", "*.mrxs")
>>> files_all = utils.misc.grab_files_from_dir(input_path,
... file_types=file_types)
"""
input_path = pathlib.Path(input_path)
if isinstance(file_types, str):
if len(file_types.split(",")) > 1:
file_types = tuple(file_types.replace(" ", "").split(","))
else:
file_types = (file_types,)
files_grabbed = []
for files in file_types:
files_grabbed.extend(input_path.glob(files))
# Ensure same ordering
files_grabbed.sort()
return list(files_grabbed)
def save_yaml(
input_dict: dict,
output_path="output.yaml",
parents: bool = False,
exist_ok: bool = False,
):
"""Save dictionary as yaml.
Args:
input_dict (dict):
A variable of type 'dict'.
output_path (str or pathlib.Path):
Path to save the output file.
parents (bool):
Make parent directories if they do not exist. Default is
False.
exist_ok (bool):
Overwrite the output file if it exists. Default is False.
Examples:
>>> from tiatoolbox import utils
>>> input_dict = {'hello': 'Hello World!'}
>>> utils.misc.save_yaml(input_dict, './hello.yaml')
"""
path = pathlib.Path(output_path)
if path.exists() and not exist_ok:
raise FileExistsError("File already exists.")
if parents:
path.parent.mkdir(parents=True, exist_ok=True)
with open( # skipcq: PTC-W6004
str(pathlib.Path(output_path)), "w"
) as yaml_file:
yaml.dump(input_dict, yaml_file)
def imwrite(image_path, img) -> None:
"""Write numpy array to an image.
Args:
image_path (str or pathlib.Path):
File path (including extension) to save image to.
img (:class:`numpy.ndarray`):
Image array of dtype uint8, MxNx3.
Examples:
>>> from tiatoolbox import utils
>>> import numpy as np
>>> utils.misc.imwrite('BlankImage.jpg',
... np.ones([100, 100, 3]).astype('uint8')*255)
"""
if isinstance(image_path, pathlib.Path):
image_path = str(image_path)
cv2.imwrite(image_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
def imread(image_path, as_uint8=True):
"""Read an image as numpy array.
Args:
image_path (str or pathlib.Path):
File path (including extension) to read image.
as_uint8 (bool):
Read an image in uint8 format.
Returns:
:class:`numpy.ndarray`:
Image array of dtype uint8, MxNx3.
Examples:
>>> from tiatoolbox import utils
>>> img = utils.misc.imread('ImagePath.jpg')
"""
if isinstance(image_path, pathlib.Path):
image_path = str(image_path)
if pathlib.Path(image_path).suffix == ".npy":
image = np.load(image_path)
else:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if as_uint8:
return image.astype(np.uint8)
return image
def load_stain_matrix(stain_matrix_input):
"""Load a stain matrix as a numpy array.
Args:
stain_matrix_input (ndarray or str, pathlib.Path):
Either a 2x3 or 3x3 numpy array or a path to a saved .npy /
.csv file. If using a .csv file, there should be no column
headers provided
Returns:
stain_matrix (:class:`numpy.ndarray`):
The loaded stain matrix.
Examples:
>>> from tiatoolbox import utils
>>> sm = utils.misc.load_stain_matrix(stain_matrix_input)
"""
if isinstance(stain_matrix_input, (str, pathlib.Path)):
_, __, suffixes = split_path_name_ext(stain_matrix_input)
if suffixes[-1] not in [".csv", ".npy"]:
raise FileNotSupported(
"If supplying a path to a stain matrix, use either a \
npy or a csv file"
)
if suffixes[-1] == ".csv":
return pd.read_csv(stain_matrix_input).to_numpy()
# only other option left for suffix[-1] is .npy
return np.load(str(stain_matrix_input))
if isinstance(stain_matrix_input, np.ndarray):
return stain_matrix_input
raise TypeError(
"Stain_matrix must be either a path to npy/csv file or a numpy array"
)
def get_luminosity_tissue_mask(img, threshold):
"""Get tissue mask based on the luminosity of the input image.
Args:
img (:class:`numpy.ndarray`):
Input image used to obtain tissue mask.
threshold (float):
Luminosity threshold used to determine tissue area.
Returns:
tissue_mask (:class:`numpy.ndarray`):
Binary tissue mask.
Examples:
>>> from tiatoolbox import utils
>>> tissue_mask = utils.misc.get_luminosity_tissue_mask(img, threshold=0.8)
"""
img = img.astype("uint8") # ensure input image is uint8
img = contrast_enhancer(img, low_p=2, high_p=98) # Contrast enhancement
img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
l_lab = img_lab[:, :, 0] / 255.0 # Convert to range [0,1].
tissue_mask = l_lab < threshold
# check it's not empty
if tissue_mask.sum() == 0:
raise ValueError("Empty tissue mask computed.")
return tissue_mask
def mpp2common_objective_power(
mpp, common_powers=(1, 1.25, 2, 2.5, 4, 5, 10, 20, 40, 60, 90, 100)
):
"""Approximate (commonly used value) of objective power from mpp.
Uses :func:`mpp2objective_power` to estimate and then rounds to the
nearest value in `common_powers`.
Args:
mpp (float or tuple(float)): Microns per-pixel.
common_powers (tuple or list of float): A sequence of objective
power values to round to. Defaults to
(1, 1.25, 2, 2.5, 4, 5, 10, 20, 40, 60, 90, 100).
Returns:
float:
Objective power approximation.
Examples:
>>> mpp2common_objective_power(0.253)
array(40)
>>> mpp2common_objective_power(
... [0.253, 0.478],
... common_powers=(10, 20, 40),
... )
array([40, 20])
"""
op = mpp2objective_power(mpp)
distances = [np.abs(op - power) for power in common_powers]
return common_powers[
|
np.argmin(distances)
|
numpy.argmin
|
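A small, self-contained illustration of the np.argmin(distances) completion above; the estimated power 38.7 and the shortened common_powers tuple are made-up values:

import numpy as np

# Round an estimated objective power to the nearest entry of a fixed list.
common_powers = (10, 20, 40)
op = 38.7
distances = [np.abs(op - power) for power in common_powers]
print(common_powers[np.argmin(distances)])  # -> 40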
import sys
import time
import numpy as np
import os
from sklearn.metrics import roc_auc_score
# Parameters for progress_bar Init
TOTAL_BAR_LENGTH = 65.
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
last_time = time.time()
begin_time = last_time
def ensure_dir(path):
import pathlib
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def progress_bar(current, total, msg=None):
    ''' Print the current progress of training or validation.
Args:
current (int): current batch idx
total (int): total number of batch idx
msg(str): loss and acc
'''
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
    ''' Format a duration given in seconds as a compact string.
Args:
seconds (float): time
'''
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def stats(outputs, targets):
    ''' From the output scores and target labels, calculate true positives,
    false positives, true negatives, false negatives, accuracy, recall,
    specificity, precision, F1 score, AUC and the best threshold, and
    return them.
Args:
outputs (numpy array): net outputs list
targets (numpy array): correct result list
'''
num = len(np.arange(0,1.005,0.005))
correct = [0] * num
tp = [0] * num
tn = [0] * num
fp = [0] * num
fn = [0] * num
recall = [0] * num
specificity = [0] * num
outputs_num = outputs.shape[0]
for i, threshold in enumerate(np.arange(0, 1.005, 0.005)):
threshold =
|
np.ones(outputs_num)
|
numpy.ones
|
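A brief sketch of how the np.ones(outputs_num) completion above broadcasts one threshold value across every output score; the scores below are placeholders, not model outputs:

import numpy as np

outputs = np.array([0.10, 0.72, 0.43, 0.91])
threshold = np.ones(outputs.shape[0]) * 0.5  # one copy of the scalar threshold per prediction
predicted = (outputs >= threshold).astype(int)
print(predicted)                             # -> [0 1 0 1]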
# Copyright 2020 FZI Forschungszentrum Informatik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import datetime
import logging
import os
import sys
import pathlib
from absl import app, flags
import numpy as np
import tensorflow as tf
import deepmind_lab
import agents
import networks
import models
import read_tfrecords
import buffers
import third_party.scores as scores
import third_party.ensembles as ensembles
import third_party.utils as utils
def softmax_cross_entropy_logits_loss(y_true, y_pred):
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y_true, logits=y_pred)
return loss
def add_input_noise(ego_vel):
ego_vel_noise = tf.random.normal(
ego_vel.shape, 0.0, 1.0) * velocity_noise
return ego_vel + ego_vel_noise
# TFRecords helper functions
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values.reshape(-1)))
logging.getLogger("tensorflow").setLevel(logging.ERROR)
# FLAGS = flags.FLAGS
# flags.DEFINE_float('task_env_size', 2.2,
# 'Environment size (meters).')
# General
datetime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = os.path.join('/tmp', 'tensorboard', 'gridnetwork', datetime)
train_log_dir = os.path.join(log_dir, 'train')
test_log_dir = os.path.join(log_dir, 'test')
pathlib.Path(train_log_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(test_log_dir).mkdir(parents=True, exist_ok=True)
# DM-Lab Observations
img_width = 64
img_height = 64
max_velocity = 325.0
observations = ['RGB',
'DEBUG.POS.ROT', 'DEBUG.POS.TRANS',
'VEL.ROT', 'VEL.TRANS']
# DM-Lab Environment
level = 'tests/empty_room_test'
level_boundary_min = np.array([116.125, 116.125])
level_boundary_max =
|
np.array([783.87, 783.87])
|
numpy.array
|
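The boundary values from the record above, shown as a standalone np.array example; the subtraction giving the level extent is only an illustration, not part of the original script:

import numpy as np

level_boundary_min = np.array([116.125, 116.125])
level_boundary_max = np.array([783.87, 783.87])
print(level_boundary_max - level_boundary_min)  # per-axis extent of the arena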
import numpy
import os
class DumpFile(object):
def __init__(self, path, dtype):
self.path = path
self.filenames = []
dtype = numpy.dtype(dtype)
if dtype == numpy.dtype('f8'):
self.rdtype = numpy.dtype('f8')
self.cdtype = numpy.dtype('complex128')
else:
self.rdtype = numpy.dtype('f4')
self.cdtype = numpy.dtype('complex64')
i = 0
while True:
fn = '%s.%03d' % (path, i)
if not os.path.exists(fn):
if i == 0:
fn = path
if not os.path.exists(fn):
raise OSError("File not found")
else:
break
self.filenames.append(fn)
i = i + 1
def as_real(self):
shape = self._guess_size('real')
data = numpy.zeros(shape, dtype=self.rdtype)
for fn in self.filenames:
geo = fn + '.geometry'
strides, offset, shape = self._parse_geo(geo, 'real')
d = numpy.fromfile(fn, dtype=self.rdtype)
ind = tuple([slice(x, x+o) for x, o in zip(offset, shape)])
d = numpy.lib.stride_tricks.as_strided(d, shape=shape, strides=strides * self.rdtype.itemsize)
data[ind] = d
return data
def as_complex(self):
shape = self._guess_size('complex')
        data = numpy.zeros(shape, dtype=self.cdtype)  # match the precision chosen in __init__
for fn in self.filenames:
geo = fn + '.geometry'
strides, offset, shape = self._parse_geo(geo, 'complex')
d = numpy.fromfile(fn, dtype=self.cdtype)
ind = tuple([slice(x, x+o) for x, o in zip(offset, shape)])
d = numpy.lib.stride_tricks.as_strided(d, shape=shape, strides=strides * self.cdtype.itemsize)
data[ind] = d
return data
def _parse_geo(self, geofn, mode):
if mode == 'real':
strides = numpy.loadtxt(open(geofn).readlines()[3].split()[1:], dtype=int)
offset = numpy.loadtxt(open(geofn).readlines()[1].split()[1:], dtype=int)
shape = numpy.loadtxt(open(geofn).readlines()[2].split()[1:], dtype=int)
elif mode == 'complex':
strides = numpy.loadtxt(open(geofn).readlines()[7].split()[1:], dtype=int)
offset = numpy.loadtxt(open(geofn).readlines()[5].split()[1:], dtype=int)
shape = numpy.loadtxt(open(geofn).readlines()[6].split()[1:], dtype=int)
return strides, offset, shape
def _guess_size(self, mode):
size = None
for fn in self.filenames:
geo = fn + '.geometry'
strides, offset, shape = self._parse_geo(geo, mode)
last = shape + offset
if size is None:
size = last
else:
size = numpy.maximum(size, last)
return size
def power(f1, f2=None, boxsize=1.0, average=True):
""" stupid power spectrum calculator.
f1 f2 must be density fields in configuration or fourier space.
For convenience if f1 is strictly overdensity in fourier space,
(zero mode is zero) the code still works.
Returns k, p or k, p * n, N if average is False
"""
def tocomplex(f1):
if f1.dtype.kind == 'c':
return f1
else:
return numpy.fft.rfftn(f1)
f1 = tocomplex(f1)
if f1[0, 0, 0] != 0.0:
f1 /= abs(f1[0, 0, 0])
if f2 is None:
f2 = f1
if f2 is not f1:
f2 = tocomplex(f2)
if f2[0, 0, 0] != 0.0:
f2 /= abs(f2[0, 0, 0])
def fftk(shape, boxsize):
k = []
for d in range(len(shape)):
kd = numpy.arange(shape[d])
if d != len(shape) - 1:
kd[kd > shape[d] // 2] -= shape[d]
else:
kd = kd[:shape[d]]
kdshape = numpy.ones(len(shape), dtype='int')
kdshape[d] = len(kd)
kd = kd.reshape(kdshape)
k.append(kd)
return k
k = fftk(f1.shape, boxsize)
def find_root(kk):
solution = numpy.int64(numpy.sqrt(kk) - 2).clip(0)
solution[solution < 0] = 0
mask = (solution + 1) ** 2 < kk
while(mask.any()):
solution[mask] += 1
mask = (solution + 1) ** 2 <= kk
return solution
ksum = numpy.zeros(f1.shape[0] //2, 'f8')
wsum = numpy.zeros(f1.shape[0] //2, 'f8')
xsum = numpy.zeros(f1.shape[0] //2, 'f8')
for i in range(f1.shape[0]):
kk = k[0][i] ** 2 + k[1] ** 2 + k[2] ** 2
# remove unused dimension
kk = kk[0]
d = find_root(kk)
w = numpy.ones(d.shape, dtype='f4') * 2
w[..., 0] = 1
w[..., -1] = 1
xw = abs(f1[i] * f2[i].conjugate()) * w
kw = kk ** 0.5 * 2 * numpy.pi / boxsize * w
ksum += numpy.bincount(d.flat, weights=kw.flat, minlength=f1.shape[0])[:f1.shape[0] // 2]
wsum += numpy.bincount(d.flat, weights=w.flat, minlength=f1.shape[0])[:f1.shape[0] // 2]
xsum += numpy.bincount(d.flat, weights=xw.flat, minlength=f1.shape[0])[:f1.shape[0] // 2]
center = ksum / wsum
if not average:
return center, xsum * boxsize**3, wsum
else:
return center, xsum / wsum * boxsize **3
def fftdown(field, size):
""" Down samples a fourier space field. Size can be scalar or vector.
Hermitian should be handled correctly. But I have only tested this
on hermitian compressed fields.
"""
ashape = numpy.array(field.shape)
asize = ashape.copy()
fullaxes = ashape == ashape[0]
asize[:] = size
if
|
numpy.isscalar(size)
|
numpy.isscalar
|
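A sketch of the scalar-versus-vector size handling that the numpy.isscalar completion above enables; normalize_size is a hypothetical helper, not part of fftdown:

import numpy as np

def normalize_size(size, ndim=3):
    # A scalar target size applies to every axis; a sequence is taken per axis.
    if np.isscalar(size):
        return np.full(ndim, size, dtype=int)
    return np.asarray(size, dtype=int)

print(normalize_size(64))            # -> [64 64 64]
print(normalize_size([64, 32, 16]))  # -> [64 32 16]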
import numpy
from scipy.special import jn
import scipy.linalg
import scipy.optimize
import warnings
class WaveGuide:
r""" This class can be used to decompose in-duct sounds into acoustic modes. It can be used for experimental data
and for numeric data.
"""
def __init__(self, dimensions, **kwargs):
r"""
Parameters for initialization:
Parameters
----------
dimensions : array_like
The dimensions of the waveguide.
- For a circular duct, *dimensions* is (radius, ).
- For a rectangular duct, *dimension* is (dimension x, dimension y).
- For any other shape, *dimensions* can be specified in the same way (dimension 1, dimension 2, ...).
temperature : float, optional
Temperature in Kelvin of the medium inside the waveguide. Defaults to 293.15.
M : float, optional
Bulk Mach number in :math:`z_+` direction. Defaults to 0.
flip_flow : boolean, optional
If *True*, it changes the flow-direction from :math:`z_+` to :math:`z_-` direction. A negative Mach
number has the same effect. Defaults to *False*.
damping : {"no", "kirchoff","dokumaci"}, optional
Choose one of the pre-defined acoustic dispersion models. Defaults to "no".
- "no": no predefined dissipation is used. This should be used to implement custom dissipation models.
- "kirchoff": `Kirchoff's thermo-viscous dissipation <https://onlinelibrary.wiley.com/doi/abs/10.1002/andp.18682100602>`_ is used.
- "dokumaci": `Dokumaci's thermo-viscous dissipation <https://www.sciencedirect.com/science/article/pii/S0022460X14004921>`_ is used. Useful for higher order modes and waveguides with flow.
- "stinson": `Stinson's thermo-viscous dissipation <https://asa.scitation.org/doi/10.1121/1.400379>`_ is used.
distance : float
The distance between the decomposition cross section and the first microphone. Defaults to 0.
cross_section : {"circular","rectangular","custom"}, optional
Choose one of the pre-defined duct profiles. Defaults to "circular". If set to "custom" the methods
:meth:`.WaveGuide.get_c`, :meth:`WaveGuide.get_psi` , and :meth:`.WaveGuide.get_wavenumber` should be
customized.
f_max : float, optional
If set, all propagating modes up to the frequency `f_max` [Hz] are pre-computed and decompositions run
faster. Defaults to 100.
gas_constant: float, optional
The ideal gas constant of the medium inside the waveguide. Defaults to 287.053072.
dynamic_viscosity : float, optional
The dynamic viscosity of the medium inside the waveguide. Defaults to 18.13e-6.
pressure : float, optional
The static pressure of the medium inside the waveguide. Defaults to 101,000 Pa.
heat_capacity : float, optional
The heat capacity ratio of the medium inside the waveguide. Defaults to 1.401.
thermal_conductivity : float, optional
The thermal conductivity of the medium inside the waveguide. Defaults to 0.02587.
eigenvalue : functional, optional
Uses an external function to compute the eigenvalues. If not specified, the cross section specific
function is used. If a custom cross section is specified, the eigenvalue defaults to 1.
modeshape : functional, optional
Uses an external function to compute the mode shapes. If not specified, the cross section specific
function is used. If a custom cross section is specified, the mode shape defaults to 1.
wavenumber : functional, optional
Uses an external function to compute the wavenumber. If not specified, the cross section specific
function is used. If a custom cross section is specified, the wavenumber defaults to the wavenumber
for a circular duct.
normalization : functional, optional
Uses an external function to compute the normalization factor for the mode shapes. If not specified,
the cross section specific function is used. If a custom cross section is specified, the mode shape
normalization defaults to 1.
"""
# Define parameters of the test domain
self.temperature = kwargs.get("temperature", 293.15)
self.M = kwargs.get("M", 0)
self.flip_flow = 1
if "flip_flow" in kwargs.keys() and kwargs["flip_flow"] == True:
self.flip_flow = -1
# Acoustic dissipation
self.damping = kwargs.get("damping", "no")
# Duct Geometry
self.dimensions = dimensions
self.cross_section = kwargs.get("cross_section", "circular")
self.f_max = kwargs.get("f_max", 100)
# Define the parameter of the air
self.gas_constant = kwargs.get("gas_constant", 287.053072)
self.mu = kwargs.get("dynamic_viscosity", 18.13e-6)
self.p0 = kwargs.get("pressure", 101000)
self.gamma = kwargs.get("heat_capacity", 1.401)
self.kth = kwargs.get("thermal_conductivity", 0.02587)
# Compute specific heat constant
self.Cp = self.gamma / (self.gamma - 1) * self.gas_constant
# Compute density
self.rho = self.p0 / self.gas_constant / self.temperature
# Calculate the speed of sound
self.speed_of_sound = numpy.sqrt(self.gamma * self.gas_constant * self.temperature)
self.microphone_group = [[], []]
# Axial position of the microphones
self.microphone_position = []
# Rectangular duct: Pos1 = x, pos2 = y of the microphone
# Circular duct: Pos1 = angle, pos2 = radial position of the microphone
self.microphone_pos1 = []
self.microphone_pos2 = []
# Initialize the cut-on mode to 1; it will be recalculated later
self.cuton_mode = 1
self.ref_angle = 0
# Allocate the distance between the microphones and the decomposition cross section. Important for loss model.
self.distance = kwargs.get("distance",0)
self.frequency = 0
self._link_functions(**kwargs)
self.get_kappa = numpy.vectorize(self.get_kappa)
# Set the eigenvalues for the first propagating mode (plane wave)
self.kappa = self._init_eigenvalue_matrix(0, 0)
# Set the mode vectors and the eigenvalues for all modes that can propagate in the expected frequency range
self.mode_vector, self.kappa = self._init_modes()
def _link_functions(self, **kwargs):
"""
Links the cross section specific and problem specific functions to the class.
Parameters
----------
eigenvalue : functional, optional
Uses an external function to compute the eigenvalues. If not specified, the cross section specific
function is used. If a custom cross section is specified, the eigenvalue defaults to 1.
modeshape : functional, optional
Uses an external function to compute the mode shapes. If not specified, the cross section specific
function is used. If a custom cross section is specified, the mode shape defaults to 1.
wavenumber : functional, optional
Uses an external function to compute the wavenumber. If not specified, the cross section specific
function is used. If a custom cross section is specified, the wavenumber defaults to the wavenumber
for a circular duct.
normalization : functional, optional
Uses an external function to compute the normalization factor for the mode shapes. If not specified,
the cross section specific function is used. If a custom cross section is specified, the mode shape
normalization defaults to 1.
"""
damping = None
if self.damping == "kirchoff":
damping = self.K0_kirchoff
if self.damping == "dokumaci":
damping = self.K0_dokumaci
if self.cross_section == "circular":
eigenvalue = kwargs.get("eigenvalue", self.get_eigenvalue_circular)
modeshape = kwargs.get("modeshape", self.get_psi_circular)
wavenumber = kwargs.get("wavenumber", self.get_wavenumber)
mode_norm = kwargs.get("normalization", self.get_c_circular)
if self.damping == "stinson":
damping = self.K0_stinson_circular
elif self.cross_section == "rectangular":
eigenvalue = kwargs.get("eigenvalue", self.get_eigenvalue_rect)
modeshape = kwargs.get("modeshape", self.get_psi_rect)
wavenumber = kwargs.get("wavenumber", self.get_wavenumber)
mode_norm = kwargs.get("normalization", self.get_c_rect)
if self.damping == "stinson":
damping = self.K0_stinson_rect
else:
eigenvalue = kwargs.get("eigenvalue", self.get_eigenvalue)
modeshape = kwargs.get("modeshape", self.get_psi)
wavenumber = kwargs.get("wavenumber", self.get_wavenumber)
mode_norm = kwargs.get("normalization", self.get_c)
if damping is not None:
self.get_K0 = damping
self.get_wavenumber = wavenumber
self.get_eigenvalue = eigenvalue
self.get_psi = modeshape
self.get_c = mode_norm
def _init_eigenvalue_matrix(self, m, n):
"""
Initializes a matrix that contains the eigenvalues for all propagating modes.
Parameters
----------
m : integer
Mode-order in the first direction. If the waveguide is circular, m indicates the circumferential mode-order.
The plane wave has the order 0.
n : integer
Mode-order in the second direction. If the waveguide is circular, n indicates the radial mode-order.
The plane wave has the order 0.
Returns
-------
numpy.ndArray of the dimension m x n that contains the eigenvalues for all modes up to the mode-order (m,n).
"""
ematrix = numpy.zeros((m + 2, n + 2))
for mOrder in range(m + 2):
for nOrder in range(n + 2):
ematrix[mOrder, nOrder] = self.get_eigenvalue(mOrder, nOrder)
return ematrix
def _init_modes(self):
"""
Finds the order of the (m,n)-modes regarding their cut-on frequencies.
Returns
-------
(mode_vector, kappa): tuple
- mode_vector : numpy.ndArray, containing tuples (m,n) of the modes, in the order of their cut-on frequency.
- kappa : numpy.ndArray of the dimension m x n that contains the eigenvalues for all modes up to the mode
order (m,n).
"""
mode_vector = []
# At least the plane wave ( (0,0)- mode ) must be cut-on
maxm = 0
maxn = 0
# Find the first m-mode-order that is cut-off
while (numpy.imag(self.get_wavenumber(maxm, 0, self.f_max, sign=-1, dissipative=False)) == 0 and
numpy.imag(self.get_wavenumber(maxm, 0, self.f_max, sign=+1, dissipative=False)) == 0):
maxm += 1
# Find the first n mode-order that is cut-off
while (numpy.imag(self.get_wavenumber(0, maxn, self.f_max, sign=-1, dissipative=False)) == 0 and
numpy.imag(self.get_wavenumber(0, maxn, self.f_max, sign=+1, dissipative=False)) == 0):
maxn += 1
# Create a matrix that contains all cut-on (and some cut-off) eigenvalues for the modes
ematrix = self._init_eigenvalue_matrix(maxm, maxn)
kappa = numpy.copy(ematrix)
# Iterate through all modes as long as the cut-off mode with the smallest eigenvalue is found
currentm, currentn = numpy.unravel_index(numpy.argmin(ematrix, axis=None), ematrix.shape)
while (numpy.imag(self.get_wavenumber(currentm, currentn, self.f_max, sign=-1, dissipative=False)) == 0 and
numpy.imag(self.get_wavenumber(currentm, currentn, self.f_max, sign=+1, dissipative=False)) == 0):
# If the duct is circular, the mode-order can be positive and negative
if self.cross_section == "circular" and not currentm == 0:
mode_vector.append([-1 * currentm, currentn])
mode_vector.append([currentm, currentn])
ematrix[currentm, currentn] = numpy.Inf
currentm, currentn = numpy.unravel_index(numpy.argmin(ematrix, axis=None), ematrix.shape)
return (numpy.array(mode_vector), kappa)
def get_domainvalues(self):
"""
Returns the characteristic properties of the waveguide and the medium.
Returns
-------
dict
The characteristic properties {"density", "dynamic_viscosity", "specific_heat", "heat_capacity",
"thermal_conductivity", "speed_of_sound", "Mach_number", "radius", "bulk-viscosity"} of the waveguide and
the medium.
"""
return {"density": self.rho, "dynamic_viscosity": self.mu, "specific_heat": self.Cp,
"heat_capacity": self.gamma, "thermal_conductivity": self.kth, "speed_of_sound": self.speed_of_sound,
"Mach_number": self.M * self.flip_flow, "radius": self.get_radius(), "bulk-viscosity": 0.6 * self.mu}
def set_distance(self, d):
"""
Sets the distance between the first microphone and the decomposition cross section.
Parameters
----------
d : float
Distance in [m] between the first microphone and the decomposition cross section.
"""
# Subtract the old distance and add the new distance
self.microphone_position = self.microphone_position + d - self.distance
self.distance = d
def get_radius(self):
"""
Returns the radius or an equivalent measure for the waveguide.
Returns
-------
float
If the waveguide's cross section is "circular", the radius is returned. If the waveguide is "rectangular",
the hydraulic radius is returned. Otherwise, dimension[0] is returned.
"""
if self.cross_section == "circular":
radius = self.dimensions[0]
# If the duct is rectangular, return the hydraulic radius.
elif self.cross_section == "rectangular":
radius = self.dimensions[0] * self.dimensions[1] / (self.dimensions[0] + self.dimensions[1])
else:
radius = self.dimensions[0]
return radius
def set_temperature_pressure(self, t=None, p0=None):
"""
Sets the temperature and pressure in the waveguide. Recalculates the speed of sound, the density, and the
Mach_number.
Parameters
----------
t : float
Temperature in Kelvin of the medium inside the waveguide.
p0 : float, optional
Static pressure in Pa of the medium inside the waveguide.
"""
# Update temperature and pressure
if p0 is not None:
self.p0 = p0
if t is not None:
self.temperature = t
# recompute the properties of the medium
speed_of_sound_updated = numpy.sqrt(self.gamma * self.gas_constant * self.temperature)
self.M *= self.speed_of_sound / speed_of_sound_updated
self.rho = self.p0 / self.gas_constant / self.temperature
self.speed_of_sound = speed_of_sound_updated
def set_flip_flow(self, flip_flow):
"""
Set the flow direction. Standard flow direction is in :math:`P_+` direction.
Parameters
----------
flip_flow : bool
If flip_flow is *True*, the flow direction is in :math:`P_-` direction (towards the test component).
"""
if flip_flow:
self.flip_flow = -1
else:
self.flip_flow = 1
def read_microphonefile(self, filename, cylindrical_coordinates=False, **kwargs):
"""
Reads a file that contains the microphone position. The dimensions are [m] or [deg].
The file must have the following structure:
- For Circular duct:
=== === ==
z1 r1 :math:`\Phi` 1
z2 r2 :math:`\Phi` 2
... ... ...
zm rm :math:`\Phi` m
=== === ==
- For other ducts:
=== === ==
z1 x1 :math:`y` 1
z2 x2 :math:`y` 2
... ... ...
zm xm :math:`y` m
=== === ==
Parameters
----------
filename : str
Full Path to the file that contains the microphone data.
cylindrical_coordinates : bool, optional
If *True* the circumferential position is converted from deg. to radians.
kwargs : additional parameters
Will be passed to `numpy.loadtxt <https://numpy.org/doc/stable/reference/generated/numpy.loadtxt.html>`_\.
Please refer to the numpy documentation for information.
"""
self.microphone_position = numpy.loadtxt(filename, **kwargs)
# Transform deg to radians
if cylindrical_coordinates:
self.microphone_position[:, 2] *= numpy.pi / 180
self.microphone_position[:, 0] += self.distance
def set_microphone_positions(self, posz, pos1, pos2, cylindrical_coordinates = False):
"""
Sets the positions for the pressure probes. The dimensions are [m] or [deg].
Parameters
----------
posz : array_like
Axial positions of the pressure probes.
pos1 : array_like
Position of the pressure probe in the first dimension. For waveguides with circular cross section, this
is the radial position in meters.
pos2 : array_like
Position of the pressure probe in the second dimension. For waveguides with circular cross sections, this
is the circumferential position in deg.
cylindrical_coordinates : bool, optional
If *True* the circumferential position is converted from deg. to radians.
"""
self.microphone_position = numpy.zeros((len(posz), 3))
self.microphone_position[:, 0] = posz
self.microphone_position[:, 1] = pos1
self.microphone_position[:, 2] = pos2
if cylindrical_coordinates:
self.microphone_position[:, 2] *= numpy.pi / 180
self.microphone_position[:, 0] += self.distance
def get_eigenvalue(self, m, n, **kwargs):
"""
Placeholder for the eigenvalue of the (m,n)-mode. When the object is initialized, this function may be
overwritten either by one of the predefined eigenvalue functions :meth:`.WaveGuide.get_eigenvalue_circular`
(circular waveguide) and :meth:`.WaveGuide.get_eigenvalue_rect` (rectangular waveguide),
or a custom function.
Parameters
----------
m : int
Mode-order in the first direction. If the waveguide is circular, m indicates the circumferential mode-order.
The plane wave has the order 0.
n : integer
Mode-order in the second direction. If the waveguide is circular, n indicates the radial mode-order.
The plane wave has the order 0.
kwargs : additional arguments
Returns
-------
complex
The eigenvalue of the (m,n)-mode.
"""
return 1
def get_eigenvalue_circular(self, m, n, **kwargs):
r"""
Returns the eigenvalue of the (m,n)-mode for a circular duct.
.. math::
\kappa_{mn} = \frac{R_n(J'_m)}{r}
where :math:`J'_m` is the derivative of the Bessel function of first kind and order m, and :math:`R_n` is the
n-th zero.
Parameters
----------
m : int
Mode-order in the circumferential direction. The plane wave has the order 0.
n : integer
Mode-order in the radial direction. The plane wave has the order 0.
kwargs : additional arguments
Returns
-------
float or complex
The eigenvalue of the (m,n)-mode, divided by the radius.
"""
# here, we need to correct the fact that the scipy value for the 0-0 value is missing.
bessel_der_zero = scipy.special.jnp_zeros(m, n + 1)
if m == 0:
bessel_der_zero = numpy.append([0], bessel_der_zero)
return bessel_der_zero[n] / self.dimensions[0]
def get_eigenvalue_rect(self, m, n, **kwargs):
r"""
Returns the eigenvalue of the (m,n)-mode for a rectangular duct.
.. math ::
\kappa_{mn} = \pi (\frac{n}{a} + \frac{m}{b})
Parameters
----------
m : int
Mode-order in x-direction. The plane wave has the order 0.
n : integer
Mode-order in y-direction. The plane wave has the order 0.
kwargs : additional arguments
Returns
-------
float or complex
The eigenvalue of the (m,n)-mode.
"""
return numpy.pi * (n / self.dimensions[0] + m / self.dimensions[1])
def get_kappa(self, m, n):
"""
Returns the eigenvalues of the (m,n)-mode. This method picks the eigenvalues from a pre-calculated eigenvalue
matrix, which makes computations in larger data sets more efficient. For the computation of the eigenvalue,
see :meth:`.WaveGuide.get_eigenvalue` .
Parameters
----------
m : int
Mode-order in the circumferential direction. The plane wave has the order 0.
n : integer
Mode-order in the radial direction. The plane wave has the order 0.
Returns
-------
float or complex
The eigenvalue of the (m,n)-mode.
"""
# if kappa was not computed for m and n yet, compute it
m, n = abs(m), abs(n)
if m >= self.kappa.shape[0] or n >= self.kappa.shape[1]:
self.kappa = self._init_eigenvalue_matrix(m, n)
return self.kappa[m, n]
def get_c(self, m, n, **kwargs):
"""
Placeholder for the normalization of the mode shape of the (m,n)-mode.
When the object is initialized, this function may be overwritten either by one of the predefined
normalization functions :meth:`.WaveGuide.get_c_circular` (circular waveguide) and
:meth:`.WaveGuide.get_c_rect` (rectangular waveguide), or a custom function. The predefined functions
normalize the mode shapes to be orthonormal, i.e.,
.. math ::
\int_{A} \psi_{mn} \psi_{m'n'} dA = \delta_{mm'} \delta_{nn'}
and :math:`\delta` is the Kronecker delta.
Parameters
----------
m : int
Mode-order in the first direction. If the waveguide is circular, m indicates the circumferential mode-order.
The plane wave has the order 0.
n : integer
Mode-order in the second direction. If the waveguide is circular, n indicates the radial mode-order.
The plane wave has the order 0.
kwargs : additional arguments.
Returns
-------
float or complex
The normalization factor of the (m,n)-mode.
"""
return 1
def get_c_rect(self, m, n, **kwargs):
r"""
Return the normalization of the (m,n)-mode for a rectangular duct. Modes are normalized to be orthonormal.
.. math ::
C_{mn} = \frac{\sqrt{a b}}{2}
Parameters
----------
m : int
Mode-order in x-direction. The plane wave has the order 0.
n : integer
Mode-order in y-direction. The plane wave has the order 0.
kwargs : additional arguments
Returns
-------
float or complex
The eigenvalue of the (m,n)-mode.
"""
return numpy.sqrt(self.dimensions[0] * self.dimensions[1]) / 2
def get_c_circular(self, m, n, **kwargs):
"""
Return the normalization of the (m,n)-mode for a circular duct. Modes are normalized to be orthonormal.
.. math ::
C_{mn} = \sqrt{A \left(J_m(r\kappa)^2 - J_{m-1}(r\kappa) \, J_{m+1}(r\kappa)\right)}
where :math:`J_m` is the Bessel function of the first kind and order *m*, and :math:`\kappa` is the eigenvalue, see
:meth:`.WaveGuide.get_eigenvalue` .
Parameters
----------
m : int
Mode-order in the circumferential direction. The plane wave has the order 0.
n : integer
Mode-order in the radial direction. The plane wave has the order 0.
kwargs : additional arguments
Returns
-------
complex
The normalization-factor of the (m,n)-mode.
"""
k_r = self.dimensions[0] * self.get_kappa(m, n)
A = self.dimensions[0] ** 2 * numpy.pi
return numpy.sqrt(A * (numpy.square(jn(m, k_r)) - jn(m - 1, k_r) * jn(m + 1, k_r)))
def get_psi(self, m, n, pos1, pos2, **kwargs):
"""
Placeholder for the normalized mode shapes of the (m,n)-mode. When the object is initialized, this function
may be overwritten either by one of the predefined normalization functions :meth:`.WaveGuide.get_psi_circular`
(circular waveguide) and :meth:`.WaveGuide.get_psi_rect` (rectangular waveguide), or a custom function.
Parameters
----------
m : int or array_like
Mode-order in the first direction. If the waveguide is circular, m indicates the circumferential mode-order.
The plane wave has the order 0.
n : int or array_like
Mode-order in the second direction. If the waveguide is circular, n indicates the radial mode-order.
The plane wave has the order 0.
pos1 : float or array_like
Position in the first direction.
pos2 : float or array_like
Position in the second direction.
kwargs : additional arguments
Returns
-------
complex
The eigenvalue of the (m,n)-mode.
"""
return numpy.ones((len(pos1),), dtype=complex)
def get_psi_circular(self, m, n, r, phi, **kwargs):
r"""
Return the normalized mode shape of the (m,n)-mode for a circular duct. Modes are normalized to be
orthonormal, see :meth:`.WaveGuide.get_c`\.
.. math::
\Psi_{mn} = \frac{J_m(R\kappa_{mn}) \mathbf{e}^{\mathbf{i} m \phi}}{C_{mn}}
where :math:`\kappa` is the eigenvalue, see :meth:`.WaveGuide.get_eigenvalue` .
Parameters
----------
m : int
Mode-order in the circumferential direction. The plane wave has the order 0.
n : integer
Mode-order in the radial direction. The plane wave has the order 0.
r : float or array_like
Radial-coordinate.
phi : float or array_like
Circumferential-coordinate.
kwargs : additional arguments
Returns
-------
complex
The normalized mode shape of the (m,n)-mode at position (pos1, pos2).
"""
return 1 / self.get_c(m, n) * jn(m, r * self.get_kappa(m, n)) *
|
numpy.exp(m * phi * 1j)
|
numpy.exp
|
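A short illustration of the circumferential phase factor exp(i*m*phi) supplied by the numpy.exp completion above, evaluated for the m = 1 mode at a few made-up microphone angles:

import numpy as np

m = 1
phi = np.array([0.0, np.pi / 2, np.pi])
phase = np.exp(1j * m * phi)
print(np.round(phase, 3))            # -> [ 1.+0.j  0.+1.j -1.+0.j]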
import unittest
import os
from numpy import array, array_equal, sin, cos, radians
import pyNastran
from pyNastran.bdf.bdf import BDF, BDFCard, read_bdf, DMI, DMIG, fill_dmigs
from pyNastran.bdf.cards.test.utils import save_load_deck
PKG_PATH = pyNastran.__path__[0]
TEST_PATH = os.path.join(PKG_PATH, 'bdf', 'cards', 'test')
class TestDMIG(unittest.TestCase):
def test_dmig_1(self):
"""Tests DMIG reading"""
model = BDF(debug=False)
bdf_name = os.path.join(TEST_PATH, 'dmig.bdf')
model.read_bdf(bdf_name, xref=False, punch=True)
out = model.dmigs['REALS'].get_matrix(is_sparse=False)
reals_actual, unused_rows_reversed, unused_cols_reversed = out
#print "---reals_actual---\n", reals_actual
#print "---out---\n", out
reals_expected = [
[1.0, 0.5, 0.25],
[0.5, 2.0, 0.75],
[0.25, 0.75, 3.0],
]
a_matrix = model.dmigs['REALS']
assert len(a_matrix.GCi) == 6, 'len(GCi)=%s GCi=%s matrix=\n%s' % (len(a_matrix.GCi), a_matrix.GCi, a_matrix)
assert len(a_matrix.GCj) == 6, 'len(GCj)=%s GCj=%s matrix=\n%s' % (len(a_matrix.GCj), a_matrix.GCj, a_matrix)
self.assertTrue(array_equal(reals_expected, reals_actual))
a_matrix.get_matrix()
def test_dmig_2(self):
model = BDF(debug=False)
bdf_name = os.path.join(TEST_PATH, 'dmig.bdf')
model.read_bdf(bdf_name, xref=False, punch=True)
out = model.dmigs['REAL'].get_matrix(is_sparse=False)
real_actual, unused_rows_reversed, unused_cols_reversed = out
#print "---REAL_actual---\n", REAL_actual
real_expected = [
[1.0, 0.5, 0.25],
[0.0, 2.0, 0.75],
[0.0, 0.0, 3.0],
]
a_matrix = model.dmigs['REAL']
assert len(a_matrix.GCi) == 6, 'len(GCi)=%s GCi=%s matrix=\n%s' % (len(a_matrix.GCi), a_matrix.GCi, a_matrix)
assert len(a_matrix.GCj) == 6, 'len(GCj)=%s GCj=%s matrix=\n%s' % (len(a_matrix.GCj), a_matrix.GCj, a_matrix)
self.assertTrue(array_equal(real_expected, real_actual))
a_matrix.get_matrix()
#model2 = BDF(debug=False)
#bdf_name = os.path.join(TEST_PATH, 'include_dir', 'include.inc')
#model2.read_bdf(bdf_name, xref=False, punch=True)
save_load_deck(model)
def test_dmig_3(self):
model = BDF(debug=False)
bdf_name = os.path.join(TEST_PATH, 'dmig.bdf')
model.read_bdf(bdf_name, xref=False, punch=True)
out = model.dmigs['IMAG'].get_matrix(is_sparse=False)
imag_actual, unused_rows_reversed, unused_cols_reversed = out
#print "---IMAG_actual---\n", IMAG_actual
imag_expected_real = [
[1.0, 0.5, 0.25],
[0.0, 2.0, 0.75],
[0.0, 0.0, 3.0],
]
imag_expected_imag = [
[1.1, 0.51, 0.251],
[0.0, 2.1, 0.751],
[0.0, 0.0, 3.1],
]
a_matrix = model.dmigs['IMAG']
assert len(a_matrix.GCi) == 6, 'len(GCi)=%s GCi=%s matrix=\n%s' % (len(a_matrix.GCi), a_matrix.GCi, a_matrix)
assert len(a_matrix.GCj) == 6, 'len(GCj)=%s GCj=%s matrix=\n%s' % (len(a_matrix.GCj), a_matrix.GCj, a_matrix)
imag_expected = array(imag_expected_real) + array(imag_expected_imag)*1j
self.assertTrue(array_equal(imag_expected, imag_actual))
a_matrix.get_matrix()
save_load_deck(model)
def test_dmig_4(self):
model = BDF(debug=False)
bdf_name = os.path.join(TEST_PATH, 'dmig.bdf')
model.read_bdf(bdf_name, xref=False, punch=True)
out = model.dmigs['IMAGS'].get_matrix(is_sparse=False)
imags_actual, unused_rows_reversed, unused_cols_reversed = out
#print("---imag_actual---\n", imag_actual)
imags_expected_real = [
[1.0, 0.5, 0.25],
[0.5, 2.0, 0.75],
[0.25, 0.75, 3.0],
]
imags_expected_imag = [
[1.1, 0.51, 0.251],
[0.51, 2.1, 0.751],
[0.251, 0.751, 3.1],
]
a_matrix = model.dmigs['IMAGS']
assert len(a_matrix.GCi) == 6, 'len(GCi)=%s GCi=%s matrix=\n%s' % (len(a_matrix.GCi), a_matrix.GCi, a_matrix)
assert len(a_matrix.GCj) == 6, 'len(GCj)=%s GCj=%s matrix=\n%s' % (len(a_matrix.GCj), a_matrix.GCj, a_matrix)
imags_expected = array(imags_expected_real) + array(imags_expected_imag)*1j
msg = '\n%s_actual\n%s\n\n----' % ('IMAGS', imags_actual)
msg += '\n%s_expected\n%s\n----' % ('IMAGS', imags_expected)
msg += '\n%s_delta\n%s\n----' % ('IMAGS', imags_actual-imags_expected)
self.assertTrue(
|
array_equal(imags_expected, imags_actual)
|
numpy.array_equal
|
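A minimal reminder of what the array_equal completion above checks: identical shape and exactly identical elements. The small matrices here are stand-ins, not the DMIG test data:

import numpy as np

expected = np.array([[1.0, 0.5], [0.5, 2.0]])
actual = expected.copy()
print(np.array_equal(expected, actual))          # True: exact match
print(np.array_equal(expected, actual + 1e-9))   # False: any numeric difference fails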
#%% imports
from nltk.classify.util import accuracy
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.collections import defaultdict
import nltk
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from hazm import Normalizer, word_tokenize
import pandas as pd
import numpy as np
def clean_persianText(txt):
normalizer = Normalizer()
txt = normalizer.character_refinement(txt)
txt = normalizer.affix_spacing(txt)
txt = normalizer.punctuation_spacing(txt)
txt = txt.replace('.', '')
txt = normalizer.normalize(txt)
return txt
def prepare_data(data):
data = data[['text', 'label']]
lbl = data['label']
data['text'] = data['text'].apply(lambda x: clean_persianText(x))
return data['text'], data['label']
def bag_of_words(words):
return dict([(word, True) for word in words])
def bag_of_words_not_in_set(words, badwords):
return bag_of_words(set(words)-set(badwords))
def bag_of_non_stopwords(words, stopfile="english"):
badwords = stopwords.words(stopfile)
return bag_of_words_not_in_set(words, badwords)
def bag_of_bigrams(words, score_fn=BigramAssocMeasures.chi_sq, n=200):
bcf = BigramCollocationFinder.from_words(words)
bigrams = bcf.nbest(score_fn, n)
return bag_of_words(words+bigrams)
def label_feats_from_corpus(corp, feature_detector=bag_of_words):
label_feats = defaultdict(list)
for label in corp.categories():
for fileid in corp.fileids(categories=[label]):
feats = feature_detector(corp.words(fileids=[fileid]))
label_feats[label].append(feats)
return label_feats
def load_data(file_name='./dataset/fa_2.xlsx'):
data, labels = prepare_data(pd.read_excel(file_name))
unique_labels =
|
np.unique(labels)
|
numpy.unique
|
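A tiny example of the np.unique(labels) completion above; the class names are placeholders for the Persian-text labels in the dataset:

import numpy as np

labels = np.array(['pos', 'neg', 'pos', 'neutral', 'neg'])
print(np.unique(labels))             # -> ['neg' 'neutral' 'pos']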
'''
Direct
------
'''
from re import Pattern
from typing import Collection, Optional, Union
import numpy as np
from anndata import AnnData
import metacells.parameters as pr
import metacells.tools as tl
import metacells.utilities as ut
from .collect import compute_effective_cell_sizes
from .feature import extract_feature_data
__all__ = [
'compute_direct_metacells',
]
@ut.logged()
@ut.timed_call()
@ut.expand_doc()
def compute_direct_metacells( # pylint: disable=too-many-statements,too-many-branches
adata: AnnData,
what: Union[str, ut.Matrix] = '__x__',
*,
feature_downsample_min_samples: int = pr.feature_downsample_min_samples,
feature_downsample_min_cell_quantile: float = pr.feature_downsample_min_cell_quantile,
feature_downsample_max_cell_quantile: float = pr.feature_downsample_max_cell_quantile,
feature_min_gene_total: Optional[int] = pr.feature_min_gene_total,
feature_min_gene_top3: Optional[int] = pr.feature_min_gene_top3,
feature_min_gene_relative_variance: Optional[float] = pr.feature_min_gene_relative_variance,
feature_gene_names: Optional[Collection[str]] = None,
feature_gene_patterns: Optional[Collection[Union[str, Pattern]]] = None,
forbidden_gene_names: Optional[Collection[str]] = None,
forbidden_gene_patterns: Optional[Collection[Union[str, Pattern]]] = None,
cells_similarity_value_normalization: float = pr.cells_similarity_value_normalization,
cells_similarity_log_data: bool = pr.cells_similarity_log_data,
cells_similarity_method: str = pr.cells_similarity_method,
target_metacell_size: float = pr.target_metacell_size,
max_cell_size: Optional[float] = pr.max_cell_size,
max_cell_size_factor: Optional[float] = pr.max_cell_size_factor,
cell_sizes: Optional[Union[str, ut.Vector]] = pr.cell_sizes,
knn_k: Optional[int] = pr.knn_k,
min_knn_k: Optional[int] = pr.min_knn_k,
knn_balanced_ranks_factor: float = pr.knn_balanced_ranks_factor,
knn_incoming_degree_factor: float = pr.knn_incoming_degree_factor,
knn_outgoing_degree_factor: float = pr.knn_outgoing_degree_factor,
candidates_cell_seeds: Optional[Union[str, ut.Vector]] = None,
min_seed_size_quantile: float = pr.min_seed_size_quantile,
max_seed_size_quantile: float = pr.max_seed_size_quantile,
candidates_cooldown_pass: float = pr.cooldown_pass,
candidates_cooldown_node: float = pr.cooldown_node,
candidates_cooldown_phase: float = pr.cooldown_phase,
candidates_min_split_size_factor: Optional[float] = pr.candidates_min_split_size_factor,
candidates_max_merge_size_factor: Optional[float] = pr.candidates_max_merge_size_factor,
candidates_min_metacell_cells: Optional[int] = pr.min_metacell_cells,
must_complete_cover: bool = False,
deviants_min_gene_fold_factor: float = pr.deviants_min_gene_fold_factor,
deviants_max_gene_fraction: Optional[float] = pr.deviants_max_gene_fraction,
deviants_max_cell_fraction: Optional[float] = pr.deviants_max_cell_fraction,
dissolve_min_robust_size_factor: Optional[float] = pr.dissolve_min_robust_size_factor,
dissolve_min_convincing_size_factor: Optional[float] = pr.dissolve_min_convincing_size_factor,
dissolve_min_convincing_gene_fold_factor: float = pr.dissolve_min_convincing_gene_fold_factor,
dissolve_min_metacell_cells: int = pr.dissolve_min_metacell_cells,
random_seed: int = pr.random_seed,
) -> AnnData:
'''
Directly compute metacells using ``what`` (default: {what}) data.
This directly computes the metacells on the whole data. Like any method that directly looks at
the whole data at once, the amount of CPU and memory needed becomes unreasonable when the data
size grows. Above O(10,000) you are much better off using the divide-and-conquer method.
.. note::
The current implementation is naive in that it computes the full dense N^2 correlation
matrix, and only then extracts the sparse graph out of it. We actually need two copies where
each requires 4 bytes per entry, so for O(100,000) cells, we have storage of
O(100,000,000,000). In addition, the implementation is serial for the graph clustering
phases.
It is possible to mitigate this by fusing the correlations phase and the graph generation
phase, parallelizing the result, and also (somehow) parallelizing the graph clustering
phase. This might increase the "reasonable" size for the direct approach to O(100,000).
We have decided not to invest in this direction since it won't allow us to push the size to
O(1,000,000) and above. Instead we provide the divide-and-conquer method, which easily
scales to O(1,000,000) on a single multi-core server, and to "unlimited" size if we further
enhance the implementation to use a distributed compute cluster of such servers.
.. todo::
Should :py:func:`compute_direct_metacells` avoid computing the graph and partition it for a
very small number of cells?
**Input**
The presumably "clean" annotated ``adata``, where the observations are cells and the variables
are genes, where ``what`` is a per-variable-per-observation matrix or the name of a
per-variable-per-observation annotation containing such a matrix.
**Returns**
Sets the following annotations in ``adata``:
Variable (Gene) Annotations
``high_total_gene``
A boolean mask of genes with "high" expression level.
``high_relative_variance_gene``
A boolean mask of genes with "high" normalized variance, relative to other genes with a
similar expression level.
``forbidden_gene``
A boolean mask of genes which are forbidden from being chosen as "feature" genes based
on their name.
``feature_gene``
A boolean mask of the "feature" genes.
``gene_deviant_votes``
The number of cells each gene marked as deviant (if zero, the gene did not mark any cell
as deviant). This will be zero for non-"feature" genes.
Observation (Cell) Annotations
``seed``
The index of the seed metacell each cell was assigned to. This is ``-1`` for
non-"clean" cells.
``candidate``
The index of the candidate metacell each cell was assigned to. This is ``-1`` for
non-"clean" cells.
``cell_deviant_votes``
The number of genes that were the reason the cell was marked as deviant (if zero, the
cell is not deviant).
``dissolved``
A boolean mask of the cells contained in a dissolved metacell.
``metacell``
The integer index of the metacell each cell belongs to. The metacells are in no
particular order. Cells with no metacell assignment ("outliers") are given a metacell
index of ``-1``.
``outlier``
A boolean mask of the cells contained in no metacell.
**Computation Parameters**
1. Invoke :py:func:`metacells.pipeline.feature.extract_feature_data` to extract "feature" data
from the clean data, using the
``feature_downsample_min_samples`` (default: {feature_downsample_min_samples}),
``feature_downsample_min_cell_quantile`` (default: {feature_downsample_min_cell_quantile}),
``feature_downsample_max_cell_quantile`` (default: {feature_downsample_max_cell_quantile}),
``feature_min_gene_total`` (default: {feature_min_gene_total}), ``feature_min_gene_top3``
(default: {feature_min_gene_top3}), ``feature_min_gene_relative_variance (default:
{feature_min_gene_relative_variance}), ``feature_gene_names`` (default:
{feature_gene_names}), ``feature_gene_patterns`` (default: {feature_gene_patterns}),
``forbidden_gene_names`` (default: {forbidden_gene_names}), ``forbidden_gene_patterns``
(default: {forbidden_gene_patterns}) and ``random_seed`` (default: {random_seed}) to make
this replicable.
2. Compute the fractions of each variable in each cell, and add the
``cells_similarity_value_normalization`` (default: {cells_similarity_value_normalization}) to
it.
3. If ``cells_similarity_log_data`` (default: {cells_similarity_log_data}), invoke the
:py:func:`metacells.utilities.computation.log_data` function to compute the log (base 2) of
the data.
4. Invoke :py:func:`metacells.tools.similarity.compute_obs_obs_similarity` to compute the
similarity between each pair of cells, using the
``cells_similarity_method`` (default: {cells_similarity_method}).
5. Invoke :py:func:`metacells.pipeline.collect.compute_effective_cell_sizes` using
``max_cell_size`` (default: {max_cell_size}), ``max_cell_size_factor`` (default:
{max_cell_size_factor}) and ``cell_sizes`` (default: {cell_sizes}) to get the effective cell
sizes to use.
6. Invoke :py:func:`metacells.tools.knn_graph.compute_obs_obs_knn_graph` to compute a
K-Nearest-Neighbors graph, using the
``knn_balanced_ranks_factor`` (default: {knn_balanced_ranks_factor}),
``knn_incoming_degree_factor`` (default: {knn_incoming_degree_factor})
and
``knn_outgoing_degree_factor`` (default: {knn_outgoing_degree_factor}).
If ``knn_k`` (default: {knn_k}) is not specified, then it is
chosen to be the median number of cells required to reach the target metacell size,
but at least ``min_knn_k`` (default: {min_knn_k}).
7. Invoke :py:func:`metacells.tools.candidates.compute_candidate_metacells` to compute
the candidate metacells, using the
``candidates_cell_seeds`` (default: {candidates_cell_seeds}),
``min_seed_size_quantile`` (default: {min_seed_size_quantile}),
``max_seed_size_quantile`` (default: {max_seed_size_quantile}),
``candidates_cooldown_pass`` (default: {candidates_cooldown_pass}),
``candidates_cooldown_node`` (default: {candidates_cooldown_node}),
``candidates_cooldown_phase`` (default: {candidates_cooldown_phase}),
``candidates_min_split_size_factor`` (default: {candidates_min_split_size_factor}),
``candidates_max_merge_size_factor`` (default: {candidates_max_merge_size_factor}),
``candidates_min_metacell_cells`` (default: {candidates_min_metacell_cells}),
and
``random_seed`` (default: {random_seed})
to make this replicable. This tries to build metacells of the
``target_metacell_size`` (default: {target_metacell_size})
using the effective cell sizes.
8. Unless ``must_complete_cover`` (default: {must_complete_cover}), invoke
:py:func:`metacells.tools.deviants.find_deviant_cells` to remove deviants from the candidate
metacells, using the
``deviants_min_gene_fold_factor`` (default: {deviants_min_gene_fold_factor}),
``deviants_max_gene_fraction`` (default: {deviants_max_gene_fraction})
and
``deviants_max_cell_fraction`` (default: {deviants_max_cell_fraction}).
9. Unless ``must_complete_cover`` (default: {must_complete_cover}), invoke
:py:func:`metacells.tools.dissolve.dissolve_metacells` to dissolve small unconvincing
metacells, using the same
``target_metacell_size`` (default: {target_metacell_size}),
and the effective cell sizes
and the
``dissolve_min_robust_size_factor`` (default: {dissolve_min_robust_size_factor}),
``dissolve_min_convincing_size_factor`` (default: {dissolve_min_convincing_size_factor}),
``dissolve_min_convincing_gene_fold_factor`` (default: {dissolve_min_convincing_gene_fold_factor})
and
``dissolve_min_metacell_cells`` (default: {dissolve_min_metacell_cells}).
'''
fdata = \
extract_feature_data(adata, what, top_level=False,
downsample_min_samples=feature_downsample_min_samples,
downsample_min_cell_quantile=feature_downsample_min_cell_quantile,
downsample_max_cell_quantile=feature_downsample_max_cell_quantile,
min_gene_relative_variance=feature_min_gene_relative_variance,
min_gene_total=feature_min_gene_total,
min_gene_top3=feature_min_gene_top3,
forced_gene_names=feature_gene_names,
forced_gene_patterns=feature_gene_patterns,
forbidden_gene_names=forbidden_gene_names,
forbidden_gene_patterns=forbidden_gene_patterns,
random_seed=random_seed)
if fdata is None:
raise ValueError('Empty feature data, giving up')
effective_cell_sizes, max_cell_size, _cell_scale_factors = \
compute_effective_cell_sizes(adata,
max_cell_size=max_cell_size,
max_cell_size_factor=max_cell_size_factor,
cell_sizes=cell_sizes)
ut.log_calc('effective_cell_sizes',
effective_cell_sizes, formatter=ut.sizes_description)
if max_cell_size is not None:
if candidates_min_metacell_cells is not None:
target_metacell_size = \
max(target_metacell_size,
max_cell_size * candidates_min_metacell_cells)
if dissolve_min_metacell_cells is not None:
target_metacell_size = \
max(target_metacell_size,
max_cell_size * dissolve_min_metacell_cells)
if candidates_min_metacell_cells is not None \
or dissolve_min_metacell_cells is not None:
ut.log_calc('target_metacell_size', target_metacell_size)
data = ut.get_vo_proper(fdata, 'downsampled', layout='row_major')
data = ut.to_numpy_matrix(data, copy=True)
if cells_similarity_value_normalization > 0:
data += cells_similarity_value_normalization
if cells_similarity_log_data:
data = ut.log_data(data, base=2)
if knn_k is None:
if effective_cell_sizes is None:
median_cell_size = 1.0
else:
median_cell_size = float(np.median(effective_cell_sizes))
knn_k = int(round(target_metacell_size / median_cell_size))
if min_knn_k is not None:
knn_k = max(knn_k, min_knn_k)
if knn_k == 0:
ut.log_calc('knn_k: 0 (too small, try single metacell)')
ut.set_o_data(fdata, 'candidate',
np.full(fdata.n_obs, 0, dtype='int32'),
formatter=lambda _: '* <- 0')
elif knn_k >= fdata.n_obs:
ut.log_calc(f'knn_k: {knn_k} (too large, try single metacell)')
ut.set_o_data(fdata, 'candidate',
|
np.full(fdata.n_obs, 0, dtype='int32')
|
numpy.full
|
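A sketch of the np.full call the completion above uses to assign every cell to a single candidate metacell; n_obs = 5 is an arbitrary stand-in for the number of cells:

import numpy as np

n_obs = 5
candidate = np.full(n_obs, 0, dtype='int32')  # every cell starts in candidate metacell 0
print(candidate)                              # -> [0 0 0 0 0]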
from __future__ import print_function, division
import os
import shutil
import tempfile
from astropy.tests.helper import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp
from .. import Model
from ..image import Image
from ...util.functions import random_id
from .test_helpers import get_test_dust
class TestImageSimpleModel(object):
def setup_class(self):
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([1., 2.], [1., 2.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(10, 20)
i.set_wavelength_range(5, 0.1, 100.)
i.set_stokes(True)
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=1)
self.tmpdir = tempfile.mkdtemp()
m.write(os.path.join(self.tmpdir, random_id()))
self.m = m.run()
def teardown_class(self):
shutil.rmtree(self.tmpdir)
def test_image_group(self):
wav, nufnu = self.m.get_image(group=0)
def test_image_group_invalid1(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(group=-2)
# negative indexing allowed, but only one group present
assert exc.value.args[0] == 'File only contains 1 image/SED group(s)'
def test_image_group_invalid2(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(group=1)
# zero-based, and only one group present
assert exc.value.args[0] == 'File only contains 1 image/SED group(s)'
def test_image_dim(self):
wav, nufnu = self.m.get_image()
assert nufnu.shape == (2, 20, 10, 5)
def test_image_dim_incl1(self):
wav, nufnu = self.m.get_image(inclination=0)
assert nufnu.shape == (20, 10, 5)
def test_image_dim_incl2(self):
wav, nufnu = self.m.get_image(inclination=1)
assert nufnu.shape == (20, 10, 5)
def test_image_dim_incl_invalid1(self):
with pytest.raises(IndexError):
wav, nufnu = self.m.get_image(inclination=2)
def test_image_dim_incl_invalid2(self):
with pytest.raises(IndexError):
wav, nufnu = self.m.get_image(inclination=-3)
def test_image_dim_incl_invalid3(self):
with pytest.raises(Exception) as exc:
wav, nufnu = self.m.get_image(inclination=12.3)
assert exc.value.args[0] == "inclination should be an integer (it should be the index of the inclination, not the value itself)"
@pytest.mark.parametrize(('stokes'), ['I', 'Q', 'U', 'V',
'linpol', 'circpol'])
def test_image_stokes(self, stokes):
wav, nufnu = self.m.get_image(stokes=stokes)
assert nufnu.shape == (2, 20, 10, 5)
@pytest.mark.parametrize(('stokes'), ['A', 'b', 1, (3,), # invalid values
'i', 'q', 'u', 'v']) # lowercase
def test_image_stokes_invalid(self, stokes):
with pytest.raises(ValueError):
wav, nufnu = self.m.get_image(stokes=stokes)
@pytest.mark.parametrize(('units'), ['ergs/s'])
def test_image_nodistance_units(self, units):
wav, nufnu = self.m.get_image(units=units)
@pytest.mark.parametrize(('units'), ['ergs/cm^2/s', 'mJy', 'Jy', 'ergs/cm^2/s/Hz', 'MJy/sr'])
def test_image_nodistance_units_invalid(self, units):
with pytest.raises(ValueError):
wav, nufnu = self.m.get_image(units=units)
class TestSEDSimpleModelTrackingDetailed(object):
def setup_class(self):
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
m.add_density_grid(np.array([[[1.e-30]]]), get_test_dust())
s = m.add_point_source()
s.name = 'first'
s.luminosity = 1.
s.temperature = 6000.
s = m.add_point_source()
s.name = 'second'
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([1., 2.], [1., 2.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(10, 20)
i.set_wavelength_range(5, 0.1, 100.)
i.set_track_origin('detailed')
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=1)
self.tmpdir = tempfile.mkdtemp()
m.write(os.path.join(self.tmpdir, random_id()))
self.m = m.run()
def teardown_class(self):
shutil.rmtree(self.tmpdir)
def test_image_source_all(self):
wav, nufnu = self.m.get_image(source_id='all', component='source_emit')
def test_image_source_valid1(self):
wav, nufnu = self.m.get_image(source_id=0, component='source_emit')
def test_image_source_valid2(self):
wav, nufnu = self.m.get_image(source_id=1, component='source_emit')
def test_image_source_valid3(self):
wav, nufnu = self.m.get_image(source_id='first', component='source_emit')
def test_image_source_valid4(self):
wav, nufnu = self.m.get_image(source_id='second', component='source_emit')
def test_image_source_invalid1(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(source_id=-1, component='source_emit')
assert exc.value.args[0] == 'source_id should be between 0 and 1'
def test_image_source_invalid2(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(source_id=2, component='source_emit')
assert exc.value.args[0] == 'source_id should be between 0 and 1'
def test_image_dust_all(self):
wav, nufnu = self.m.get_image(dust_id='all', component='dust_emit')
def test_image_dust_valid1(self):
wav, nufnu = self.m.get_image(dust_id=0, component='dust_emit')
def test_image_dust_invalid1(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(dust_id=-1, component='dust_emit')
assert exc.value.args[0] == 'dust_id should be between 0 and 0'
def test_image_dust_invalid2(self):
with pytest.raises(ValueError) as exc:
wav, nufnu = self.m.get_image(dust_id=1, component='dust_emit')
assert exc.value.args[0] == 'dust_id should be between 0 and 0'
class TestSimpleModelInside(object):
def setup_class(self):
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=False, image=True)
i.set_inside_observer((0., 0., 0.))
i.set_image_limits(1., -1., -1., 1.)
i.set_image_size(10, 20)
i.set_wavelength_range(5, 0.1, 100.)
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=1)
self.tmpdir = tempfile.mkdtemp()
m.write(os.path.join(self.tmpdir, random_id()))
self.m = m.run()
def teardown_class(self):
shutil.rmtree(self.tmpdir)
def test_distance_fail(self):
with pytest.raises(ValueError) as e:
wav, nufnu = self.m.get_image(distance=1.)
assert e.value.args[0] == 'Cannot specify distance for inside observers'
def test_regression_depth_bug(tmpdir):
"""
This is a regression test for issue #21 reported by <NAME>. If multiple
images are requested with different depths, then if a photon did not fall
in a depth interval, it was not included in subsequent image groups
    because 'return' was used instead of 'cycle'.
"""
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
m.add_density_grid(np.array([[[1.e-30]]]), get_test_dust())
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([0.], [0.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(1, 1)
i.set_wavelength_range(1, 0.01, 1000.)
i.set_depth(0.5, 1.0)
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([0.], [0.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(1, 1)
i.set_wavelength_range(1, 0.01, 1000.)
i.set_depth(-0.5, 0.5)
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([0.], [0.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(1, 1)
i.set_wavelength_range(1, 0.01, 1000.)
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=1)
m.write(tmpdir.join(random_id()).strpath)
mo = m.run(tmpdir.join(random_id()).strpath)
wav, image1 = mo.get_image(group=0)
wav, image2 = mo.get_image(group=1)
wav, image3 = mo.get_image(group=2)
assert image1.sum() == 0.
assert image2.sum() > 0.
assert image3.sum() == image2.sum()
class TestImage(object):
def setup_class(self):
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=False, image=True)
i.set_viewing_angles([1., 2.], [1., 2.])
i.set_image_limits(-1., 2., -3., 4.)
i.set_image_size(10, 20)
i.set_wavelength_range(5, 0.1, 100.)
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=10000)
self.tmpdir = tempfile.mkdtemp()
m.write(os.path.join(self.tmpdir, random_id()))
self.m = m.run()
def teardown_class(self):
shutil.rmtree(self.tmpdir)
def test_get_image_object(self):
image = self.m.get_image(group=0)
assert isinstance(image, Image)
def test_image_attributes_no_distance(self):
image = self.m.get_image(group=0, units='ergs/s')
assert image.x_min == -1.
assert image.x_max == +2.
assert image.y_min == -3.
assert image.y_max == +4.
assert image.lon_min is None
assert image.lon_max is None
assert image.lat_min is None
assert image.lat_max is None
assert image.pix_area_sr is None
assert image.distance is None
assert not image.inside_observer
assert image.units == 'ergs/s'
assert image.nu.shape == (5,)
assert image.wav.shape == (5,)
assert image.val.shape == (2, 20, 10, 5)
def test_image_attributes_distance(self):
image = self.m.get_image(group=0, units='ergs/cm^2/s', distance=100.)
assert image.x_min == -1.
assert image.x_max == +2.
assert image.y_min == -3.
assert image.y_max == +4.
lon_min = np.degrees(np.arctan(-1. / 100.))
lon_max = np.degrees(np.arctan(+2. / 100.))
lat_min = np.degrees(np.arctan(-3. / 100.))
lat_max = np.degrees(np.arctan(+4. / 100.))
assert_array_almost_equal_nulp(image.lon_min, lon_min, 5)
assert_array_almost_equal_nulp(image.lon_max, lon_max, 5)
assert_array_almost_equal_nulp(image.lat_min, lat_min, 5)
assert_array_almost_equal_nulp(image.lat_max, lat_max, 5)
pix_area_sr = np.radians(lon_max - lon_min) * np.radians(lat_max - lat_min) / 200
assert_array_almost_equal_nulp(image.pix_area_sr, pix_area_sr, 5)
assert image.distance == 100.
assert not image.inside_observer
assert image.units == 'ergs/cm^2/s'
assert image.nu.shape == (5,)
assert image.wav.shape == (5,)
assert image.val.shape == (2, 20, 10, 5)
def test_unit_conversion(self):
# Assume that the initial scaling in ergs/cm^2/s is correct, so then
# we just need to check the relative scaling.
ref = self.m.get_image(group=0, units='ergs/cm^2/s', distance=100., inclination=1)
# Make sure the flux is non-zero
assert
|
np.sum(ref.val)
|
numpy.sum
|
import pytest
import pyCGM_Single.pycgmStatic as pycgmStatic
import numpy as np
from mock import patch
rounding_precision = 8
class TestPycgmStaticAxis():
"""
This class tests the axis functions in pycgmStatic.py:
staticCalculationHead
pelvisJointCenter
hipJointCenter
hipAxisCenter
kneeJointCenter
ankleJointCenter
footJointCenter
headJC
uncorrect_footaxis
rotaxis_footflat
rotaxis_nonfootflat
findJointC
"""
nan_3d = [np.nan, np.nan, np.nan]
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["head", "expected"], [
# Test from running sample data
([[[244.87227957886893, 326.0240255639856, 1730.4189843948805],
[243.89575702706503, 325.0366593474616, 1730.1515677531293],
[244.89086730509763, 324.80072493605866, 1731.1283433097797]],
[244.89547729492188, 325.0578918457031, 1730.1619873046875]],
0.25992807335420975),
# Test with zeros for all params
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][0]
([[[-1, 8, 9], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
1.5707963267948966),
# Testing when values are added to head[0][1]
([[[0, 0, 0], [7, 5, 7], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][2]
([[[0, 0, 0], [0, 0, 0], [3, -6, -2]], [0, 0, 0]],
0.0),
# Testing when values are added to head[0]
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [0, 0, 0]],
-1.3521273809209546),
# Testing when values are added to head[1]
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [-4, 7, 8]],
0.7853981633974483),
# Testing when values are added to head
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of lists of ints
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of ints
([np.array([[-1, 8, 9], [7, 5, 7], [3, -6, -2]], dtype='int'), np.array([-4, 7, 8], dtype='int')],
-0.09966865249116204),
# Testing that when head is composed of lists of floats
([[[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], [-4.0, 7.0, 8.0]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of floats
([np.array([[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], dtype='float'), np.array([-4.0, 7.0, 8.0], dtype='float')],
-0.09966865249116204)])
def test_staticCalculationHead(self, head, expected):
"""
This test provides coverage of the staticCalculationHead function in pycgmStatic.py, defined as staticCalculationHead(frame, head)
This test takes 2 parameters:
head: array containing the head axis and head origin
expected: the expected result from calling staticCalculationHead on head
        This function first calculates the x, y, z axes of the head by subtracting the head origin from the given head
        axes. It then calls headoffCalc on this head axis and a global axis to find the head offset angles.
This test ensures that:
- the head axis and the head origin both have an effect on the final offset angle
- the resulting output is correct when head is composed of lists of ints, numpy arrays of ints, lists of
floats, and numpy arrays of floats.
"""
result = pycgmStatic.staticCalculationHead(None, head)
np.testing.assert_almost_equal(result, expected, rounding_precision)
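    # A small illustrative sketch (not part of the original suite): the docstring above
    # says the head axes are re-expressed relative to the head origin before headoffCalc
    # is called, so translating the whole head (axes and origin) by one common vector
    # should leave the returned offset angle unchanged. This assumes that behaviour and
    # is only a sanity check, not an additional pycgmStatic requirement.
    def test_staticCalculationHead_translation_invariance_sketch(self):
        head = [[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]]
        shift = np.array([10.0, -5.0, 2.0])
        shifted = [[(np.array(point) + shift).tolist() for point in head[0]],
                   (np.array(head[1]) + shift).tolist()]
        np.testing.assert_almost_equal(pycgmStatic.staticCalculationHead(None, shifted),
                                       pycgmStatic.staticCalculationHead(None, head),
                                       rounding_precision)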
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'RASI': np.array([357.90066528, 377.69210815, 1034.97253418]),
'LASI': np.array([145.31594849, 405.79052734, 1030.81445312]),
'RPSI': np.array([274.00466919, 205.64402771, 1051.76452637]),
'LPSI': np.array([189.15231323, 214.86122131, 1052.73486328])},
[np.array([251.60830688, 391.74131775, 1032.89349365]),
np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]),
np.array([231.57849121, 210.25262451, 1052.24969482])]),
# Test with zeros for all params
({'SACR': np.array([0, 0, 0]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([0, 0, 0])]),
# Testing when adding values to frame['RASI'] and frame['LASI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([0, 0, 0]),
'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.44458106, -1.48072284, 2.32771179], [-6.56593805, -2.48907071, 1.86812391], [-6.17841206, -1.64617634, 2.93552855]]),
np.array([0, 0, 0])]),
# Testing when adding values to frame['RPSI'] and frame['LPSI']
({'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([4., -1.0, -1.0])]),
# Testing when adding values to frame['SACR']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5, ])]),
# Testing when adding values to frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.45825845, -1.47407957, 2.28472598], [-6.56593805, -2.48907071, 1.86812391], [-6.22180416, -1.64514566, 2.9494945]]),
np.array([4.0, -1.0, -1.0])]),
# Testing when adding values to frame['SACR'], frame['RASI'] and frame['LASI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of ints
({'SACR': [-4, 8, -5], 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': [1, 0, -4],
'LPSI': [7, -2, 2]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
    # Testing that when frame is composed of numpy arrays of ints
({'SACR': np.array([-4, 8, -5], dtype='int'), 'RASI': np.array([-6, 6, 3], dtype='int'),
'LASI': np.array([-7, -9, 1], dtype='int'), 'RPSI': np.array([1, 0, -4], dtype='int'),
'LPSI': np.array([7, -2, 2], dtype='int')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of floats
({'SACR': [-4.0, 8.0, -5.0], 'RASI': np.array([-6.0, 6.0, 3.0]), 'LASI': np.array([-7.0, -9.0, 1.0]),
'RPSI': [1.0, 0.0, -4.0], 'LPSI': [7.0, -2.0, 2.0]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of numpy arrays of floats
({'SACR': np.array([-4.0, 8.0, -5.0], dtype='float'), 'RASI': np.array([-6.0, 6.0, 3.0], dtype='float'),
'LASI': np.array([-7.0, -9.0, 1.0], dtype='float'), 'RPSI': np.array([1.0, 0.0, -4.0], dtype='float'),
'LPSI': np.array([7.0, -2.0, 2.0], dtype='float')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])])])
def test_pelvisJointCenter(self, frame, expected):
"""
This test provides coverage of the pelvisJointCenter function in pycgmStatic.py, defined as pelvisJointCenter(frame)
This test takes 2 parameters:
frame: dictionary of marker lists
expected: the expected result from calling pelvisJointCenter on frame
This test is checking to make sure the pelvis joint center and axis are calculated correctly given the input
parameters. The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to frame['RASI'] and frame['LASI'], expected[0] and expected[1] should be updated
When values are added to frame['RPSI'] and frame['LPSI'], expected[2] should be updated
When values are added to frame['SACR'], expected[2] should be updated, and expected[1] should also be updated
if there are values for frame['RASI'] and frame['LASI']
        Values produced from frame['SACR'] take precedence over frame['RPSI'] and frame['LPSI']
If RPSI and LPSI are given, then the sacrum will be the midpoint of those two markers. If they are not given then the sacrum is already calculated / specified.
The origin of the pelvis is midpoint of the RASI and LASI markers.
The axis of the pelvis is calculated using LASI, RASI, origin, and sacrum in the Gram-Schmidt orthogonalization procedure (ref. Kadaba 1990).
Lastly, it checks that the resulting output is correct when frame is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats. frame['LASI'] and frame['RASI'] were kept as numpy arrays
        every time, as lists would cause an error in the following line of pycgmStatic.py since lists cannot be divided by floats:
origin = (RASI+LASI)/2.0
"""
result = pycgmStatic.pelvisJointCenter(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
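    # A minimal illustrative sketch (not part of the original suite) of the midpoint
    # relations described in the docstring above: the pelvis origin is the RASI/LASI
    # midpoint and, when no SACR marker is supplied, the sacrum is the RPSI/LPSI
    # midpoint. The marker values below are arbitrary.
    def test_pelvisJointCenter_midpoints_sketch(self):
        frame = {'RASI': np.array([2.0, 0.0, 0.0]), 'LASI': np.array([0.0, 2.0, 0.0]),
                 'RPSI': np.array([0.0, 0.0, 0.0]), 'LPSI': np.array([0.0, 0.0, 2.0])}
        result = pycgmStatic.pelvisJointCenter(frame)
        np.testing.assert_almost_equal(result[0], (frame['RASI'] + frame['LASI']) / 2.0, rounding_precision)
        np.testing.assert_almost_equal(result[2], (frame['RPSI'] + frame['LPSI']) / 2.0, rounding_precision)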
@pytest.mark.parametrize(["pel_origin", "pel_x", "pel_y", "pel_z", "vsk", "expected"], [
# Test from running sample data
([251.608306884766, 391.741317749023, 1032.893493652344], [251.740636241119, 392.726947206848, 1032.788500732036], [250.617115540376, 391.872328624646, 1032.874106304030], [251.602953357582, 391.847951338178, 1033.887777624562],
{'MeanLegLength': 940.0, 'R_AsisToTrocanterMeasure': 72.512, 'L_AsisToTrocanterMeasure': 72.512, 'InterAsisDistance': 215.908996582031},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[0, 0, 0], [0, 0, 0]]),
# Testing when values are added to pel_origin
([1, 0, -3], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[-6.1387721, 0, 18.4163163], [8.53165418, 0, -25.59496255]]),
# Testing when values are added to pel_x
([0, 0, 0], [-5, -3, -6], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[54.02442793, 32.41465676, 64.82931352], [54.02442793, 32.41465676, 64.82931352]]),
# Testing when values are added to pel_y
([0, 0, 0], [0, 0, 0], [4, -1, 2], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[29.34085257, -7.33521314, 14.67042628], [-29.34085257, 7.33521314, -14.67042628]]),
# Testing when values are added to pel_z
([0, 0, 0], [0, 0, 0], [0, 0, 0], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[31.82533363, 84.86755635, 21.21688909], [31.82533363, 84.86755635, 21.21688909]]),
# Test when values are added to pel_x, pel_y, and pel_z
([0, 0, 0], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[115.19061413, 109.94699997, 100.71662889], [56.508909 , 124.61742625, 71.37577632]]),
# Test when values are added to pel_origin, pel_x, pel_y, and pel_z
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[MeanLegLength]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[100.88576753, 97.85280235, 106.39612748], [61.83654463, 110.86920998, 41.31408931]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[R_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [-57.09307697, 115.44008189, 14.36512267]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[L_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
     {'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 0.0},
[[73.42953032, 107.27027453, 109.97003528], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[InterAsisDistance]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 11.0},
[[125.55184203, 104.44699997, 146.63294518], [48.54056318, 130.11742625, 18.28081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and all values in vsk
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of ints and vsk values are ints
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of ints and vsk values are ints
(np.array([1, 0, -3], dtype='int'), np.array([-5, -3, -6], dtype='int'), np.array([4, -1, 2], dtype='int'),
np.array([3, 8, 2], dtype='int'),
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of floats and vsk values are floats
([1.0, 0.0, -3.0], [-5.0, -3.0, -6.0], [4.0, -1.0, 2.0], [3.0, 8.0, 2.0],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of floats and vsk values are floats
(np.array([1.0, 0.0, -3.0], dtype='float'), np.array([-5.0, -3.0, -6.0], dtype='float'),
np.array([4.0, -1.0, 2.0], dtype='float'), np.array([3.0, 8.0, 2.0], dtype='float'),
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]])])
def test_hipJointCenter(self, pel_origin, pel_x, pel_y, pel_z, vsk, expected):
"""
This test provides coverage of the hipJointCenter function in pycgmStatic.py, defined as hipJointCenter(frame, pel_origin, pel_x, pel_y, pel_z, vsk)
This test takes 6 parameters:
pel_origin: array of x,y,z position of origin of the pelvis
pel_x: array of x,y,z position of x-axis of the pelvis
pel_y: array of x,y,z position of y-axis of the pelvis
pel_z: array of x,y,z position of z-axis of the pelvis
vsk: dictionary containing subject measurements from a VSK file
expected: the expected result from calling hipJointCenter on pel_origin, pel_x, pel_y, pel_z, and vsk
This test is checking to make sure the hip joint center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added. Any
        parameter that is added should change every value in expected.
The hip joint center axis and origin are calculated using the Hip Joint Center Calculation (ref. Davis_1991).
Lastly, it checks that the resulting output is correct when pel_origin, pel_x, pel_y, and pel_z are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats and vsk values are ints or floats.
"""
result = pycgmStatic.hipJointCenter(None, pel_origin, pel_x, pel_y, pel_z, vsk)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
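    # Illustrative sketch (not part of the original suite): with an all-zero subject
    # measurement dictionary and only the pelvis y-axis populated, the two hip joint
    # centres come out as mirror images of each other, matching the pel_y-only case in
    # the parametrize table above.
    def test_hipJointCenter_mirror_sketch(self):
        vsk = {'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0,
               'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0}
        result = pycgmStatic.hipJointCenter(None, [0, 0, 0], [0, 0, 0], [4, -1, 2], [0, 0, 0], vsk)
        np.testing.assert_almost_equal(result[0], -1 * np.asarray(result[1]), rounding_precision)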
@pytest.mark.parametrize(["l_hip_jc", "r_hip_jc", "pelvis_axis", "expected"], [
# Test from running sample data
([182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061],
[np.array([251.60830688, 391.74131775, 1032.89349365]), np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]), np.array([231.57849121, 210.25262451, 1052.24969482])],
[[245.47574167208043, 331.1178713574418, 936.7593959314677], [[245.60807102843359, 332.10350081526684, 936.6544030111602], [244.48455032769033, 331.2488822330648, 936.7400085831541], [245.47038814489719, 331.22450494659665, 937.7536799036861]]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]),
# Testing when values are added to l_hip_jc
([1, -3, 2], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0.5, -1.5, 1], [[0.5, -1.5, 1], [0.5, -1.5, 1], [0.5, -1.5, 1]]]),
# Testing when values are added to r_hip_jc
([0, 0, 0], [-8, 1, 4],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[-4, 0.5, 2], [[-4, 0.5, 2], [-4, 0.5, 2], [-4, 0.5, 2]]]),
# Testing when values are added to l_hip_jc and r_hip_jc
([8, -3, 7], [5, -2, -1],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[6.5, -2.5, 3], [[6.5, -2.5, 3], [6.5, -2.5, 3], [6.5, -2.5, 3]]]),
# Testing when values are added to pelvis_axis[0]
([0, 0, 0], [0, 0, 0],
[np.array([1, -3, 6]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 3, -6], [-1, 3, -6], [-1, 3, -6]]]),
# Testing when values are added to pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[1, 0, 5], [-2, -7, -3], [9, -2, 7]]), np.array(rand_coor)],
[[0, 0, 0], [[1, 0, 5], [-2, -7, -3], [9, -2, 7]]]),
# Testing when values are added to pelvis_axis[0] and pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([-3, 0, 5]), np.array([[-4, 5, -2], [0, 0, 0], [8, 5, -1]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 5, -7], [3, 0, -5], [11, 5, -6]]]),
# Testing when values are added to all params
([-5, 3, 8], [-3, -7, -1],
[np.array([6, 3, 9]), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]]), np.array(rand_coor)],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of ints
([-5, 3, 8], [-3, -7, -1],
[[6, 3, 9], [[5, 4, -2], [0, 0, 0], [7, 2, 3]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of ints
(np.array([-5, 3, 8], dtype='int'), np.array([-3, -7, -1], dtype='int'),
[np.array([6, 3, 9], dtype='int'), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]], dtype='int'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of floats
([-5.0, 3.0, 8.0], [-3.0, -7.0, -1.0],
[[6.0, 3.0, 9.0], [[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of floats
(np.array([-5.0, 3.0, 8.0], dtype='float'), np.array([-3.0, -7.0, -1.0], dtype='float'),
[np.array([6.0, 3.0, 9.0], dtype='float'),
np.array([[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], dtype='float'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]])])
def test_hipAxisCenter(self, l_hip_jc, r_hip_jc, pelvis_axis, expected):
"""
This test provides coverage of the hipAxisCenter function in pycgmStatic.py, defined as hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
This test takes 4 parameters:
l_hip_jc: array of left hip joint center x,y,z position
r_hip_jc: array of right hip joint center x,y,z position
pelvis_axis: array of pelvis origin and axis
expected: the expected result from calling hipAxisCenter on l_hip_jc, r_hip_jc, and pelvis_axis
This test is checking to make sure the hip axis center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to l_hip_jc or r_hip_jc, every value in expected should be updated
When values are added to pelvis_axis, expected[1] should be updated
The hip axis center is calculated using the midpoint of the right and left hip joint centers.
Then, the given pelvis_axis variable is converted into x,y,z axis format.
The pelvis axis is then translated to the shared hip center by calculating the sum of:
pelvis_axis axis component + hip_axis_center axis component
Lastly, it checks that the resulting output is correct when l_hip_jc, r_hip_jc, and pelvis_axis are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats.
"""
result = pycgmStatic.hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
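    # Illustrative sketch (not part of the original suite) of the relations spelled out
    # in the docstring above: the shared hip centre is the midpoint of the two hip joint
    # centres, and each pelvis axis point is translated by (hip centre - pelvis origin).
    # The input values are arbitrary.
    def test_hipAxisCenter_translation_sketch(self):
        l_hip_jc = [-5, 3, 8]
        r_hip_jc = [-3, -7, -1]
        pelvis_axis = [np.array([6, 3, 9]),
                       np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]]),
                       np.array(self.rand_coor)]
        result = pycgmStatic.hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
        center = (np.array(l_hip_jc) + np.array(r_hip_jc)) / 2.0
        np.testing.assert_almost_equal(result[0], center, rounding_precision)
        np.testing.assert_almost_equal(result[1], pelvis_axis[1] - pelvis_axis[0] + center, rounding_precision)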
@pytest.mark.parametrize(["frame", "hip_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTHI': np.array([426.50338745, 262.65310669, 673.66247559]),
'LTHI': np.array([51.93867874, 320.01849365, 723.03186035]),
'RKNE': np.array([416.98687744, 266.22558594, 524.04089355]),
'LKNE': np.array([84.62355804, 286.69122314, 529.39819336])},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]],
{'RightKneeWidth': 105.0, 'LeftKneeWidth': 105.0},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753])],
[[[426.50338745, 262.65310669, 673.66247559], [308.38050472, 322.80342417, 937.98979061], [416.98687744, 266.22558594, 524.04089355], 59.5],
[[51.93867874, 320.01849365, 723.03186035], [182.57097863, 339.43231855, 935.52900126], [84.62355804, 286.69122314, 529.39819336], 59.5]],
[np.array([364.17774614, 292.17051722, 515.19181496]),
np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[[364.61959153, 293.06758353, 515.18513093], [363.29019771, 292.60656648, 515.04309095], [364.04724541, 292.24216264, 516.18067112]],
[[143.65611282, 280.88685896, 524.63197541], [142.56434499, 280.01777943, 524.86163553], [143.64837987, 280.04650381, 525.76940383]]])]),
# Test with zeros for all params
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to frame
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [0, 0, 0], [8, -4, 5], 7.0], [[-1, 0, 8], [0, 0, 0], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to hip_JC
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [1, -9, 2], [0, 0, 0], 7.0], [[0, 0, 0], [-8, 8, -2], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, [0.10783277, -0.97049496, 0.21566555]],
[nan_3d, nan_3d, [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to vsk
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 11.5], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to mockReturnVal
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[nan_3d, nan_3d, [-4.56314797, -4.56314797, -8.21366635]],
[nan_3d, nan_3d, [2.64143142, -5.28286283, -4.4023857]]])]),
# Testing when values are added to frame and hip_JC
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 7.0], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736, 0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, and vsk
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]),
'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736 ,0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, vsk, and mockReturnVal
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4], dtype='int'), 'LTHI': np.array([-1, 0, 8], dtype='int'),
'RKNE': np.array([8, -4, 5], dtype='int'), 'LKNE': np.array([8, -8, 5], dtype='int')},
np.array([[-8, 8, -2], [1, -9, 2]], dtype='int'),
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of floats and vsk values are floats
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of floats and vsk values are floats
({'RTHI': np.array([1.0, 2.0, 4.0], dtype='float'), 'LTHI': np.array([-1.0, 0.0, 8.0], dtype='float'),
'RKNE': np.array([8.0, -4.0, 5.0], dtype='float'), 'LKNE': np.array([8.0, -8.0, 5.0], dtype='float')},
np.array([[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]], dtype='int'),
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])])])
def test_kneeJointCenter(self, frame, hip_JC, vsk, mockReturnVal, expectedMockArgs, expected):
"""
This test provides coverage of the kneeJointCenter function in pycgmStatic.py, defined as kneeJointCenter(frame, hip_JC, delta, vsk)
This test takes 6 parameters:
frame: dictionary of marker lists
hip_JC: array of hip_JC containing the x,y,z axes marker positions of the hip joint center
vsk: dictionary containing subject measurements from a VSK file
mockReturnVal: the value to be returned by the mock for findJointC
expectedMockArgs: the expected arguments used to call the mocked function, findJointC
expected: the expected result from calling kneeJointCenter on frame, hip_JC, vsk, and mockReturnVal
This test is checking to make sure the knee joint center and axis are calculated correctly given the input
parameters. This tests mocks findJointC to make sure the correct parameters are being passed into it given the
parameters passed into kneeJointCenter, and to also ensure that kneeJointCenter returns the correct value considering
the return value of findJointC, mockReturnVal.
For each direction (L or R) D, the D knee joint center is calculated using DTHI, D hip joint center, and
        DKNE in the Rodrigues' rotation formula. The knee width for each knee is applied after the rotation in the formula as well.
        Each knee joint center and the RKNE / LKNE markers are used in the Knee Axis Calculation formula
        (ref. Clinical Gait Analysis handbook, Baker 2013).
Lastly, it checks that the resulting output is correct when hip_JC is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats and vsk values are ints and floats. The values in frame were
        kept as numpy arrays, as lists would cause an error in the following lines of pycgmStatic.py since lists cannot
        be subtracted from each other:
thi_kne_R = RTHI-RKNE
thi_kne_L = LTHI-LKNE
"""
with patch.object(pycgmStatic, 'findJointC', side_effect=mockReturnVal) as mock_findJointC:
result = pycgmStatic.kneeJointCenter(frame, hip_JC, None, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
        # Asserting that kneeJointCenter returned the correct result given the return value of the mocked findJointC
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
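    # Illustrative sketch (not part of the original suite): in the expectedMockArgs above,
    # the fourth argument handed to findJointC is half the knee width plus a 7.0 marker
    # radius (e.g. 105.0 / 2 + 7.0 == 59.5). This re-checks that convention for one of
    # the parameter sets used above, assuming kneeJointCenter keeps calling findJointC
    # positionally.
    def test_kneeJointCenter_delta_sketch(self):
        frame = {'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]),
                 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])}
        hip_JC = [[-8, 8, -2], [1, -9, 2]]
        vsk = {'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0}
        with patch.object(pycgmStatic, 'findJointC',
                          side_effect=[np.array([0, 0, 0]), np.array([0, 0, 0])]) as mock_findJointC:
            pycgmStatic.kneeJointCenter(frame, hip_JC, None, vsk)
        np.testing.assert_almost_equal(mock_findJointC.call_args_list[0][0][3], 9.0 / 2.0 + 7.0, rounding_precision)
        np.testing.assert_almost_equal(mock_findJointC.call_args_list[1][0][3], -6.0 / 2.0 + 7.0, rounding_precision)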
@pytest.mark.parametrize(["frame", "knee_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTIB': np.array([433.97537231, 211.93408203, 273.3008728 ]), 'LTIB': np.array([50.04016495, 235.90718079, 364.32226562]),
'RANK': np.array([422.77005005, 217.74053955, 92.86152649]), 'LANK': np.array([58.57380676, 208.54806519, 86.16953278])},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 70.0, 'LeftAnkleWidth': 70.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816])],
[[[433.97537231, 211.93408203, 273.3008728 ], [364.17774614, 292.17051722, 515.19181496], [422.77005005, 217.74053955, 92.86152649], 42.0],
[[50.04016495, 235.90718079, 364.32226562], [143.55478579, 279.90370346, 524.78408753], [58.57380676, 208.54806519, 86.16953278], 42.0]],
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array([394.48171575, 248.37201348, 87.715368]),
np.array([393.07114384, 248.39110006, 87.61575574]),
np.array([393.69314056, 247.78157916, 88.73002876])],
[np.array([98.47494966, 220.42553803, 80.52821783]),
np.array([97.79246671, 219.20927275, 80.76255901]),
np.array([98.84848169, 219.60345781, 81.61663775])]]]),
# Test with zeros for all params
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to frame
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]),
'LANK': np.array([2, -4, -5])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [0, 0, 0], [1, 0, -5], 7.0],
[[0, 2, -1], [0, 0, 0], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to knee_JC
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [-7, 1, 2], [0, 0, 0], 7.0],
[[0, 0, 0], [9, -8, 9], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array(nan_3d), np.array(nan_3d), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to vsk
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], -12.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to mockReturnVal
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array(nan_3d), np.array(nan_3d), np.array([1.7018576 , -4.25464401, 3.40371521])],
[np.array(nan_3d), np.array(nan_3d), np.array([7.07001889, -2.65125708, 0.88375236])]]]),
# Testing when values are added to frame and knee_JC
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], 7.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.26726124, -0.80178373, -0.53452248]), np.array([0.14547859, -0.58191437, 0.80013226]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.79317435, 0.49803971, -0.35047239]), np.array([-0.11165737, 0.68466825, 0.72025136]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, and vsk
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.30428137, -0.41913816, -0.85541572]), np.array([-0.00233238, -0.89766624, 0.4406698]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.7477279, 0.63929183, -0.1794685]), np.array([-0.287221, 0.55508569, 0.7806305]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, vsk and mockReturnVal
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997 ]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of lists of ints and vsk values are ints
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[[-7, 1, 2], [9, -8, 9],
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38, 'LeftAnkleWidth': 18, 'RightTibialTorsion': 29, 'LeftTibialTorsion': -13},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of numpy arrays of ints and vsk values are ints
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2], dtype='int'), np.array([9, -8, 9], dtype='int'),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38, 'LeftAnkleWidth': 18, 'RightTibialTorsion': 29, 'LeftTibialTorsion': -13},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of lists of floats and vsk values are floats
({'RTIB': np.array([-9.0, 6.0, -9.0]), 'LTIB': np.array([0.0, 2.0, -1.0]), 'RANK': np.array([1.0, 0.0, -5.0]), 'LANK': np.array([2.0, -4.0, -5.0])},
[[-7.0, 1.0, 2.0], [9.0, -8.0, 9.0],
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of numpy arrays of floats and vsk values are floats
({'RTIB': np.array([-9.0, 6.0, -9.0]), 'LTIB': np.array([0.0, 2.0, -1.0]), 'RANK': np.array([1.0, 0.0, -5.0]), 'LANK': np.array([2.0, -4.0, -5.0])},
[np.array([-7.0, 1.0, 2.0], dtype='float'), np.array([9.0, -8.0, 9.0], dtype='float'),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]], dtype='float')],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]])])
def test_ankleJointCenter(self, frame, knee_JC, vsk, mockReturnVal, expectedMockArgs, expected):
"""
This test provides coverage of the ankleJointCenter function in pycgmStatic.py, defined as ankleJointCenter(frame, knee_JC, delta, vsk)
This test takes 6 parameters:
frame: dictionary of marker lists
knee_JC: array of knee_JC each x,y,z position.
vsk: dictionary containing subject measurements from a VSK file
mockReturnVal: the value to be returned by the mock for findJointC
expectedMockArgs: the expected arguments used to call the mocked function, findJointC
expected: the expected result from calling ankleJointCenter on frame, knee_JC, vsk, and mockReturnVal
This test is checking to make sure the ankle joint center and axis are calculated correctly given the input
parameters. This tests mocks findJointC to make sure the correct parameters are being passed into it given the
parameters passed into ankleJointCenter, and to also ensure that ankleJointCenter returns the correct value considering
the return value of findJointC, mockReturnVal.
        The left and right ankle joint center origins are defined using the ANK, TIB, and KJC marker positions in the Rodrigues' rotation formula.
        The ankle joint center axis is calculated using the Ankle Axis Calculation (ref. Clinical Gait Analysis handbook, Baker 2013).
Lastly, it checks that the resulting output is correct when knee_JC is composed of lists of ints, numpy arrays
of ints, lists of floats, and numpy arrays of floats and vsk values are ints and floats. The values in frame
        were kept as numpy arrays, as lists would cause an error in the following lines of pycgmStatic.py since lists cannot
        be subtracted from each other:
tib_ank_R = tib_R-ank_R
tib_ank_L = tib_L-ank_L
"""
with patch.object(pycgmStatic, 'findJointC', side_effect=mockReturnVal) as mock_findJointC:
result = pycgmStatic.ankleJointCenter(frame, knee_JC, None, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
# Asserting that ankleJointCenter returned the correct result given the return value of the mocked findJointC
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
@pytest.mark.parametrize(["frame", "static_info", "ankle_JC", "expected"], [
# Test from running sample data
({'RTOE': np.array([442.81997681, 381.62280273, 42.66047668]), 'LTOE': np.array([39.43652725, 382.44522095, 41.78911591])},
[[0.03482194, 0.14879424, np.random.randint(0, 10)], [0.01139704, 0.02142806, np.random.randint(0, 10)]],
[np.array([393.76181608, 247.67829633, 87.73775041]),
np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array(nan_3d), np.array([393.07114384, 248.39110006, 87.61575574]), np.array(nan_3d)],
[np.array(nan_3d), np.array([97.79246671, 219.20927275, 80.76255901]), np.array(nan_3d)]]],
[np.array([442.81997681, 381.62280273, 42.66047668]),
np.array([39.43652725, 382.44522095, 41.78911591]),
np.array([[[442.8881541, 381.76460597, 43.64802096],
[441.89515447, 382.00308979, 42.66971773],
[442.44573691, 380.70886969, 42.81754643]],
[[39.50785213, 382.67891581, 42.75880631],
[38.49231839, 382.14765966, 41.93027863],
[39.75805858, 381.51956227, 41.98854914]]])]),
# Test with zeros for all params
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to frame
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to static_info
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to ankle_JC
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[0.3713906763541037, 0.5570860145311556, -0.7427813527082074], [-0.24913643956121992, 0.8304547985373997, 0.49827287912243984], [0.8944271909999159, 0.0, 0.4472135954999579]],
[[-0.6855829496241487, 0.538672317561831, 0.4897021068743917], [0.701080937355391, 0.3073231506215415, 0.6434578466138523], [0.19611613513818404, 0.7844645405527362, -0.5883484054145521]]])]),
# Testing with values added to frame and static_info
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to frame and ankle_JC
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.4764529245456802, -0.34134400184779123, -5.540435690791556], [-1.544126730072802, -0.25340750990010874, -4.617213172448785], [-0.3443899318928142, -0.9063414188418306, -4.250731350734645]],
[[-5.617369411832039, -5.417908840272649, 1.5291737815703186], [-4.3819280753253675, -6.057228881914318, 1.7840356822261547], [-4.513335736607712, -5.188892894346187, 0.6755571577384749]]])]),
# Testing with values added to static_info and ankle_JC
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[0.8676189717605698, 0.41998838044559317, -0.2661711481957037], [-0.35944921047092726, 0.8996435491853136, 0.2478663944569317], [0.3435601620283683, -0.11937857722363693, 0.9315123028533232]],
[[0.5438323231671144, -0.8140929502604927, -0.20371321168453085], [0.12764145145799288, 0.32016712879535714, -0.9387228928222822], [0.829429963377473, 0.48450560159311296, 0.27802923924749284]]])]),
# Testing with values added to frame, static_info and ankle_JC
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of lists of ints
({'RTOE': [-1, -1, -5], 'LTOE': [-5, -6, 1]},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[[6, 0, 3], [1, 4, -3],
[[nan_3d, [-2, 8, 5], nan_3d],
[nan_3d, [1, -6, 8], nan_3d]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of numpy arrays of ints
({'RTOE': np.array([-1, -1, -5], dtype='int'), 'LTOE': np.array([-5, -6, 1], dtype='int')},
[np.array([-6, 7, np.random.randint(0, 10)], dtype='int'), np.array([2, -9, np.random.randint(0, 10)], dtype='int')],
[np.array([6, 0, 3], dtype='int'), np.array([1, 4, -3], dtype='int'),
[[np.array(nan_3d), np.array([-2, 8, 5], dtype='int'), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8], dtype='int'), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of lists of floats
({'RTOE': [-1.0, -1.0, -5.0], 'LTOE': [-5.0, -6.0, 1.0]},
[[-6.0, 7.0, np.random.randint(0, 10)], [2.0, -9.0, np.random.randint(0, 10)]],
[[6.0, 0.0, 3.0], [1.0, 4.0, -3.0],
[[nan_3d, [-2.0, 8.0, 5.0], nan_3d],
[nan_3d, [1.0, -6.0, 8.0], nan_3d]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of numpy arrays of floats
({'RTOE': np.array([-1.0, -1.0, -5.0], dtype='float'), 'LTOE': np.array([-5.0, -6.0, 1.0], dtype='float')},
[np.array([-6.0, 7.0, np.random.randint(0, 10)], dtype='float'),
np.array([2.0, -9.0, np.random.randint(0, 10)], dtype='float')],
[np.array([6.0, 0.0, 3.0], dtype='float'), np.array([1.0, 4.0, -3.0], dtype='float'),
[[np.array(nan_3d), np.array([-2.0, 8.0, 5.0], dtype='float'), np.array(nan_3d)],
[np.array(nan_3d), np.array([1.0, -6.0, 8.0], dtype='float'), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])])])
def test_footJointCenter(self, frame, static_info, ankle_JC, expected):
"""
This test provides coverage of the footJointCenter function in pycgmStatic.py, defined as footJointCenter(frame, static_info, ankle_JC, knee_JC, delta)
This test takes 4 parameters:
frame: dictionaries of marker lists
static_info: array containing offset angles
ankle_JC: array of ankle_JC each x,y,z position
expected: the expected result from calling footJointCenter on frame, static_info, and ankle_JC
The incorrect foot joint axes for both feet are calculated using the following calculations:
z-axis = ankle joint center - TOE marker
y-flex = ankle joint center flexion - ankle joint center
x-axis = y-flex cross z-axis
y-axis = z-axis cross x-axis
The foot joint axis is then calculated by rotating the incorrect foot joint axes about the offset angles (a small numeric sketch of this construction follows the docstring).
This test is checking to make sure the foot joint center and axis are calculated correctly given the input
parameters. The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to frame, expected[0] and expected[1] should be updated
When values are added to static_info, expected[2] should be updated as long as there are values for frame and ankle_JC
When values are added to ankle_JC, expected[2] should be updated
"""
result = pycgmStatic.footJointCenter(frame, static_info, ankle_JC, None, None)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'LFHD': np.array([184.55158997, 409.68713379, 1721.34289551]), 'RFHD': np.array([325.82983398, 402.55450439, 1722.49816895]), 'LBHD': np.array([197.8621521 , 251.28889465, 1696.90197754]), 'RBHD': np.array([304.39898682, 242.91339111, 1694.97497559])},
[[[255.21590218, 407.10741939, 1722.0817318], [254.19105385, 406.14680918, 1721.91767712], [255.18370553, 405.95974655, 1722.90744993]], [255.19071197509766, 406.1208190917969, 1721.9205322265625]]),
# Basic test with a variance of 1 in the x and y dimensions of the markers
({'LFHD': np.array([1, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[[0.5, 2, 0], [1.5, 1, 0], [0.5, 1, -1]], [0.5, 1, 0]]),
# Setting the markers so there's no variance in the x-dimension
({'LFHD': np.array([0, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([0, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], [0, 1, 0]]),
# Setting the markers so there's no variance in the y-dimension
({'LFHD': np.array([1, 0, 0]), 'RFHD': np.array([0, 0, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], [0.5, 0, 0]]),
# Setting each marker in a different xy quadrant
({'LFHD': np.array([-1, 1, 0]), 'RFHD': np.array([1, 1, 0]), 'LBHD': np.array([-1, -1, 0]), 'RBHD': np.array([1, -1, 0])},
[[[0, 2, 0], [-1, 1, 0], [0, 1, 1]], [0, 1, 0]]),
# Setting values of the markers so that midpoints will be on diagonals
({'LFHD': np.array([-2, 1, 0]), 'RFHD': np.array([1, 2, 0]), 'LBHD': np.array([-1, -2, 0]), 'RBHD': np.array([2, -1, 0])},
[[[-0.81622777, 2.4486833 , 0], [-1.4486833, 1.18377223, 0], [-0.5, 1.5, 1]], [-0.5, 1.5, 0]]),
# Adding the value of 1 in the z dimension for all 4 markers
({'LFHD': np.array([1, 1, 1]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 2, 1], [1.5, 1, 1], [0.5, 1, 0]], [0.5, 1, 1]]),
# Setting the z dimension value higher for LFHD and LBHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 2]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 2, 1.5], [1.20710678, 1, 2.20710678], [1.20710678, 1, 0.79289322]], [0.5, 1, 1.5]]),
# Setting the z dimension value higher for LFHD and RFHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 2]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of lists of ints
({'LFHD': [1, 1, 2], 'RFHD': [0, 1, 2], 'LBHD': [1, 0, 1], 'RBHD': [0, 0, 1]},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of numpy arrays of ints
({'LFHD': np.array([1, 1, 2], dtype='int'), 'RFHD': np.array([0, 1, 2], dtype='int'),
'LBHD': np.array([1, 0, 1], dtype='int'), 'RBHD': np.array([0, 0, 1], dtype='int')},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of lists of floats
({'LFHD': [1.0, 1.0, 2.0], 'RFHD': [0.0, 1.0, 2.0], 'LBHD': [1.0, 0.0, 1.0], 'RBHD': [0.0, 0.0, 1.0]},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of numpy arrays of floats
({'LFHD': np.array([1.0, 1.0, 2.0], dtype='float'), 'RFHD': np.array([0.0, 1.0, 2.0], dtype='float'),
'LBHD': np.array([1.0, 0.0, 1.0], dtype='float'), 'RBHD': np.array([0.0, 0.0, 1.0], dtype='float')},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]])])
def test_headJC(self, frame, expected):
"""
This test provides coverage of the headJC function in pycgmStatic.py, defined as headJC(frame)
This test takes 2 parameters:
frame: dictionary of marker lists
expected: the expected result from calling headJC on frame
This test is checking to make sure the head joint center and head joint axis are calculated correctly given
the 4 coordinates given in frame. This includes testing when there is no variance in the coordinates,
when the coordinates are in different quadrants, when the midpoints will be on diagonals, and when the z
dimension is variable. It also checks to see the difference when a value is set for HeadOffSet in vsk.
The function uses the LFHD, RFHD, LBHD, and RBHD markers from the frame to calculate the midpoints of the front, back, left, and right center positions of the head.
The head axis vector components are then calculated using the aforementioned midpoints.
Afterwards, the axes are made orthogonal by calculating the cross product of each individual axis.
Finally, the head axis is then rotated around the y axis based off the head offset angle in the VSK.
Lastly, it checks that the resulting output is correct when frame is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats, and when headOffset is an int and a float.
"""
result = pycgmStatic.headJC(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["frame", "ankle_JC", "expected"], [
# Test from running sample data
({'RTOE': np.array([433.33508301, 354.97229004, 44.27765274]),
'LTOE': np.array([31.77310181, 331.23657227, 42.15322876])},
[np.array([397.45738291, 217.50712216, 87.83068433]), np.array([112.28082818, 175.83265027, 80.98477997]),
[[np.array(rand_coor), np.array([396.73749179, 218.18875543, 87.69979179]), np.array(rand_coor)],
[np.array(rand_coor), np.array([111.34886681, 175.49163538, 81.10789314]), np.array(rand_coor)]]],
[np.array([433.33508301, 354.97229004, 44.27765274]), np.array([31.77310181, 331.23657227, 42.15322876]),
[[[433.4256618315962, 355.25152027652007, 45.233595181827035],
[432.36890500826763, 355.2296456773885, 44.29402798451682],
[433.09363829389764, 354.0471962330562, 44.570749823731354]],
[[31.806110207058808, 331.49492345678016, 43.11871573923792],
[30.880216288550965, 330.81014854432254, 42.29786022762896],
[32.2221740692973, 330.36972887034574, 42.36983123198873]]]]),
# Test with zeros for all params
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to frame['RTOE']
({'RTOE': np.array([-7, 3, -8]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([-7, 3, -8]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, [-6.36624977770237, 2.7283927618724446, -7.275714031659851]],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to frame['LTOE']
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([8, 0, -8])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor),
|
np.array([0, 0, 0])
|
numpy.array
|
from pioneer.common import linalg
from enum import Enum
import cv2
import numpy as np
class Pos(Enum):
LEFT = 0
CENTER = 1
RIGHT = 2
class CylindricalProjection():
''' cylindrical projection for 3 cameras
args:
intrinsic_calibrations: list of the 3 cameras' intrinsic calibration matrices
distortion_coef: list of the 3 cameras' distortion coefficients
extrinsic_calibrations: list of the 3 extrinsics 4x4 matrix, the middle is equal to identity
config: configuration dict
radius: cylinder radius (meter)
FOV_h: total horizontal cylinder FOV (rad)
FOV_v: total vertical cylinder FOV (rad)
image_h : horizontal cylinder image size (pixel)
image_v : vertical cylinder image size (pixel)
fusion_overlap_ratio : overlap ratio taken into account for image merging or fusion (0.0 to 1.0)
'''
def __init__(self
, intrinsic_calibrations = None
, distortion_coef = None
, extrinsic_calibrations = None
, config={'radius':50.0, 'FOV_h':np.deg2rad(210), 'FOV_v':np.deg2rad(67.5), 'image_h':2000, 'image_v':int(2000*0.25), 'fusion_overlap_ratio': 0.25}
):
self.__assert_intrinsic_calibrations(intrinsic_calibrations)
self.__assert_distortion_coefficients(distortion_coef)
self.__assert_extrinsic_calibrations(extrinsic_calibrations)
self.radius = config.get('radius', 50.0)
self.FOV_width = config.get('FOV_h', np.deg2rad(210))
self.FOV_height = config.get('FOV_v', np.deg2rad(67.5))
self.image_width = config.get('image_h', 2000)
self.image_height = config.get('image_v', int(2000*0.25))
self.fusion_overlap_ratio = config.get('fusion_overlap_ratio', 0.25)
self.cylinder_points, self.cylinder_points_2d = self.__get_cylinder_points(self.image_width, self.image_height, self.FOV_width, self.FOV_height, self.radius)
self.intrinsic_calibrations = {}
self.extrinsic_calibrations = {}
self.distortion_coefficients = {}
self.new_matrices = {}
self.keeped_in_cam_points = {}
self.keeped_cylinder_points_2d = {}
self.images_min_x = {}
self.images_max_x = {}
self.masks = {}
for pos in Pos:
self.intrinsic_calibrations[pos] = intrinsic_calibrations[pos.value]
self.distortion_coefficients[pos] = distortion_coef[pos.value]
self.extrinsic_calibrations[pos] = extrinsic_calibrations[pos.value]
self.new_matrices[pos] = None
self.keeped_in_cam_points[pos] = None
self.keeped_cylinder_points_2d[pos] = None
self.images_min_x[pos] = None
self.images_max_x[pos] = None
self.masks[pos] = None
def project_pts(self, pts, mask_fov=False, output_mask=False, margin=0):
''' project 3D points into the 2D cylindrical referential
Args:
pts: 3D points in the center camera referential (3xN)
mask_fov (optional): removes points outside the fov
output_mask (optional): if True, returns the mask applied to the points
margin (optional): margin (in pixels) outside the image unaffected by the fov mask
Return:
Nx2: 2D pixel points in the cylindrical image referential
mask (optional): returned if output_mask is True
'''
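# Hedged worked example of the mapping below (hypothetical numbers, not from the file):
# with the default image_h of 2000 px and FOV_h of 210 deg (~3.665 rad), a point straight
# ahead (azimut = 0) lands at x = 1000, while a point at azimut = +30 deg (~0.524 rad)
# lands at x = 1000 + 0.524 * (2000 / 3.665) ~= 1286 (1285 after the integer cast).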
assert len(pts.shape)==2, '2 dimensionals array needed'
assert pts.shape[0]==3, '3d points format 3xN'
azimut = np.arctan2(pts[0,:], pts[2,:])
norm_xz = np.linalg.norm(pts[[0,2],:], axis = 0)
elevation = np.arctan2(pts[1,:], norm_xz)
x = (self.image_width/2 + azimut * (self.image_width/self.FOV_width)).astype(int)
y = (self.image_height/2 + elevation * (self.image_height/self.FOV_height)).astype(int)
# compute the default mask (points in front of the camera) before pts is overwritten with pixel coordinates
mask = (pts[2,:] > 0)
pts = np.column_stack((x,y))
if mask_fov or output_mask:
mask = (azimut > -self.FOV_width/2 - margin/self.image_width*self.FOV_width) & \
(azimut < self.FOV_width/2 + margin/self.image_width*self.FOV_width) & \
(elevation > -self.FOV_height/2 - margin/self.image_height*self.FOV_height) & \
(elevation < self.FOV_height/2 + margin/self.image_height*self.FOV_height)
if mask_fov:
pts = pts[mask]
if output_mask:
return pts, mask
return pts
def stitch(self, images=None):
self.__assert_image(images)
for i, image in enumerate(images):
if image.ndim == 2:
images[i] = self.gray_to_rgb(image)
rectified_images = dict()
cylinder = dict()
for position in Pos:
# only the first time, compute the matrix able to reproject into the undistorted image
if self.new_matrices[position] is None:
self.new_matrices[position] = self.__compute_optimal_new_matrix(images[position.value], self.intrinsic_calibrations[position], self.distortion_coefficients[position])
# undistort the image
rectified_images[position] = cv2.undistort(images[position.value], self.intrinsic_calibrations[position], self.distortion_coefficients[position], None, self.new_matrices[position])
# each camera will be reprojected in these cylinder images
cylinder[position] = np.zeros([self.image_height, self.image_width, 3], dtype=images[Pos.CENTER.value].dtype)
# only the first time, compute LUT for each camera to project 2D camera image in the cylinder
if self.__are_keeped_in_cam_points_none():
self.__compute_lookup_table_cameras_to_cylinders(images[Pos.CENTER.value])
# only the first time, compute masks for each camera used to merge them in the final cylinder image
if self.__are_masks_none():
self.__compute_mask()
# do the projection in each cylinder
for position in Pos:
cylinder[position][self.keeped_cylinder_points_2d[position][1,:],self.keeped_cylinder_points_2d[position][0,:],:] = rectified_images[position][self.keeped_in_cam_points[position][1,:],self.keeped_in_cam_points[position][0,:],:]
# merge the 3 projected images into a final cylinder image
pano = cylinder[Pos.LEFT] * np.tile(self.masks[Pos.LEFT][:,:,np.newaxis],3) + cylinder[Pos.CENTER] * np.tile(self.masks[Pos.CENTER][:,:,np.newaxis],3) + cylinder[Pos.RIGHT] * np.tile(self.masks[Pos.RIGHT][:,:,np.newaxis],3)
return pano.astype(np.uint8)
def __compute_optimal_new_matrix(self, image, matrix, distortion_coefficient):
height, width = image.shape[:2]
new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(matrix, distortion_coefficient, (width,height), 0, (width,height))
return new_camera_matrix
#TODO: Refactor this function
def __compute_lookup_table_cameras_to_cylinders(self, image_center):
# u,v,scale in camera, used to check if points are in front of or behind the camera
pt_in_cam_3 = self.new_matrices[Pos.CENTER] @ self.cylinder_points[:3,:]
# project 3D cylinder points in 2D image
pt_in_cam = (cv2.projectPoints(self.cylinder_points[:3,:],np.zeros((3, 1)),np.zeros((3, 1)), self.new_matrices[Pos.CENTER], self.distortion_coefficients[Pos.CENTER]*0.0))[0].reshape(-1,2).T
# keep points that respect the image shape and are in front of the camera
keep = np.logical_and(np.logical_and(np.logical_and(np.logical_and(pt_in_cam[0,:] >=0, pt_in_cam[0,:] < image_center.shape[1]), pt_in_cam[1,:] >=0), pt_in_cam[1,:] < image_center.shape[0] ),pt_in_cam_3[2,:]>0)
self.keeped_in_cam_points[Pos.CENTER] = pt_in_cam[:,keep].astype(int)
self.keeped_cylinder_points_2d[Pos.CENTER] = self.cylinder_points_2d[:,keep].astype(int)
# compute left and right image limits in the cylinder, used to create the right merging masks
self.images_min_x[Pos.CENTER] = self.keeped_cylinder_points_2d[Pos.CENTER][0,self.keeped_cylinder_points_2d[Pos.CENTER].reshape(2,-1)[1,:]==self.image_height//2].min()
self.images_max_x[Pos.CENTER] = self.keeped_cylinder_points_2d[Pos.CENTER][0,self.keeped_cylinder_points_2d[Pos.CENTER].reshape(2,-1)[1,:]==self.image_height//2].max()
# left camera
calib_extrinsic_l_c_inv = linalg.tf_inv(self.extrinsic_calibrations[Pos.LEFT])
pt_in_cam_3 = self.new_matrices[Pos.LEFT] @ (calib_extrinsic_l_c_inv @ self.cylinder_points)[:3,:]
pt_in_cam_3d = (calib_extrinsic_l_c_inv @ self.cylinder_points)[:3,:]
pt_in_cam = (cv2.projectPoints(pt_in_cam_3d,np.zeros((3, 1)),np.zeros((3, 1)), self.new_matrices[Pos.LEFT], self.distortion_coefficients[Pos.LEFT]*0.0))[0].reshape(-1,2).T
keep = np.logical_and(np.logical_and(np.logical_and(np.logical_and(pt_in_cam[0,:] >=0, pt_in_cam[0,:] < image_center.shape[1]), pt_in_cam[1,:] >=0), pt_in_cam[1,:] < image_center.shape[0] ),pt_in_cam_3[2,:]>0)
self.keeped_in_cam_points[Pos.LEFT] = pt_in_cam[:,keep].astype(int)
self.keeped_cylinder_points_2d[Pos.LEFT] = self.cylinder_points_2d[:,keep].astype(int)
self.images_min_x[Pos.LEFT] = self.keeped_cylinder_points_2d[Pos.LEFT][0,self.keeped_cylinder_points_2d[Pos.LEFT].reshape(2,-1)[1,:]==self.image_height//2].min()
self.images_max_x[Pos.LEFT] = self.keeped_cylinder_points_2d[Pos.LEFT][0,self.keeped_cylinder_points_2d[Pos.LEFT].reshape(2,-1)[1,:]==self.image_height//2].max()
# right camera
calib_extrinsic_r_c_inv = linalg.tf_inv(self.extrinsic_calibrations[Pos.RIGHT])
pt_in_cam_3 = self.new_matrices[Pos.RIGHT] @ (calib_extrinsic_r_c_inv @ self.cylinder_points)[:3,:]
pt_in_cam_3d = (calib_extrinsic_r_c_inv @ self.cylinder_points)[:3,:]
pt_in_cam = (cv2.projectPoints(pt_in_cam_3d,np.zeros((3, 1)),np.zeros((3, 1)), self.new_matrices[Pos.RIGHT], self.distortion_coefficients[Pos.RIGHT]*0.0))[0].reshape(-1,2).T
keep = np.logical_and(np.logical_and(np.logical_and(np.logical_and(pt_in_cam[0,:] >=0, pt_in_cam[0,:] < image_center.shape[1]), pt_in_cam[1,:] >=0), pt_in_cam[1,:] < image_center.shape[0] ),pt_in_cam_3[2,:]>0)
self.keeped_in_cam_points[Pos.RIGHT] = pt_in_cam[:,keep].astype(int)
self.keeped_cylinder_points_2d[Pos.RIGHT] = self.cylinder_points_2d[:,keep].astype(int)
self.images_min_x[Pos.RIGHT] = self.keeped_cylinder_points_2d[Pos.RIGHT][0,self.keeped_cylinder_points_2d[Pos.RIGHT].reshape(2,-1)[1,:]==self.image_height//2].min()
self.images_max_x[Pos.RIGHT] = self.keeped_cylinder_points_2d[Pos.RIGHT][0,self.keeped_cylinder_points_2d[Pos.RIGHT].reshape(2,-1)[1,:]==self.image_height//2].max()
def __compute_mask(self):
# generate fusion masks
for pos in Pos:
self.masks[pos] = np.zeros((self.image_height, self.image_width), dtype=np.float32)
span_lc = (self.images_max_x[Pos.LEFT]-self.images_min_x[Pos.CENTER]) // 2
center_lc = span_lc + self.images_min_x[Pos.CENTER]
span_cr = (self.images_max_x[Pos.CENTER]-self.images_min_x[Pos.RIGHT]) // 2
center_cr = span_cr + self.images_min_x[Pos.RIGHT]
img_c_min_x2 = int(center_lc - span_lc * self.fusion_overlap_ratio)
img_l_max_x2 = int(center_lc + span_lc * self.fusion_overlap_ratio)
img_r_min_x2 = int(center_cr - span_cr * self.fusion_overlap_ratio)
img_c_max_x2 = int(center_cr + span_cr * self.fusion_overlap_ratio)
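# Hedged illustration of the blend window (hypothetical values): if the left image ends at
# x=900 and the center image starts at x=700 on the cylinder, span_lc=100 and center_lc=800;
# with fusion_overlap_ratio=0.25 the left/center blend window is [775, 825], i.e. left alone
# below 775, center alone above 825, and a linear cross-fade in between.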
self.masks[Pos.LEFT][:,:img_c_min_x2] = 1.0
self.masks[Pos.CENTER][:,img_l_max_x2:img_r_min_x2] = 1.0
self.masks[Pos.RIGHT][:,img_c_max_x2:] = 1.0
self.masks[Pos.LEFT][:,img_c_min_x2:img_l_max_x2] = np.linspace(1.0, 0.0,img_l_max_x2-img_c_min_x2)
self.masks[Pos.CENTER][:,img_c_min_x2:img_l_max_x2] = np.linspace(0.0, 1.0,img_l_max_x2-img_c_min_x2)
self.masks[Pos.CENTER][:,img_r_min_x2:img_c_max_x2] =
|
np.linspace(1.0, 0.0,img_c_max_x2-img_r_min_x2)
|
numpy.linspace
|
import time
import random
import numpy as np
import pypolyagamma as pypolyagamma
def calculate_C_w(S, w_i):
w_mat = np.diag(w_i)
return np.dot(S.T, np.dot(w_mat, S))
def sample_w_i(S, J_i):
"""
:param S: observation matrix
:param J_i: neuron i's couplings
:return: samples for w_i from a polyagamma distribution
"""
nthreads = pypolyagamma.get_omp_num_threads()
seeds = np.random.randint(2 ** 16, size=nthreads)
ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
T = S.shape[0]
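# Hedged note (assumption, not stated in the original file): the Polya-Gamma augmentation
# draws one auxiliary w_t per time bin, so the shape parameter handed to the samplers is
# typically a length-T vector of ones, i.e. w_t ~ PG(1, psi_t).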
A =
|
np.ones(T)
|
numpy.ones
|
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.signal
def dynet_ar2pdc(KF,srate,freqs,metric = 'sPDC',univ = 0,flow = 1,PSD = 0, gauss_filt=False):
"""
Obtain PDC, sPDC, info-PDC from tvAR coeffients
M.Rubega, D.Pascucci, 17.10.2018
Last update: 02.10.2020
--------------------------------------------------------------------------
INPUTs
- KF: Dynet_SSM object (dynet_statespace.Dynet_SSM())
containing the 4d array of AR coefficients
[n x n x order x time]
and the estimated measurement noise covariance matrix R
[n x n x time]
- srate: Sampling rate
- freqs: Frequency 1d array
- metric: see OUTPUT
- univ: Remove (0, default) or Keep (1) the diagonal elements
- flow: normalization per columns (1) or rows (2)
- PSD: (1) Add the normalized parametric PSD on diagonals
(0) none (only for graphical purpose)
--------------------------------------------------------------------------
OUTPUTs
- PDC: 4d array: [Nodes X Nodes X Freq X Time]
one of
'PDC' % non-squared Eq. (18) in [2]
'sPDC' % squared Eq. (7) in [3]
'PDCnn' % non-normalized
'sPDCnn' % squared non-normalized
'iPDC' % info-PDC Eq. (5.25) in [4]
'iPDCs' % info-PDC squared
--------------------------------------------------------------------------
References:
[1] <NAME>., <NAME>., ..., & <NAME>. (2010), Neuroimage, 50(3),
960-969. A new Kalman filter approach for the estimation of
high-dimensional time-variant multivariate AR models and its
application in analysis of laser-evoked brain potentials.
[2] <NAME>. & <NAME>. (2001) Biol Cybern, 84 (6), 463-474
Partial directed coherence: a new concept in neural structure
determination.
[3] <NAME>., ..., & <NAME>. (2006), IEEE Transactions on
Biomedical Engineering, 53(9), 1802-1812
Assessing cortical functional connectivity by partial directed
coherence: simulations and application to real data.
[4] <NAME> and <NAME>, (2014), CRC Press
Methods in brain connectivity inference through multivariate time
series analysis.
[5] <NAME>., ..., & <NAME>. (2013), 35th Annual International
Conference of the IEEE EMBS, 4346-4349
The Effect of Normalization of Partial Directed Coherence on the
Statistical Assessment of Connectivity Patterns: A Simulation Study
"""
if flow not in [1,2]:
raise Exception('Check the value of "flow": it must be either 1 or 2')
# Check R
nodes,_,order,time = KF.AR.shape
if (KF.R is not None) and (len(KF.R.shape)<3):
KF.R = np.transpose(
|
np.tile(KF.R,(time,1,1))
|
numpy.tile
|
import time
import os
import pandas as pd
import numpy
#This code calculates the cLD variance grouped by distance and by cMAF.#
#One can use this file together with the LDvar file to plot the ratio of variances.#
openfile = open(r'/PATH/filteredgene.csv','r') #This is the filtered gene file from filter.
cut1 = 35 * 1000
cut2 = cut1 * 2
cut3 = cut2 * 2
cut4 = cut3 * 2
cut5 = cut4 * 2
cut6 = cut5 * 2
cut7 = cut6 * 2
cut8 = cut7 * 2
cut9 = cut8 * 2
cut10 = cut9 * 2
cut11 = cut10 * 2
cut12 = cut11 * 2
#cmafcut1 = 0.00125
#cmafcut2 = 0.0025
#cmafcut3 = 0.005
#cmafcut4 = 0.01
#cmafcut5 = 0.02
cmafcut1 = 0.05
cmafcut2 = 0.1
cmafcut3 = 0.2
cmafcut4 = 0.4
cmafcut5 = 0.5
cmaf1p11 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf1p10 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf1p01 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf1p00 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ncmaf1 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf2p11 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf2p10 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf2p01 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf2p00 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ncmaf2 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf3p11 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf3p10 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf3p01 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf3p00 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ncmaf3 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf4p11 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf4p10 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf4p01 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf4p00 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ncmaf4 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf5p11 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf5p10 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf5p01 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cmaf5p00 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ncmaf5 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
def find_pos(openfile,genepos):
openfile.seek(0,0)
line = openfile.readline()
line_list = line.split(',')
while line:
if line_list[1] ==genepos:
break
line = openfile.readline()
line_list = line.split(',')
return(1)
def cal_pab(line1,line2):
line_list1 = line1.split(',')
line_list2 = line2.split(',')
count = 0
for i in range(4,n):
if line_list1[i] == '1|0' or line_list1[i] == '1|0\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 1
if line_list1[i] == '0|1' or line_list1[i] == '0|1\n':
if line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 1
if line_list1[i] == '1|1' or line_list1[i] == '1|1\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n' or line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 2
return(count/(2*(n-4)))
def cal_p01(line1,line2):
line_list1 = line1.split(',')
line_list2 = line2.split(',')
count = 0
for i in range(4,n):
if line_list1[i] == '1|0' or line_list1[i] == '1|0\n':
if line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 1
if line_list1[i] == '0|1' or line_list1[i] == '0|1\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 1
if line_list1[i] == '0|0' or line_list1[i] == '0|0\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n' or line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '1|1'or line_list2[i] == '1|1\n':
count = count + 2
return(count/(2*(n-4)))
def cal_p10(line1,line2):
line_list1 = line1.split(',')
line_list2 = line2.split(',')
count = 0
for i in range(4,n):
if line_list1[i] == '1|0' or line_list1[i] == '1|0\n':
if line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 1
if line_list1[i] == '0|1' or line_list1[i] == '0|1\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 1
if line_list1[i] == '1|1' or line_list1[i] == '1|1\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n' or line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 2
return(count/(2*(n-4)))
def cal_p00(line1,line2):
line_list1 = line1.split(',')
line_list2 = line2.split(',')
count = 0
for i in range(4,n):
if line_list1[i] == '1|0' or line_list1[i] == '1|0\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 1
if line_list1[i] == '0|1' or line_list1[i] == '0|1\n':
if line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 1
if line_list1[i] == '0|0' or line_list1[i] == '0|0\n':
if line_list2[i] == '1|0'or line_list2[i] == '1|0\n' or line_list2[i] == '0|1'or line_list2[i] == '0|1\n':
count = count + 1
if line_list2[i] == '0|0'or line_list2[i] == '0|0\n':
count = count + 2
return(count/(2*(n-4)))
def distance(line1,line2):
line_list1 = line1.split(',')
line_list2 = line2.split(',')
dist1 = (float(line_list1[2])+float(line_list1[3]))/2
dist2 = (float(line_list2[2])+float(line_list2[3]))/2
crtdist = abs(dist1-dist2)
distgroup = 0
if crtdist < cut1:
distgroup = 1
elif crtdist < cut2:
distgroup = 2
elif crtdist < cut3:
distgroup = 3
elif crtdist < cut4:
distgroup = 4
elif crtdist < cut5:
distgroup = 5
elif crtdist < cut6:
distgroup = 6
elif crtdist < cut7:
distgroup = 7
elif crtdist < cut8:
distgroup = 8
elif crtdist < cut9:
distgroup = 9
elif crtdist < cut10:
distgroup = 10
elif crtdist < cut11:
distgroup = 11
elif crtdist < cut12:
distgroup = 12
elif crtdist > cut12:
distgroup = 13
return(distgroup)
def cmaffun(cmaf1,cmaf2):
cmafindex = 0
if min(cmaf1,cmaf2) < cmafcut1:
cmafindex = 1
elif min(cmaf1,cmaf2) < cmafcut2:
cmafindex = 2
elif min(cmaf1,cmaf2) < cmafcut3:
cmafindex = 3
elif min(cmaf1,cmaf2) < cmafcut4:
cmafindex = 4
elif min(cmaf1,cmaf2) > cmafcut4:
cmafindex = 5
return(cmafindex)
def cldvar(plist,n):
p1 = plist[0]
#p2 = plist[1]
#p3 = plist[2]
p4 = plist[3]
p2 = (plist[1] + plist[2])/2
p3 = p2
index = 1
if ((p1+p2)**2)*((p1+p3)**2)*((-n+p1+p2)**2)*((-n+p1+p3)**2) == 0 or ((p1+p2)**2)*(p1+p3)*((-n+p1+p2)**2)*(-n+p1+p3)==0 or ((p1+p3)**2)*(p1+p2)*((-n+p1+p3)**2)*(-n+p1+p2) == 0:
index = 0
else:
v1 = (n*(n-2*p1-p2-p3)*(n*p1-(p1+p2)*(p1+p3))*(n*p1*(p2+p3)+2*n*p2*p3-(p1+p2)*(p1+p3)*(p2+p3)))/(((p1+p2)**2)*((p1+p3)**2)*((-n+p1+p2)**2)*((-n+p1+p3)**2))
v2 = (n*(p1**2)*((-n+p1+p2)**2)-n*(p3**2)*((p1+p2)**2))/(((p1+p2)**2)*(p1+p3)*((-n+p1+p2)**2)*(-n+p1+p3))
v3 = (n*(p1**2)*((-n+p1+p3)**2)-n*(p2**2)*((p1+p3)**2))/(((p1+p3)**2)*(p1+p2)*((-n+p1+p3)**2)*(-n+p1+p2))
v = numpy.mat([v1,v2,v3,0])
#print('vector v is',v)
p1 = p1/n
p2 = p2/n
p3 = p3/n
p4 = p4/n
#print('p are',p1,p2,p3,p4)
matr = numpy.mat([[p1-p1**2,-p1*p2,-p1*p3,-p1*p4],
[-p2*p1,p2-p2**2,-p2*p3,-p2*p4],
[-p3*p1,-p3*p2,p3-p3**2,-p3*p4],
[-p4*p1,-p4*p2,-p4*p3,p4-p4**2]])
#print('matr is',matr)
if index == 0:
ans = 0
else:
varres = v*matr*v.T
ans = float(n*varres)
return(ans)
def cldfunc(mlist,n):
cld = 0
m1 = float(mlist[0])
m2 = float(mlist[1])
m3 = float(mlist[2])
m4 = float(mlist[3])
cldnumer = (m1/n - (1/(n**2))*(m1+m2)*(m1+m3))**2
clddeno = ((m1+m3)/n)*(1-((m1+m3)/n))*((m1+m2)/n)*(1-((m1+m2)/n))
if not clddeno == 0:
cld = cldnumer/clddeno
return(cld)
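#Hedged note on cldfunc above: with n haplotypes it computes the squared-correlation form of
#LD, cLD = (p11 - pA*pB)^2 / (pA*(1-pA)*pB*(1-pB)), where p11 = m1/n, pA = (m1+m2)/n and
#pB = (m1+m3)/n, and returns 0 whenever the denominator vanishes.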
def count_one(line_list):
count = 0
for element in line_list[4:n]:
if element == '0|1'or element == '0|1\n' or element == '1|0'or element == '1|0\n':
count = count + 1
if element == '1|1'or element == '1|1\n':
count = count + 2
return(count/(2*(n-4)))
#save pa pb pc ...
pone = []
#head
line = openfile.readline()
#second
line = openfile.readline()
line_list = line.split(',')
n = len(line_list)
while line:
pone.append(count_one(line_list))
line = openfile.readline()
line_list = line.split(',')
np = len(pone)
#refresh the start
openfile.seek(0,0)
#first line
line = openfile.readline()
head_list = line.split(',')
#second line & initial
line1 = openfile.readline()
line_list1 = line1.split(',')
print('n of sample',len(line_list1))
print('initial end, start loop')
pablist = []
p01list = []
p10list = []
p00list = []
numsample = 2*(n-4)
cldvarlist1 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldvarlist2 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldvarlist3 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldvarlist4 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldvarlist5 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldlist1 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldlist2 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldlist3 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldlist4 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
cldlist5 = [0,0,0,0,0,0,0,0,0,0,0,0,0]
i = 0
while line1:
line_list1 = line1.split(',')
currentpos = line_list1[1]
count = count_one(line_list1)
line2 = openfile.readline()
j = i + 1
if j == np:
print('the whole process is over,i is:',i)
#break
while line2:
count2 = count_one(line2.split(','))
cmaf1 = count
cmaf2 = count2
cmaf12 = cmaffun(cmaf1,cmaf2)
dist12 = distance(line1,line2)
#normalized cld#
pab = cal_pab(line1,line2) #it's p11#
p01 = cal_p01(line1,line2)
p10 = cal_p10(line1,line2)
p00 = cal_p00(line1,line2)
pablist.append(pab)
p01list.append(p01)
p10list.append(p10)
p00list.append(p00)
if cmaf12 == 1:
cmaf1p00[dist12-1] = cmaf1p00[dist12-1] + p00
cmaf1p11[dist12-1] = cmaf1p11[dist12-1] + pab
cmaf1p01[dist12-1] = cmaf1p01[dist12-1] + p01
cmaf1p10[dist12-1] = cmaf1p10[dist12-1] + p10
ncmaf1[dist12-1] = ncmaf1[dist12-1] + 1
nplist = [numsample*pab,numsample*p01,numsample*p10,numsample*p00]
cldlist1[dist12-1] = cldlist1[dist12-1] + cldfunc(nplist,numsample)
cldvarlist1[dist12-1] = cldvarlist1[dist12-1] + cldvar(nplist,numsample)
if cmaf12 == 2:
cmaf2p00[dist12-1] = cmaf2p00[dist12-1] + p00
cmaf2p11[dist12-1] = cmaf2p11[dist12-1] + pab
cmaf2p01[dist12-1] = cmaf2p01[dist12-1] + p01
cmaf2p10[dist12-1] = cmaf2p10[dist12-1] + p10
ncmaf2[dist12-1] = ncmaf2[dist12-1] + 1
nplist = [numsample*pab,numsample*p01,numsample*p10,numsample*p00]
cldlist2[dist12-1] = cldlist2[dist12-1] + cldfunc(nplist,numsample)
cldvarlist2[dist12-1] = cldvarlist2[dist12-1] + cldvar(nplist,numsample)
if cmaf12 == 3:
cmaf3p00[dist12-1] = cmaf3p00[dist12-1] + p00
cmaf3p11[dist12-1] = cmaf3p11[dist12-1] + pab
cmaf3p01[dist12-1] = cmaf3p01[dist12-1] + p01
cmaf3p10[dist12-1] = cmaf3p10[dist12-1] + p10
ncmaf3[dist12-1] = ncmaf3[dist12-1] + 1
nplist = [numsample*pab,numsample*p01,numsample*p10,numsample*p00]
cldlist3[dist12-1] = cldlist3[dist12-1] + cldfunc(nplist,numsample)
cldvarlist3[dist12-1] = cldvarlist3[dist12-1] + cldvar(nplist,numsample)
if cmaf12 == 4:
cmaf4p00[dist12-1] = cmaf4p00[dist12-1] + p00
cmaf4p11[dist12-1] = cmaf4p11[dist12-1] + pab
cmaf4p01[dist12-1] = cmaf4p01[dist12-1] + p01
cmaf4p10[dist12-1] = cmaf4p10[dist12-1] + p10
ncmaf4[dist12-1] = ncmaf4[dist12-1] + 1
nplist = [numsample*pab,numsample*p01,numsample*p10,numsample*p00]
cldlist4[dist12-1] = cldlist4[dist12-1] + cldfunc(nplist,numsample)
cldvarlist4[dist12-1] = cldvarlist4[dist12-1] + cldvar(nplist,numsample)
if cmaf12 == 5:
cmaf5p00[dist12-1] = cmaf5p00[dist12-1] + p00
cmaf5p11[dist12-1] = cmaf5p11[dist12-1] + pab
cmaf5p01[dist12-1] = cmaf5p01[dist12-1] + p01
cmaf5p10[dist12-1] = cmaf5p10[dist12-1] + p10
ncmaf5[dist12-1] = ncmaf5[dist12-1] + 1
nplist = [numsample*pab,numsample*p01,numsample*p10,numsample*p00]
cldlist5[dist12-1] = cldlist5[dist12-1] + cldfunc(nplist,numsample)
cldvarlist5[dist12-1] = cldvarlist5[dist12-1] + cldvar(nplist,numsample)
line2 = openfile.readline()
j = j + 1
if j == np:
print('this line is over,i is:',i)
break
print('cld line written')
find_pos(openfile,currentpos)
line1 = openfile.readline()
i = i + 1
for i in range(0,13):
if ncmaf1[i] == 0:
ncmaf1[i] = 1
if ncmaf2[i] == 0:
ncmaf2[i] = 1
if ncmaf3[i] == 0:
ncmaf3[i] = 1
if ncmaf4[i] == 0:
ncmaf4[i] = 1
if ncmaf5[i] == 0:
ncmaf5[i] = 1
print('length of pab is:',len(pablist))
print('length of p01 is:',len(p01list))
print('length of p10 is:',len(p10list))
print('length of p00 is:',len(p00list))
print('length of cmaf1 is:',ncmaf1)
print('length of cmaf2 is:',ncmaf2)
print('length of cmaf3 is:',ncmaf3)
print('length of cmaf4 is:',ncmaf4)
print('length of cmaf5 is:',ncmaf5)
print('mean of pab is:',numpy.mean(pablist))
print('mean of p01 is:',numpy.mean(p01list))
print('mean of p10 is:',numpy.mean(p10list))
print('mean of p00 is:',numpy.mean(p00list))
print('mean of cmaf1p11 is:',numpy.divide(numpy.array(cmaf1p11),numpy.array(ncmaf1)))
print('mean of cmaf1p01 is:',numpy.divide(numpy.array(cmaf1p01),numpy.array(ncmaf1)))
print('mean of cmaf1p10 is:',numpy.divide(numpy.array(cmaf1p10),numpy.array(ncmaf1)))
print('mean of cmaf1p00 is:',numpy.divide(numpy.array(cmaf1p00),numpy.array(ncmaf1)))
print('mean of cmaf2p11 is:',numpy.divide(numpy.array(cmaf2p11),numpy.array(ncmaf2)))
print('mean of cmaf2p01 is:',numpy.divide(numpy.array(cmaf2p01),numpy.array(ncmaf2)))
print('mean of cmaf2p10 is:',numpy.divide(numpy.array(cmaf2p10),numpy.array(ncmaf2)))
print('mean of cmaf2p00 is:',numpy.divide(numpy.array(cmaf2p00),numpy.array(ncmaf2)))
print('mean of cmaf3p11 is:',numpy.divide(numpy.array(cmaf3p11),numpy.array(ncmaf3)))
print('mean of cmaf3p01 is:',numpy.divide(numpy.array(cmaf3p01),numpy.array(ncmaf3)))
print('mean of cmaf3p10 is:',numpy.divide(numpy.array(cmaf3p10),numpy.array(ncmaf3)))
print('mean of cmaf3p00 is:',numpy.divide(numpy.array(cmaf3p00),numpy.array(ncmaf3)))
print('mean of cmaf4p11 is:',numpy.divide(numpy.array(cmaf4p11),numpy.array(ncmaf4)))
print('mean of cmaf4p01 is:',numpy.divide(numpy.array(cmaf4p01),numpy.array(ncmaf4)))
print('mean of cmaf4p10 is:',numpy.divide(numpy.array(cmaf4p10),numpy.array(ncmaf4)))
print('mean of cmaf4p00 is:',numpy.divide(numpy.array(cmaf4p00),numpy.array(ncmaf4)))
print('mean of cmaf5p11 is:',numpy.divide(numpy.array(cmaf5p11),numpy.array(ncmaf5)))
print('mean of cmaf5p01 is:',numpy.divide(numpy.array(cmaf5p01),numpy.array(ncmaf5)))
print('mean of cmaf5p10 is:',numpy.divide(numpy.array(cmaf5p10),numpy.array(ncmaf5)))
print('mean of cmaf5p00 is:',numpy.divide(numpy.array(cmaf5p00),numpy.array(ncmaf5)))
print('cmaf1 var group', numpy.divide(numpy.array(cldvarlist1),numpy.array(ncmaf1)))
print('cmaf2 var group', numpy.divide(numpy.array(cldvarlist2),numpy.array(ncmaf2)))
print('cmaf3 var group', numpy.divide(numpy.array(cldvarlist3),numpy.array(ncmaf3)))
print('cmaf4 var group', numpy.divide(numpy.array(cldvarlist4),numpy.array(ncmaf4)))
print('cmaf5 var group', numpy.divide(
|
numpy.array(cldvarlist5)
|
numpy.array
|
# create array
import numpy as np
from numpy import array
from numpy import empty
from numpy import zeros
from numpy import ones
from numpy import array
from numpy import vstack
from numpy import hstack
print("Create array")
# create array
l = [[1.0, 2.0, 3.0],[4.0, 1.5, 3.3]]
a = array(l)
# display array
print(a)
# display array shape
print(a.shape)
# display array data type
print(a.dtype)
print("-------------------------------------------------------------------")
print("Create array with empty")
a = empty([3,3])
print(a)
print("-------------------------------------------------------------------")
print("Create array with zeros")
a = zeros([3,5])
print(a)
print("-------------------------------------------------------------------")
print("Create array with ones")
a = ones([3,5])
print(a)
print("-------------------------------------------------------------------")
print("Create array with vstack")
# create array with vstack
# create first array
a1 = array([1,2,3])
print(a1)
# create second array
a2 = array([4,5,6])
print(a2)
# vertical stack
a3 = vstack((a1, a2))
print(a3)
print(a3.shape)
print("-------------------------------------------------------------------")
print("Create array with hstack")
# create first array
a1 = array([1,2,3])
print(a1)
# create second array
a2 = array([4,5,6])
print(a2)
# create horizontal stack
a3 = hstack((a1, a2))
print(a3)
print(a3.shape)
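# note: vstack of two length-3 vectors gives shape (2, 3), while hstack gives shape (6,)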
print("-------------------------------------------------------------------")
print("Create array with array then concatenate")
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.concatenate((arr1, arr2))
print(arr)
print("-------------------------------------------------------------------")
print("Create array with random")
x = np.random.randint(100, size=(3, 5))
print(x)
print("-------------------------------------------------------------------")
print("Create array")
# create one-dimensional array
from numpy import array
# list of data
data = [11, 22, 33, 44, 55]
# array of data
data = array(data)
print(data)
print(type(data))
print("-------------------------------------------------------------------")
# create two-dimensional array
from numpy import array
# list of data
data = [[11, 22],
[33, 44],
[55, 66]]
print("Type for data before numpy conversion")
print(type(data))
# array of data
data =
|
array(data)
|
numpy.array
|
#!/usr/bin/env python3
import numpy as np
from astropy.coordinates import EarthLocation, SkyCoord, AltAz
from astropy import units as u
from astropy.time import Time
#import matplotlib as mpl
#mpl.rc("text", usetex=True)
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mwa_pb import primary_beam as pb
from mwa_metadb_utils import get_common_obs_metadata, mwa_alt_az_za
import sys
import argparse
import logging
logger = logging.getLogger(__name__)
def compute_target_position(ra, dec, time):
MWA_LAT = -26.7033
MWA_LON = 116.671
MWA_ELEV = 377.827
MWA_LOCATION = EarthLocation(lat=MWA_LAT * u.deg,
lon=MWA_LON * u.deg,
height=MWA_ELEV * u.m)
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
return coords.transform_to(AltAz(obstime=time, location=MWA_LOCATION))
def log_normalise(data, vmin, vmax):
""" This is almost directly copied from matplotlib's color.py """
result = np.ma.masked_less_equal(data, 0, copy=False)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
mask = np.ma.getmask(result)
result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), mask=mask)
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= (resdat <= 0)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
return result
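# Hedged summary of log_normalise above: non-positive values are masked and the remaining
# data are mapped onto [0, 1] via (log(x) - log(vmin)) / (log(vmax) - log(vmin)), so vmin
# maps to 0 and vmax maps to 1 on a logarithmic scale.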
def plot_beam(obs, target, cal, freq):
metadata = get_common_obs_metadata(obs)
phi = np.linspace(0,360,3600)
theta = np.linspace(0,90,900)
# make coordinate grid
az, za = np.meshgrid(np.radians(phi), np.radians(theta))
# compute beam and plot
delays = metadata[4] #x and y delays
logger.debug("delays: {0}".format(delays))
logger.debug("freq*1e6: {0}".format(freq*1e6))
logger.debug("za: {0}".format(za))
logger.debug("az: {0}".format(az))
gx, gy = pb.MWA_Tile_analytic(za, az, freq=int(freq*1e6), delays=delays, power=True, zenithnorm=True)
beam = (gx + gy) / 2.0
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, polar=True, aspect='auto')
# filled contour setup
lower_contour = 7e-3
upper_contour = beam.max()
fill_min = 1e-2 # 1% of zenith power
fill_max = 0.95 * beam.max() # 95% max beam power ( != zenith power)
Z = np.copy(beam)
Z[Z <= fill_min] = 0
Z[Z >= fill_max] = fill_max
cc_levels = np.logspace(
|
np.log10(lower_contour)
|
numpy.log10
|
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
"""
本文件定义了LAC类,实现其调用分词,词性标注,训练模型的接口。
"""
import os
import shutil
import logging
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor
from . import reader
from . import utils
from . import nets
from .custom import Customization
from ._compat import *
def _get_abs_path(path): return os.path.normpath(
os.path.join(os.getcwd(), os.path.dirname(__file__), path))
DEFAULT_LAC = _get_abs_path('lac_model')
DEFAULT_SEG = _get_abs_path('seg_model')
class LAC(object):
"""docstring for LAC"""
def __init__(self, model_path=None, mode='lac', use_cuda=False):
super(LAC, self).__init__()
utils.check_cuda(use_cuda)
if model_path is None:
model_path = DEFAULT_SEG if mode == 'seg' else DEFAULT_LAC
self.args = utils.DefaultArgs(model_path)
self.args.use_cuda = use_cuda
self.model_path = model_path
config = AnalysisConfig(self.args.init_checkpoint)
config.disable_glog_info()
if use_cuda:
self.place = fluid.CUDAPlace(
int(os.getenv('FLAGS_selected_gpus', '0')))
config.enable_use_gpu(memory_pool_init_size_mb=500,
device_id=int(
os.getenv('FLAGS_selected_gpus', '0')),
)
else:
self.place = fluid.CPUPlace()
# init executor
self.exe = fluid.Executor(self.place)
self.dataset = reader.Dataset(self.args)
self.predictor = create_paddle_predictor(config)
self.custom = None
self.batch = False
self.return_tag = mode != 'seg'
def run(self, texts):
"""执行模型预测过程
Args:
texts: 模型输入的文本,一个Unicode编码的字符串或者
由Unicode编码字符串组成的List
Returns:
返回LAC处理结果
如果mode=='seg', 则只返回分词结果
如果mode=='lac', 则同时返回分词与标签
"""
if isinstance(texts, list) or isinstance(texts, tuple):
self.batch = True
else:
if len(texts.strip()) == 0:
return ([], []) if self.return_tag else []
texts = [texts]
self.batch = False
tensor_words = self.texts2tensor(texts)
crf_decode = self.predictor.run([tensor_words])
result = self.parse_result(texts, crf_decode[0], self.dataset)
if self.return_tag:
return result if self.batch else result[0]
else:
if not self.batch:
return result[0][0]
return [word for word, _ in result]
def parse_result(self, lines, crf_decode, dataset):
"""将模型输出的Tensor转为明文"""
offset_list = crf_decode.lod[0]
crf_decode = crf_decode.data.int64_data()
batch_size = len(offset_list) - 1
batch_out = []
for sent_index in range(batch_size):
begin, end = offset_list[sent_index], offset_list[sent_index + 1]
sent = lines[sent_index]
tags = [dataset.id2label_dict[str(id)]
for id in crf_decode[begin:end]]
if self.custom:
self.custom.parse_customization(sent, tags)
sent_out = []
tags_out = []
for ind, tag in enumerate(tags):
# for the first char
if len(sent_out) == 0 or tag.endswith("B") or tag.endswith("S"):
sent_out.append(sent[ind])
tags_out.append(tag[:-2])
continue
sent_out[-1] += sent[ind]
# use the last tag as the label
tags_out[-1] = tag[:-2]
batch_out.append([sent_out, tags_out])
return batch_out
def train(self, model_save_dir, train_data, test_data=None):
"""执行模型增量训练
Args:
model_save_dir: 训练结束后模型保存的路径
train_data: 训练数据路径
test_data: 测试数据路径,若为None则不进行测试
"""
self.args.train_data = train_data
self.args.test_data = test_data
logging.info("Start Training!")
test_program, fetch_list = nets.do_train(self.args)
fluid.io.save_inference_model(os.path.join(model_save_dir, 'model'),
['words'],
fetch_list,
self.exe,
main_program=test_program,
)
# copy the configuration files
if os.path.exists(os.path.join(model_save_dir, 'conf')):
shutil.rmtree(os.path.join(model_save_dir, 'conf'))
shutil.copytree(os.path.join(self.model_path, 'conf'),
os.path.join(model_save_dir, 'conf'))
self.load_model(model_save_dir)
logging.info("Finish Training!")
def load_model(self, model_dir):
"""装载预训练的模型"""
use_cuda = self.args.use_cuda
self.args = utils.DefaultArgs(model_dir)
self.args.use_cuda = use_cuda
self.dataset = reader.Dataset(self.args)
self.return_tag = self.args.tag_type != 'seg'
self.model_path = model_dir
config = AnalysisConfig(os.path.join(model_dir, 'model'))
config.disable_glog_info()
if self.args.use_cuda:
config.enable_use_gpu(memory_pool_init_size_mb=500,
device_id=int(
os.getenv('FLAGS_selected_gpus', '0')),
)
self.predictor = create_paddle_predictor(config)
def load_customization(self, customization_file):
"""装载用户词典"""
self.custom = Customization()
self.custom.load_customization(customization_file)
def texts2tensor(self, texts):
"""将文本输入转为Paddle输入的Tensor
Args:
texts: 由string组成的list,模型输入的文本
Returns:
Paddle模型输入用的Tensor
"""
lod = [0]
data = []
for i, text in enumerate(texts):
text_inds = self.dataset.word_to_ids(text)
data += text_inds
lod.append(len(text_inds) + lod[i])
data_np =
|
np.array(data, dtype="int64")
|
numpy.array
|
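A minimal usage sketch for the LAC class above (editor's addition): the import path and the presence of the bundled default models are assumptions, while the constructor arguments and run() behaviour follow the code shown.
from lac import LAC  # assumed import path for the module defined above
lac = LAC(mode='lac')                          # segmentation + POS tagging
words, tags = lac.run(u'百度是一家高科技公司')     # single string -> (words, tags)
seg = LAC(mode='seg')                          # segmentation only
word_lists = seg.run([u'今天天气不错', u'明天出发'])  # list in -> list of word lists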
#!/usr/bin/env python
import numpy as np
from olympus.datasets import Dataset
from olympus.utils.data_transformer import DataTransformer
np.random.seed(100691)
data = np.random.uniform(low=0, high=1, size=(3, 2))
def test_train_identity_array():
data_transformer = DataTransformer(transformations="identity")
data_transformer.train(data)
assert np.all(data_transformer._min == np.amin(data, axis=0))
assert np.all(data_transformer._max == np.amax(data, axis=0))
assert np.all(data_transformer._stddev == np.std(data, axis=0))
assert np.all(data_transformer._mean == np.mean(data, axis=0))
def test_train_standardize_array():
data_transformer = DataTransformer(transformations="standardize")
data_transformer.train(data)
transformed = data_transformer.transform(data)
assert np.all(np.abs(
|
np.mean(transformed, axis=0)
|
numpy.mean
|
'''
Contains routines to read and manipulate radar data from the CRS, HIWRAP, and EXRAD
during the IMPACTS experiment.
Copyright <NAME>, Univ. of Washington, 2022.
'''
import xarray as xr
import h5py
import netCDF4
import numpy as np
import scipy
from pyproj import Proj
from datetime import datetime, timedelta
from scipy.ndimage import gaussian_filter, gaussian_filter1d
def despeckle(er2data, sigma=1.):
'''
Mask radar data based on the specified gaussian filter.
Parameters
----------
er2data: radar variable generated by er2read()
sigma: threshold to use in gaussian filter (default: 1)
Output
----------
er2data: copy of radar variable
'''
temp = np.copy(er2data)
temp_mask = gaussian_filter(temp, sigma)
temp_mask = np.ma.masked_invalid(temp_mask)
er2data = np.ma.masked_where(np.ma.getmask(temp_mask), er2data)
return er2data
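# --- Editor's illustrative sketch (not part of the original module) ----------
# despeckle() relies on NaN propagation through gaussian_filter: any gate whose
# smoothing window touches a NaN becomes NaN, so isolated echoes surrounded by
# missing data end up masked. A small self-contained check:
def _despeckle_demo():
    demo = np.full((9, 9), np.nan)
    demo[4, 4] = 10.0                    # a lone "speckle" echo
    cleaned = despeckle(demo, sigma=1.)
    return np.ma.count(cleaned)          # 0: the isolated gate is masked out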
def ice_atten(crs_object, hiwrap_object, hrrr_hiwrap_object):
'''
Correct W-band reflectivity for attenuation due to ice scattering.
Uses mean Z_ku-k_w relationship from Fig. 7 in Kulie et al. (2014; https://doi.org/10.1175/JAMC-D-13-066.1).
Parameters
----------
crs_object: CRS dict object from er2read() function, optionally resampled from resample() function
hiwrap_object: HIWRAP dict object from er2read() function, optionally resampled from resample() function
hrrr_hiwrap_object: xarray dataset object containing HRRR fields interpolated to HIWRAP grid
Execution
----------
crs_object = er2_radar.er2read(crs_filename, **args)
hiwrap_object = er2_radar.er2read(hiwrap_filename, **args)
hrrr_hiwrap_object = xr.open_dataset(hrrr_hiwrap_filename)
'''
# Get start/end times from HIWRAP object (e.g., start/end of entire flight or of leg)
start_time, end_time = [np.datetime_as_string(hiwrap_object['time'][0]), np.datetime_as_string(hiwrap_object['time'][-1])]
# Resample radar data if needed
if len(crs_object['time'])!=len(hiwrap_object['time']):
print('Attempting to resample the CRS data before correction.')
hiwrap_sub, crs_sub = resample(hiwrap_object, start_time, end_time, crs_object=crs_object)
crs_object = crs_sub
hiwrap_object = hiwrap_sub
del crs_sub, hiwrap_sub
# Fit Kulie et al. (2014) relationship to a Z_ku-dependent func
dbz_ku_lin = np.array([0., 2000., 4000., 6000., 8000.]) # mm**6 / m**-3
ks_w_coeff = np.array([0., 7.5, 15.5, 23.75, 31.5]) # db / km
ks_w_func = np.poly1d(np.polyfit(dbz_ku_lin, ks_w_coeff, deg=1)) # slope, intercept coeffs
# Build mask for T > 0C
temp_hrrr = hrrr_hiwrap_object['temperature'].values[:, np.where((hrrr_hiwrap_object['time']>=np.datetime64(start_time)) & (hrrr_hiwrap_object['time']<=np.datetime64(end_time)))[0]]
temp_mask = np.zeros(temp_hrrr.shape, dtype=bool)
temp_inds = np.where(temp_hrrr>=0.)
if len(temp_inds[0])>0:
for beamnum in range(len(temp_inds[0])):
temp_mask[temp_inds[0][beamnum]:, temp_inds[1][beamnum]] = True
# Calculate 2-way PIA for each resampled CRS gate
ks_w = ks_w_func(10.**(hiwrap_object['dbz_Ku']/10.))
ks_w = ks_w * 0.0265 # convert to db/gate
ks_w[np.isnan(ks_w)] = 0.; ks_w[ks_w<0.] = 0.
ks_w = np.ma.array(2. * np.cumsum(ks_w, axis=(0))) # calc the 2-way attenuation
ks_w = np.ma.masked_where(temp_mask, ks_w) # mask values where T > 0C
# Correct W-band reflectivity
crs_object['dbz_W'] = crs_object['dbz_W'] + ks_w
return crs_object
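# --- Editor's illustrative sketch (not part of the original module) ----------
# The correction above reduces to a linear map from linear-units Ku reflectivity
# to a W-band specific attenuation (dB/km), scaled to dB per gate and summed
# twice down the beam. A stripped-down version for a single beam (the dB-per-gate
# scale factor is taken from the 0.0265 value used above):
def _two_way_pia_demo(dbz_ku_column, db_per_gate_factor=0.0265):
    dbz_ku_lin = np.array([0., 2000., 4000., 6000., 8000.])   # mm**6 m**-3
    ks_w_coeff = np.array([0., 7.5, 15.5, 23.75, 31.5])       # dB/km
    fit = np.poly1d(np.polyfit(dbz_ku_lin, ks_w_coeff, deg=1))
    k_w = fit(10.**(np.asarray(dbz_ku_column) / 10.)) * db_per_gate_factor
    k_w = np.clip(np.nan_to_num(k_w), 0., None)                # dB per gate
    return 2. * np.cumsum(k_w)           # two-way path-integrated attenuation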
def resample(hiwrap_object, start_time, end_time, crs_object=None, exrad_object=None):
'''
Resample CRS and/or EXRAD nadir beam data to the HIWRAP grid, which is the coarsest of the 3 radars.
Only works on portions of a flight to speed up performance.
INPUTS:
hiwrap_object: HIWRAP object obtained from er2_radar.er2read() method
start_time: Start time in YYYY-mm-ddTHH:MM:SS string format
end_time: End time in YYYY-mm-ddTHH:MM:SS string format
crs_object: CRS object obtained from er2_radar.er2read() method
exrad_object: EXRAD object obtained from er2_radar.er2read() method
OUTPUTS:
hiwrap_resampled: Trimmed HIWRAP object based on the start/end times
crs_resampled [optional]: Trimmed CRS object with the same shape as `hiwrap_resampled`
exrad_resampled [optional]: Trimmed EXRAD object with the same shape as `hiwrap_resampled`
'''
# Trim HIWRAP data for processing
time_inds_hiwrap = np.where((hiwrap_object['time']>=np.datetime64(start_time)) & (hiwrap_object['time']<=np.datetime64(end_time)))[0]
hiwrap_resampled = {}
hiwrap_resampled['time'] = hiwrap_object['time'][time_inds_hiwrap]
hiwrap_resampled['nomdist'] = hiwrap_object['nomdist'][time_inds_hiwrap] - hiwrap_object['nomdist'][time_inds_hiwrap][0] # reset to 0 for period
hiwrap_resampled['time_gate'] = hiwrap_object['time_gate'][:, time_inds_hiwrap]
hiwrap_resampled['alt_gate'] = hiwrap_object['alt_gate'][:, time_inds_hiwrap]
hiwrap_resampled['lon_gate'] = hiwrap_object['lon_gate'][:, time_inds_hiwrap]
hiwrap_resampled['lat_gate'] = hiwrap_object['lat_gate'][:, time_inds_hiwrap]
hiwrap_resampled['dbz_Ka'] = hiwrap_object['dbz_Ka'][:, time_inds_hiwrap]
hiwrap_resampled['ldr_Ka'] = hiwrap_object['ldr_Ka'][:, time_inds_hiwrap]
hiwrap_resampled['vel_Ka'] = hiwrap_object['vel_Ka'][:, time_inds_hiwrap]
hiwrap_resampled['width_Ka'] = hiwrap_object['width_Ka'][:, time_inds_hiwrap]
hiwrap_resampled['dbz_Ku'] = hiwrap_object['dbz_Ku'][:, time_inds_hiwrap]
hiwrap_resampled['ldr_Ku'] = hiwrap_object['ldr_Ku'][:, time_inds_hiwrap]
hiwrap_resampled['vel_Ku'] = hiwrap_object['vel_Ku'][:, time_inds_hiwrap]
hiwrap_resampled['width_Ku'] = hiwrap_object['width_Ku'][:, time_inds_hiwrap]
# Set reference point (currently Albany, NY)
lat_0 = 42.6526
lon_0 = -73.7562
# Define a map projection to calculate cartesian distances
p = Proj(proj='laea', zone=10, ellps='WGS84', lat_0=lat_0, lon_0=lon_0)
# Get HIWRAP cartesian points
lon_hiwrap = hiwrap_object['lon_gate'][0, time_inds_hiwrap] # only need first gate coordinate as the rest in each beam are the same
lat_hiwrap = hiwrap_object['lat_gate'][0, time_inds_hiwrap] # only need first gate coordinate as the rest in each beam are the same
hiwrap_x, hiwrap_y = p(lon_hiwrap, lat_hiwrap)
# Resample CRS data if specified
if crs_object is not None:
time_inds_crs = np.where((crs_object['time']>=np.datetime64(start_time)) & (crs_object['time']<=np.datetime64(end_time)))[0]
hiwrap_x_tile = np.tile(np.reshape(hiwrap_x, (len(hiwrap_x), 1)), (1, len(time_inds_crs)))
hiwrap_y_tile = np.tile(np.reshape(hiwrap_y, (len(hiwrap_y), 1)), (1, len(time_inds_crs)))
# Get CRS cartesian points
crs_lon = crs_object['lon_gate'][0, time_inds_crs]
crs_lat = crs_object['lat_gate'][0, time_inds_crs]
crs_x, crs_y = p(crs_lon, crs_lat)
crs_x_tile = np.tile(np.reshape(crs_x, (1, len(crs_x))), (len(time_inds_hiwrap), 1))
crs_y_tile = np.tile(np.reshape(crs_y, (1, len(crs_y))), (len(time_inds_hiwrap), 1))
# Get CRS beam indices and save some variables to a dictionary
dists = np.sqrt((hiwrap_x_tile - crs_x_tile)**2. + (hiwrap_y_tile-crs_y_tile)**2.)
crs_beam_inds = np.argmin(dists, axis=1)
crs_resampled = {}
crs_resampled['time'] = hiwrap_resampled['time']
crs_resampled['nomdist'] = hiwrap_resampled['nomdist']
# Loop through beams and determine nearest CRS gate to each HIWRAP gate
dbz_w = np.ma.zeros(hiwrap_object['dbz_Ku'][:, time_inds_hiwrap].shape)
ldr_w = np.ma.zeros(hiwrap_object['ldr_Ku'][:, time_inds_hiwrap].shape)
vel_w = np.ma.zeros(hiwrap_object['vel_Ku'][:, time_inds_hiwrap].shape)
width_w = np.ma.zeros(hiwrap_object['width_Ku'][:, time_inds_hiwrap].shape)
for time_ind in range(len(crs_beam_inds)):
alt_beam_hiwrap = hiwrap_object['alt_gate'][:, time_inds_hiwrap[time_ind]]
alt_beam_crs = crs_object['alt_gate'][:, time_inds_crs[crs_beam_inds[time_ind]]]
alt_beam_hiwrap = np.tile(np.reshape(alt_beam_hiwrap, (len(alt_beam_hiwrap), 1)), (1, len(alt_beam_crs)))
alt_beam_crs = np.tile(np.reshape(alt_beam_crs, (1, len(alt_beam_crs))), (alt_beam_hiwrap.shape[0], 1))
crs_gate_inds = np.argmin(np.abs(alt_beam_hiwrap - alt_beam_crs), axis=1)
dbz_w[:, time_ind] = crs_object['dbz_W'][crs_gate_inds, time_inds_crs[crs_beam_inds[time_ind]]]
ldr_w[:, time_ind] = crs_object['ldr_W'][crs_gate_inds, time_inds_crs[crs_beam_inds[time_ind]]]
vel_w[:, time_ind] = crs_object['vel_W'][crs_gate_inds, time_inds_crs[crs_beam_inds[time_ind]]]
width_w[:, time_ind] = crs_object['width_W'][crs_gate_inds, time_inds_crs[crs_beam_inds[time_ind]]]
# Assign variables to dictionary
crs_resampled['time_gate'] = hiwrap_resampled['time_gate']
crs_resampled['alt_gate'] = hiwrap_resampled['alt_gate']
crs_resampled['lon_gate'] = hiwrap_resampled['lon_gate']
crs_resampled['lat_gate'] = hiwrap_resampled['lat_gate']
crs_resampled['dbz_W'] = dbz_w
crs_resampled['ldr_W'] = ldr_w
crs_resampled['vel_W'] = vel_w
crs_resampled['width_W'] = width_w
# Resample EXRAD data if specified
if exrad_object is not None:
time_inds_exrad = np.where((exrad_object['time']>=np.datetime64(start_time)) & (exrad_object['time']<=np.datetime64(end_time)))[0]
hiwrap_x_tile = np.tile(np.reshape(hiwrap_x, (len(hiwrap_x), 1)), (1, len(time_inds_exrad)))
hiwrap_y_tile = np.tile(np.reshape(hiwrap_y, (len(hiwrap_y), 1)), (1, len(time_inds_exrad)))
# Get EXRAD cartesian points
exrad_lon = exrad_object['lon_gate'][0, time_inds_exrad]
exrad_lat = exrad_object['lat_gate'][0, time_inds_exrad]
exrad_x, exrad_y = p(exrad_lon, exrad_lat)
exrad_x_tile = np.tile(np.reshape(exrad_x, (1, len(exrad_x))), (len(time_inds_hiwrap), 1))
exrad_y_tile = np.tile(np.reshape(exrad_y, (1, len(exrad_y))), (len(time_inds_hiwrap), 1))
# Get EXRAD beam indices and save some variables to a dictionary
dists = np.sqrt((hiwrap_x_tile - exrad_x_tile)**2. + (hiwrap_y_tile-exrad_y_tile)**2.)
exrad_beam_inds = np.argmin(dists, axis=1)
exrad_resampled = {}
exrad_resampled['time'] = hiwrap_resampled['time']
exrad_resampled['nomdist'] = hiwrap_resampled['nomdist']
# Loop through beams and determine nearest EXRAD gate to each HIWRAP gate
dbz_x = np.ma.zeros(hiwrap_object['dbz_Ku'][:, time_inds_hiwrap].shape)
vel_x = np.ma.zeros(hiwrap_object['vel_Ku'][:, time_inds_hiwrap].shape)
width_x = np.ma.zeros(hiwrap_object['width_Ku'][:, time_inds_hiwrap].shape)
for time_ind in range(len(exrad_beam_inds)):
alt_beam_hiwrap = hiwrap_object['alt_gate'][:, time_inds_hiwrap[time_ind]]
alt_beam_exrad = exrad_object['alt_gate'][:, time_inds_exrad[exrad_beam_inds[time_ind]]]
alt_beam_hiwrap = np.tile(np.reshape(alt_beam_hiwrap, (len(alt_beam_hiwrap), 1)), (1, len(alt_beam_exrad)))
alt_beam_exrad = np.tile(np.reshape(alt_beam_exrad, (1, len(alt_beam_exrad))), (alt_beam_hiwrap.shape[0], 1))
exrad_gate_inds = np.argmin(np.abs(alt_beam_hiwrap - alt_beam_exrad), axis=1)
dbz_x[:, time_ind] = exrad_object['dbz_X'][exrad_gate_inds, time_inds_exrad[exrad_beam_inds[time_ind]]]
vel_x[:, time_ind] = exrad_object['vel_X'][exrad_gate_inds, time_inds_exrad[exrad_beam_inds[time_ind]]]
width_x[:, time_ind] = exrad_object['width_X'][exrad_gate_inds, time_inds_exrad[exrad_beam_inds[time_ind]]]
# Assign variables to dictionary
exrad_resampled['time_gate'] = hiwrap_resampled['time_gate']
exrad_resampled['alt_gate'] = hiwrap_resampled['alt_gate']
exrad_resampled['lon_gate'] = hiwrap_resampled['lon_gate']
exrad_resampled['lat_gate'] = hiwrap_resampled['lat_gate']
exrad_resampled['dbz_X'] = dbz_x
exrad_resampled['vel_X'] = vel_x
exrad_resampled['width_X'] = width_x
# Save out dictionaries
if crs_object is None: # save out HIWRAP and EXRAD data
return hiwrap_resampled, exrad_resampled
if exrad_object is None: # save out HIWRAP and CRS data
return hiwrap_resampled, crs_resampled
else: # save out all radar data
return hiwrap_resampled, crs_resampled, exrad_resampled
def er2read(er2file, beam='nadir', atten_file=None, max_roll=None, dbz_sigma=None, ldr_sigma=None, vel_sigma=None, width_sigma=None,
dbz_min=None, ldr_min=None, vel_min=None, width_min=None):
'''
Parses ER-2 radar data and performs QC as requested.
INPUTS:
er2file: Path to the ER-2 radar dataset
beam: 'nadir' or 'scanning' (currently only supports nadir beam data)
atten_file: None or path to file containing gridded attenuation due to atmospheric gases
max_roll: None or float value where data masked above threshold [deg]
dbz_sigma: None or float value where data masked above threshold using a Gaussian filter
ldr_sigma: None or float value where data masked above threshold using a Gaussian filter
vel_sigma: None or float value where data masked above threshold using a Gaussian filter
width_sigma: None or float value where data masked above threshold using a Gaussian filter
dbz_min: None or float value where data masked below threshold [dBZ]
ldr_min: None or float value where data masked below threshold [dB]
vel_min: None or float value where data masked below threshold [m/s]
width_min: None or float value where data masked below threshold [m/s]
OUTPUTS:
er2rad: Dictionary object with select navigation and radar variables
'''
er2rad = {}
hdf = h5py.File(er2file, 'r')
radname = hdf['Information']['RadarName'][0].decode('UTF-8')
# Aircraft nav information
alt_plane = hdf['Navigation']['Data']['Height'][:]
lat = hdf['Navigation']['Data']['Latitude'][:]
lon = hdf['Navigation']['Data']['Longitude'][:]
heading = hdf['Navigation']['Data']['Heading'][:] # deg from north (==0 for northward, ==90 for eastward, ==-90 for westward)
roll = hdf['Navigation']['Data']['Roll'][:]
pitch = hdf['Navigation']['Data']['Pitch'][:]
drift = hdf['Navigation']['Data']['Drift'][:]
nomdist = hdf['Navigation']['Data']['NominalDistance'][:]
# Time information
time_raw = hdf['Time']['Data']['TimeUTC'][:]
time_dt = [datetime(1970, 1, 1)+timedelta(seconds=time_raw[i]) for i in range(len(time_raw))] # Python datetime object
time_dt64 = np.array(time_dt, dtype='datetime64[ms]') # Numpy datetime64 object (e.g., for plotting)
# Radar information
rg = hdf['Products']['Information']['Range'][:]
if radname=='CRS':
radar_dbz = hdf['Products']['Data']['dBZe'][:].T
radar_ldr = hdf['Products']['Data']['LDR'][:].T
radar_vel = hdf['Products']['Data']['Velocity_corrected'][:].T
radar_width = hdf['Products']['Data']['SpectrumWidth'][:].T
if atten_file is not None: # Correct for 2-way path integrated attenuation
print('Correcting for attenuation at W-band due to atmospheric gases and LWC.')
atten_data = xr.open_dataset(atten_file)
radar_dbz = radar_dbz + atten_data['k_w'].values + atten_data['k_w_liquid'].values
elif radname=='HIWRAP':
radar_dbz = hdf['Products']['Ku']['Combined']['Data']['dBZe'][:].T
radar_ldr = hdf['Products']['Ku']['Combined']['Data']['LDR'][:].T
radar_vel = hdf['Products']['Ku']['Combined']['Data']['Velocity_corrected'][:].T
radar_width = hdf['Products']['Ku']['Combined']['Data']['SpectrumWidth'][:].T
radar2_dbz = hdf['Products']['Ka']['Combined']['Data']['dBZe'][:].T
radar2_ldr = hdf['Products']['Ka']['Combined']['Data']['LDR'][:].T
radar2_vel = hdf['Products']['Ka']['Combined']['Data']['Velocity_corrected'][:].T
radar2_width = hdf['Products']['Ka']['Combined']['Data']['SpectrumWidth'][:].T
if atten_file is not None: # Correct for 2-way path integrated attenuation
print('Correcting for attenuation at Ka- and Ku-band due to atmospheric gases.')
atten_data = xr.open_dataset(atten_file)
radar_dbz = radar_dbz + atten_data['k_ku'].values
radar2_dbz = radar2_dbz + atten_data['k_ka'].values
elif radname=='EXRAD':
radar_dbz = hdf['Products']['Data']['dBZe'][:].T
radar_ldr = -999. * np.ones(radar_dbz.shape) # dummy values as variable does not exist
if 'Velocity_corrected' in list(hdf['Products']['Data'].keys()):
radar_vel = hdf['Products']['Data']['Velocity_corrected'][:].T # for NUBF correction
else:
radar_vel = hdf['Products']['Data']['Velocity'][:].T
radar_width = np.ma.masked_invalid(hdf['Products']['Data']['SpectrumWidth'][:].T)
if atten_file is not None: # Correct for 2-way path integrated attenuation
print('Correcting for attenuation at X-band due to atmospheric gases.')
atten_data = xr.open_dataset(atten_file)
radar_dbz = radar_dbz + atten_data['k_x'].values
else:
print('Error: Unsupported radar')
# Make some 1D variables 2D
time2d = np.tile(time_dt64[np.newaxis, :], (len(rg), 1))
[alt2d_plane, rg2d] = np.meshgrid(alt_plane, rg)
alt_gate = alt2d_plane - rg2d # compute the altitude of each gate
lat2d = np.tile(lat[np.newaxis, :], (len(rg), 1))
lon2d = np.tile(lon[np.newaxis, :], (len(rg), 1))
roll2d = np.tile(roll[np.newaxis, :], (len(rg), 1))
# === QC data if user specifies it ===
# Mask data where the aircraft roll exceeds the max_roll threshold [deg]
if max_roll is not None:
radar_dbz = np.ma.masked_where(np.abs(roll2d) > max_roll, radar_dbz)
radar_ldr = np.ma.masked_where(np.abs(roll2d) > max_roll, radar_ldr)
radar_vel = np.ma.masked_where(np.abs(roll2d) > max_roll, radar_vel)
radar_width = np.ma.masked_where(np.abs(roll2d) > max_roll, radar_width)
if radname=='HIWRAP': # Ka-band
radar2_dbz = np.ma.masked_where(np.abs(roll2d) > max_roll, radar2_dbz)
radar2_ldr = np.ma.masked_where(np.abs(roll2d) > max_roll, radar2_ldr)
radar2_vel = np.ma.masked_where(np.abs(roll2d) > max_roll, radar2_vel)
radar2_width = np.ma.masked_where(
|
np.abs(roll2d)
|
numpy.abs
|
#!/usr/bin/env python3
import numpy as np
import coremltools as ct
from coremltools.models.neural_network import datatypes, NeuralNetworkBuilder
# KxK GEMM with bias
K = 64
input_features = [('image', datatypes.Array(K))]
output_features = [('probs', datatypes.Array(K))]
weights = np.zeros((K, K)) + 3
bias =
|
np.ones(K)
|
numpy.ones
|
import collections
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def visualize_boxes(image, boxes, labels, probs, class_labels):
category_index = {}
for id_, label_name in enumerate(class_labels):
category_index[id_] = {"name": label_name}
image=visualize_boxes_and_labels_on_image_array(image, boxes, labels, probs, category_index)
return image
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
sorted_ind = np.argsort(-scores)
boxes=boxes[sorted_ind]
scores=scores[sorted_ind]
classes=classes[sorted_ind]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
for box, color in box_to_color_map.items():
xmin, ymin, xmax, ymax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax, color=color,
thickness=line_thickness, display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
return image
def draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax, color='red',
thickness=4, display_str_list=(), use_normalized_coordinates=True):
image_pil = Image.fromarray(
|
np.uint8(image)
|
numpy.uint8
|
import numpy as np
from gtfblib import Chen
import matplotlib.pyplot as plt
from scipy.io import wavfile
import pickle
from IFModel import generate_impulse_train_multi_channels
DEBUG = False
folder = './data/full/'
# train_x_raw = np.load(folder + 'x_small.npy', allow_pickle=True)
train_x_raw = np.load(folder + 'train_x.npy', allow_pickle=True)
test_x_raw = np.load(folder + 'test_x.npy', allow_pickle=True)
train_x_normalize = []
test_x_normalize = []
for x in train_x_raw:
train_x_normalize.append(x.astype(np.float64) / np.max(x))
for x in test_x_raw:
test_x_normalize.append(x.astype(np.float64) / np.max(x))
fil = Chen(fs=8000)
train_x_gtf = []
test_x_gtf = []
num = 0
for x in train_x_normalize:
a = fil.process(x).real
a = a / np.max(abs(a), axis=1)[:, np.newaxis] * 4
train_x_gtf.append(a)
num += 1
for x in test_x_normalize:
a = fil.process(x).real
a = a / np.max(a, axis=1)[:, np.newaxis] * 4
test_x_gtf.append(a)
train_impulse_trains = []
test_impulse_trains = []
alpha = 0.05
theta = 1000
freq =
|
np.array([99, 192, 341, 501, 685, 908, 1134, 1473, 1821, 2200, 2720, 3326, 4075, 4990, 5977, 7283])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, with_statement
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import rospy
import roslib
roslib.load_manifest('bvt_pantilt')
from sensor_msgs.msg import JointState
from bvt_pantilt.msg import PanTiltOrientation
# topics
TOPIC_ORIENT = 'pantilt/orientation'
TOPIC_REQUEST = 'pantilt/orientation_request'
TOPIC_JOINT = '/joint_states'
# constants
INIT_ROLL = 0.0 # rad
INIT_POSE = [0.0, 0.0] # rad, rad
INIT_DELTA = 0.10 # sec
DEFAULT_AZ_MIN = -179.0 # deg
DEFAULT_AZ_MAX = 179.0 # deg
DEFAULT_EL_MIN = -30.0 # deg
DEFAULT_EL_MAX = 70.0 # deg
DEFAULT_AZ_SPD = 2.0 # deg/s
DEFAULT_EL_SPD = 2.0 # deg/s
def wrapTo2Pi(theta):
"""Normalize an angle in radians to [0, 2*pi]"""
return theta % (2.0 * np.pi)
def wrapToPi(theta):
"""Normalize an angle in radians to [-pi, pi]"""
return (wrapTo2Pi(theta + np.pi) - np.pi)
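# --- Editor's illustrative check (not part of the original node) -------------
# wrapToPi maps any angle into the [-pi, pi] range used by the axis limits
# below; e.g. 3*pi/2 wraps to -pi/2 and -3*pi/2 wraps to +pi/2.
def _wrap_demo():
    assert np.isclose(wrapToPi(3.0 * np.pi / 2.0), -np.pi / 2.0)
    assert np.isclose(wrapToPi(-3.0 * np.pi / 2.0), np.pi / 2.0)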
class PanTiltSimulator(object):
def __init__(self, name):
self.node_name = name
# initial state
self.roll = rospy.get_param('pantilt_roll', INIT_ROLL)
self.time_delta = rospy.get_param('pantilt_delta', INIT_DELTA)
self.current_pose = rospy.get_param('pantilt_pose', INIT_POSE)
self.goal_pose = self.current_pose
# current config (rads)
self.azimuth_min_ang = np.deg2rad(rospy.get_param('pantilt_az_min', DEFAULT_AZ_MIN))
self.azimuth_max_ang = np.deg2rad(rospy.get_param('pantilt_az_max', DEFAULT_AZ_MAX))
self.elevation_min_ang = np.deg2rad(rospy.get_param('pantilt_el_min', DEFAULT_EL_MIN))
self.elevation_max_ang = np.deg2rad(rospy.get_param('pantilt_el_max', DEFAULT_EL_MAX))
self.azimuth_speed = rospy.get_param('pantilt_az_spd', DEFAULT_AZ_SPD)
self.elevation_speed = rospy.get_param('pantilt_el_spd', DEFAULT_EL_SPD)
# ros interface
self.joint = rospy.Publisher(TOPIC_JOINT, JointState, queue_size=10)
self.sub = rospy.Subscriber(TOPIC_REQUEST, PanTiltOrientation, self.pantilt_callback, queue_size=1)
self.pub = rospy.Publisher(TOPIC_ORIENT, PanTiltOrientation, latch=True, queue_size=10)
self.tim = rospy.Timer(rospy.Duration(self.time_delta), self.pantilt_executor)
def pantilt_executor(self, event=None):
if self.goal_pose != self.current_pose:
# azimuth
if self.goal_pose[0] > self.current_pose[0]:
self.current_pose[0] += np.deg2rad(self.azimuth_speed * self.time_delta)
self.current_pose[0] = np.min(self.current_pose[0], self.goal_pose[0])
else:
self.current_pose[0] -= np.deg2rad(self.azimuth_speed * self.time_delta)
self.current_pose[0] = np.max(self.current_pose[0], self.goal_pose[0])
# elevation
if self.goal_pose[1] > self.current_pose[1]:
self.current_pose[1] += np.deg2rad(self.elevation_speed * self.time_delta)
self.current_pose[1] = np.min(self.current_pose[1], self.goal_pose[1])
else:
self.current_pose[1] -= np.deg2rad(self.elevation_speed * self.time_delta)
self.current_pose[1] =
|
np.max(self.current_pose[1], self.goal_pose[1])
|
numpy.max
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
loss function for training and sample function for testing
"""
import numpy as np
import mindspore as ms
from mindspore import Tensor
import mindspore.nn as nn
import mindspore.ops as P
from mindspore import context
class log_sum_exp(nn.Cell):
"""Numerically stable log_sum_exp
"""
def __init__(self):
super(log_sum_exp, self).__init__()
self.maxi = P.ReduceMax()
self.maxi_dim = P.ReduceMax(keep_dims=True)
self.log = P.Log()
self.sums = P.ReduceSum()
self.exp = P.Exp()
def construct(self, x):
axis = len(x.shape) - 1
m = self.maxi(x, axis)
m2 = self.maxi_dim(x, axis)
return m + self.log(self.sums(self.exp(x - m2), axis))
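# --- Editor's illustrative sketch (not part of the original network) ---------
# The cell above implements the usual shift-by-max trick,
#   logsumexp(x) = max(x) + log(sum(exp(x - max(x)))),
# which avoids overflow for large logits. A plain NumPy reference for checking:
def _np_log_sum_exp(x, axis=-1):
    m = np.max(x, axis=axis, keepdims=True)
    return np.squeeze(m, axis=axis) + np.log(np.sum(np.exp(x - m), axis=axis))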
class log_softmax(nn.Cell):
"""
replacement of P.LogSoftmax(-1) in CPU mode
only support x.shape == 2 or 3
"""
def __init__(self):
super(log_softmax, self).__init__()
self.maxi = P.ReduceMax()
self.log = P.Log()
self.sums = P.ReduceSum()
self.exp = P.Exp()
self.axis = -1
self.concat = P.Concat(-1)
self.expanddims = P.ExpandDims()
def construct(self, x):
"""
Args:
x (Tensor): input
Returns:
Tensor: log_softmax of input
"""
c = self.maxi(x, self.axis)
logs, lsm = None, None
if len(x.shape) == 2:
for j in range(x.shape[-1]):
temp = self.expanddims(self.exp(x[:, j] - c), -1)
logs = temp if j == 0 else self.concat((logs, temp))
sums = self.sums(logs, -1)
for i in range(x.shape[-1]):
temp = self.expanddims(x[:, i] - c - self.log(sums), -1)
lsm = temp if i == 0 else self.concat((lsm, temp))
return lsm
if len(x.shape) == 3:
for j in range(x.shape[-1]):
temp = self.expanddims(self.exp(x[:, :, j] - c), -1)
logs = temp if j == 0 else self.concat((logs, temp))
sums = self.sums(logs, -1)
for i in range(x.shape[-1]):
temp = self.expanddims(x[:, :, i] - c - self.log(sums), -1)
lsm = temp if i == 0 else self.concat((lsm, temp))
return lsm
return None
class Stable_softplus(nn.Cell):
"""Numerically stable softplus
"""
def __init__(self):
super(Stable_softplus, self).__init__()
self.log_op = P.Log()
self.abs_op = P.Abs()
self.relu_op = P.ReLU()
self.exp_op = P.Exp()
def construct(self, x):
return self.log_op(1 + self.exp_op(- self.abs_op(x))) + self.relu_op(x)
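# --- Editor's illustrative sketch (not part of the original network) ---------
# Stable_softplus uses the identity log(1 + e^x) = log(1 + e^(-|x|)) + max(x, 0),
# so the exponential is never evaluated for large positive x. NumPy reference:
def _np_stable_softplus(x):
    return np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0.0)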
class discretized_mix_logistic_loss(nn.Cell):
"""
Discretized_mix_logistic_loss
Args:
num_classes (int): Num_classes
log_scale_min (float): Log scale minimum value
"""
def __init__(self, num_classes=256, log_scale_min=-7.0, reduce=True):
super(discretized_mix_logistic_loss, self).__init__()
self.num_classes = num_classes
self.log_scale_min = log_scale_min
self.reduce = reduce
self.transpose_op = P.Transpose()
self.exp = P.Exp()
self.sigmoid = P.Sigmoid()
self.softplus = Stable_softplus()
self.log = P.Log()
self.cast = P.Cast()
self.expand_dims = P.ExpandDims()
self.tile = P.Tile()
self.maximum = P.Maximum()
self.sums = P.ReduceSum()
self.lse = log_sum_exp()
self.reshape = P.Reshape()
self.factor = self.log(Tensor((self.num_classes - 1) / 2, ms.float32))
self.tensor_one = Tensor(1., ms.float32)
if context.get_context("device_target") == "CPU":
self.logsoftmax = log_softmax()
else:
self.logsoftmax = P.LogSoftmax(-1)
def construct(self, y_hat, y):
"""
Args:
y_hat (Tensor): Predicted distribution
y (Tensor): Target
Returns:
Tensor: Discretized_mix_logistic_loss
"""
nr_mix = y_hat.shape[1] // 3
# (B x T x C)
y_hat = self.transpose_op(y_hat, (0, 2, 1))
# (B, T, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix:2 * nr_mix]
min_cut = self.log_scale_min * self.tile(self.tensor_one, (y_hat.shape[0], y_hat.shape[1], nr_mix))
log_scales = self.maximum(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min_cut)
# B x T x 1 -> B x T x num_mixtures
y = self.tile(y, (1, 1, nr_mix))
centered_y = y - means
inv_stdv = self.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1. / (self.num_classes - 1))
cdf_plus = self.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1. / (self.num_classes - 1))
cdf_min = self.sigmoid(min_in)
log_cdf_plus = plus_in - self.softplus(plus_in)
log_one_minus_cdf_min = -self.softplus(min_in)
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
log_pdf_mid = mid_in - log_scales - 2. * self.softplus(mid_in)
inner_inner_cond = self.cast(cdf_delta > 1e-5, ms.float32)
min_cut2 = 1e-12 * self.tile(self.tensor_one, cdf_delta.shape)
inner_inner_out = inner_inner_cond * \
self.log(self.maximum(cdf_delta, min_cut2)) + \
(1. - inner_inner_cond) * (log_pdf_mid - self.factor)
inner_cond = self.cast(y > 0.999, ms.float32)
inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out
cond = self.cast(y < -0.999, ms.float32)
log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
a, b, c = logit_probs.shape[0], logit_probs.shape[1], logit_probs.shape[2]
logit_probs = self.logsoftmax(self.reshape(logit_probs, (-1, c)))
logit_probs = self.reshape(logit_probs, (a, b, c))
log_probs = log_probs + logit_probs
if self.reduce:
return -self.sums(self.lse(log_probs))
return self.expand_dims(-self.lse(log_probs), -1)
def sample_from_discretized_mix_logistic(y, log_scale_min=-7.0):
"""
Sample from discretized mixture of logistic distributions
Args:
y (ndarray): B x C x T
log_scale_min (float): Log scale minimum value
Returns:
ndarray
"""
nr_mix = y.shape[1] // 3
# B x T x C
y = np.transpose(y, (0, 2, 1))
logit_probs = y[:, :, :nr_mix]
temp = np.random.uniform(1e-5, 1.0 - 1e-5, logit_probs.shape)
temp = logit_probs - np.log(- np.log(temp))
argmax = np.argmax(temp, axis=-1)
# (B, T) -> (B, T, nr_mix)
one_hot = np.eye(nr_mix)[argmax]
means = np.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, axis=-1)
log_scales = np.clip(np.sum(
y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, axis=-1), a_min=log_scale_min, a_max=None)
u = np.random.uniform(1e-5, 1.0 - 1e-5, means.shape)
x = means + np.exp(log_scales) * (np.log(u) - np.log(1. - u))
x = np.clip(x, -1., 1.)
return x.astype(np.float32)
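# --- Editor's illustrative sketch (not part of the original module) ----------
# Component selection above uses the Gumbel-max trick: adding -log(-log(U))
# noise to the logits and taking argmax samples an index with probability
# softmax(logits). A minimal NumPy illustration of that property:
def _gumbel_max_demo(logits, n_draws=10000, seed=0):
    rng = np.random.default_rng(seed)
    u = rng.uniform(1e-5, 1.0 - 1e-5, size=(n_draws,) + logits.shape)
    draws = np.argmax(logits - np.log(-np.log(u)), axis=-1)
    return np.bincount(draws, minlength=logits.shape[-1]) / n_draws  # ~softmax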
class mix_gaussian_loss(nn.Cell):
"""
Mix gaussian loss
"""
def __init__(self, log_scale_min=-7.0, reduce=True):
super(mix_gaussian_loss, self).__init__()
self.log_scale_min = log_scale_min
self.reduce = reduce
self.transpose_op = P.Transpose()
self.maximum = P.Maximum()
self.tile = P.Tile()
self.exp = P.Exp()
self.expand_dims = P.ExpandDims()
self.sums = P.ReduceSum()
self.lse = log_sum_exp()
self.sq = P.Square()
self.sqrt = P.Sqrt()
self.const = P.ScalarToArray()
self.log = P.Log()
self.tensor_one = Tensor(1., ms.float32)
if context.get_context("device_target") == "CPU":
self.logsoftmax = log_softmax()
else:
self.logsoftmax = P.LogSoftmax(-1)
def construct(self, y_hat, y):
"""
Args:
y_hat (Tensor): Predicted probability
y (Tensor): Target
Returns:
Tensor: Mix_gaussian_loss
"""
C = y_hat.shape[1]
if C == 2:
nr_mix = 1
else:
nr_mix = y_hat.shape[1] // 3
# (B x T x C)
y_hat = self.transpose_op(y_hat, (0, 2, 1))
if C == 2:
logit_probs = None
means = y_hat[:, :, 0:1]
min_cut = self.log_scale_min * self.tile(self.tensor_one, (y_hat.shape[0], y_hat.shape[1], 1))
log_scales = self.maximum(y_hat[:, :, 1:2], min_cut)
else:
# (B, T, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix:2 * nr_mix]
min_cut = self.log_scale_min * self.tile(self.tensor_one, (y_hat.shape[0], y_hat.shape[1], nr_mix))
log_scales = self.maximum(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min_cut)
# B x T x 1 -> B x T x num_mixtures
y = self.tile(y, (1, 1, nr_mix))
centered_y = y - means
sd = self.exp(log_scales)
unnormalized_log_prob = -1. * (self.sq(centered_y - 0.)) / (2. * self.sq(sd))
neg_normalization = -1. * self.log(self.const(2. * np.pi)) / 2. - self.log(sd)
log_probs = unnormalized_log_prob + neg_normalization
if nr_mix > 1:
log_probs = log_probs + self.logsoftmax(logit_probs)
if self.reduce:
if nr_mix == 1:
return -self.sums(log_probs)
return -self.sums(self.lse(log_probs))
if nr_mix == 1:
return -log_probs
return self.expand_dims(-self.lse(log_probs), -1)
def sample_from_mix_gaussian(y, log_scale_min=-7.0):
"""
Sample_from_mix_gaussian
Args:
y (ndarray): B x C x T
Returns:
ndarray
"""
C = y.shape[1]
if C == 2:
nr_mix = 1
else:
nr_mix = y.shape[1] // 3
# B x T x C
y = np.transpose(y, (0, 2, 1))
if C == 2:
logit_probs = None
else:
logit_probs = y[:, :, :nr_mix]
if nr_mix > 1:
temp = np.random.uniform(1e-5, 1.0 - 1e-5, logit_probs.shape)
temp = logit_probs - np.log(- np.log(temp))
argmax = np.argmax(temp, axis=-1)
# (B, T) -> (B, T, nr_mix)
one_hot =
|
np.eye(nr_mix)
|
numpy.eye
|
# Copyright (c) 2015.
# <NAME> <bytefish[at]gmx[dot]de> and
# <NAME> <flier[at]techfak.uni-bielefeld.de> and
# <NAME> <nkoester[at]techfak.uni-bielefeld.de>
#
#
# Released to public domain under terms of the BSD Simplified license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# See <http://www.opensource.org/licenses/bsd-license>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
class AbstractDistance(object):
def __init__(self, name):
self._name = name
def __call__(self, p, q):
raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
@property
def name(self):
return self._name
def __repr__(self):
return self._name
class EuclideanDistance(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self, "EuclideanDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q =
|
np.asarray(q)
|
numpy.asarray
|
import math
from collections import defaultdict
import cv2
import time
import argparse
import torch
from sklearn import preprocessing
import numpy as np
import posenet
import numpy as np
from scipy.spatial.distance import cosine, cdist
from fastdtw import fastdtw
import re
import numpy as np
EPS=0.000001
PART_NAMES = [
"leftShoulder", "rightShoulder", "leftElbow", "rightElbow", "leftWrist", "rightWrist",
"leftHip", "rightHip", "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"
]
def normalize(pose_scores, keypoint_scores, keypoint_coords, thresh=0.1):
keypoint_scores = keypoint_scores.reshape((17, -1))
keypoint_coords = keypoint_coords.reshape((17, 2))
# Step 1: filter out bad scores
mask = (keypoint_scores.ravel() > thresh)
if not np.any(mask):
return {}
# Step 2: Crop
min_x = np.min(keypoint_coords[mask, 0])
min_y = np.min(keypoint_coords[mask, 1])
keypoint_coords[:, 0] -= min_x
keypoint_coords[:, 1] -= min_y
# Step 3: Normalize
normalized_coords = preprocessing.normalize(keypoint_coords, norm='l2')
# Step 4: Convert to dict
output = {}
for i in range(17):
if mask[i]:
output[posenet.PART_NAMES[i]] = (normalized_coords[i, 0], normalized_coords[i, 1])
return output
def to_timeseries(dictionaries):
# combines list of dictionaries
ts = defaultdict(list)
# Iterate every part name and combine part values
for mini_dict in dictionaries:
for k, v in mini_dict.items():
ts[k].append(v)
return ts
def crop_dict(pose_dict):
if pose_dict is None:
return None
min_x = 10**6
min_y = 10**6
for v in pose_dict.values():
if v[0] <= min_x:
min_x = v[0]
if v[1] <= min_y:
min_y = v[1]
for k in pose_dict.keys():
pose_dict[k][0] -= min_x
pose_dict[k][1] -= min_y
return pose_dict
def DTW(dict1, dict2, normalize_user=False):
# computes the DTW distance between matching body parts of two pose dictionaries
# and stores the per-part distances in the `distances` dict
distances = {}
for key in dict1:
if key in posenet.PART_NAMES:
if key == 'leftEye' or key == 'rightEye':
continue
if dict1[key] and dict2[key]:
x = np.array(dict1[key]) + EPS
y =
|
np.array(dict2[key])
|
numpy.array
|
# Sound Source locate
#
# @Time : 2019-10-23 12:43
# @Author : xyzhao
# @File : game_multi.py
# @Description: online learning in multiple rooms
import numpy as np
import collections
import math
import pickle
from walker import Walker
"""
env computing, reward computing
game play settings
"""
class Game:
def __init__(self):
self.n_features = 366
self.n_actions = 8
self.max_epoch = 30
self.max_steps = 100
# define sound source information
# fixme
self.src_pos_x = -3.0
self.src_pos_y = 1.6
self.src_pos_z = -4.0
# sample as a grid map with 0.5m unit
# fixme, change step length to 1m
self.unit = 1.0
self.room_grids_x = [i for i in np.arange(-3.0, 3.0 + self.unit, self.unit)]
self.room_grids_z = [i for i in np.arange(-4.0, 4.0 + self.unit, self.unit)]
# fixme, define wall and obstacles
self.wall_axis_z = {-4: [i for i in np.arange(-5.0, 6.0, 1.0)],
4: [i for i in np.arange(-5.0, 6.0, 1.0)],
0: [i for i in np.arange(-5.0, 6.0, 1.0) if i != 0]}
self.wall_axis_x = {5: [i for i in np.arange(-4.0, 5.0, 1.0)],
1: [i for i in np.arange(-4.0, 5.0, 1.0) if i != -2 and i != 2],
-1: [i for i in np.arange(-4.0, 5.0, 1.0) if i != -2 and i != 2],
-5: [i for i in np.arange(-4.0, 5.0, 1.0)]}
# fixme, define checkpoints: room gates, hall center
self.room_gates = [[-2.0, 1, -1.0], [2.0, 1, -1.0], [-2.0, 1, 1.0], [2.0, 1, 1.0]]
self.hall_center = [[0, 0, 0]]
# fixme, define room zone
self.room1_x = [i for i in np.arange(-3.5, 0, 0.5)]
self.room1_z = [i for i in np.arange(-4.5, -1, 0.5)]
self.room2_x = [i for i in np.arange(0.5, 4.0, 0.5)]
self.room2_z = [i for i in np.arange(-4.5, -1, 0.5)]
self.room3_x = [i for i in np.arange(-3.5, 0, 0.5)]
self.room3_z = [i for i in np.arange(1.5, 5.0, 0.5)]
self.room4_x = [i for i in np.arange(0.5, 4.0, 0.5)]
self.room4_z = [i for i in np.arange(1.5, 5.0, 0.5)]
self.hall_x = [i for i in np.arange(-3.5, 4.0, 0.5)]
self.hall_z = [i for i in np.arange(-0.5, 1.0, 0.5)]
self.walker = Walker(self.n_features, self.n_actions)
def detect_invalids(self, x, y, z, room):
invalids = []
directions = [[x, y, z - self.unit], [x + self.unit, y, z - self.unit],
[x + self.unit, y, z], [x + self.unit, y, z + self.unit],
[x, y, z + self.unit], [x - self.unit, y, z + self.unit],
[x - self.unit, y, z], [x - self.unit, y, z - self.unit]]
for direction in directions:
# along x axis, fix z, change x
if self.wall_axis_x.get(direction[2]) is not None:
if direction[0] in self.wall_axis_x[direction[2]]:
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
# along z axis, fix x, change z
if self.wall_axis_z.get(direction[0]) is not None:
if direction[2] in self.wall_axis_z[direction[0]]:
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
if room[4] is False:
for direction in directions:
if (direction[0] in self.room4_x and direction[2] in self.room4_z) or (
direction[0] == self.room_gates[3][0] and direction[2] == self.room_gates[3][2]):
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
if room[3] is False:
for direction in directions:
if (direction[0] in self.room3_x and direction[2] in self.room3_z) or (
direction[0] == self.room_gates[2][0] and direction[2] == self.room_gates[2][2]):
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
if room[2] is False:
for direction in directions:
if (direction[0] in self.room2_x and direction[2] in self.room2_z) or (
direction[0] == self.room_gates[1][0] and direction[2] == self.room_gates[1][2]):
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
if room[1] is False:
for direction in directions:
if (direction[0] in self.room1_x and direction[2] in self.room1_z) or (
direction[0] == self.room_gates[0][0] and direction[2] == self.room_gates[0][2]):
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
if room[0] is False:
for direction in directions:
if direction[0] in self.hall_x and direction[2] in self.hall_z:
invalids.append(self.walker.action_labels.index(str(directions.index(direction) * 45)))
# todo, add some obstacles
return invalids
# fixme, return 1, 2, 3, 4 room, 0-hall
def detect_which_room(self):
if self.walker.pos_x in self.room1_x and self.walker.pos_z in self.room1_z:
return 1
elif self.walker.pos_x in self.room2_x and self.walker.pos_z in self.room2_z:
return 2
elif self.walker.pos_x in self.room3_x and self.walker.pos_z in self.room3_z:
return 3
elif self.walker.pos_x in self.room4_x and self.walker.pos_z in self.room4_z:
return 4
elif self.walker.pos_x in self.hall_x and self.walker.pos_z in self.hall_z:
return 0
else:
return -1
"""
based on guide path to learn actions:
- learn: from inner room guide to gate; avoid obstacles
- not learn: gate into inner room
- reward: diff in angle
"""
def learn_guide_actions(self, path, visit):
a_his = None
for pos in path:
if path.index(pos) == len(path) - 2:
break
s = self.walker.observe_gcc_vector(pos[0], self.walker.pos_y, pos[1])
s = np.array(s)[np.newaxis, :]
pos_key = str(pos[0]) + "*" + str(pos[1])
visit[pos_key] += 1
pos_next = path[path.index(pos) + 1]
s_ = self.walker.observe_gcc_vector(pos_next[0], self.walker.pos_y, pos_next[1])
s_ = np.array(s_)[np.newaxis, :]
# get action
if pos_next[0] - pos[0] == 0 and pos_next[1] - pos[1] == -self.unit:
a = 0
elif pos_next[0] - pos[0] == self.unit and pos_next[1] - pos[1] == -self.unit:
a = 1
elif pos_next[0] - pos[0] == self.unit and pos_next[1] - pos[1] == 0:
a = 2
elif pos_next[0] - pos[0] == self.unit and pos_next[1] - pos[1] == self.unit:
a = 3
elif pos_next[0] - pos[0] == 0 and pos_next[1] - pos[1] == self.unit:
a = 4
elif pos_next[0] - pos[0] == -self.unit and pos_next[1] - pos[1] == self.unit:
a = 5
elif pos_next[0] - pos[0] == -self.unit and pos_next[1] - pos[1] == 0:
a = 6
elif pos_next[0] - pos[0] == -self.unit and pos_next[1] - pos[1] == -self.unit:
a = 7
else:
print("Wrong action get from GUIDE path... ")
a = None
if a_his is None:
a_his = a
# get diff reward
max_angle = max(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
min_angle = min(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
diff = min(abs(max_angle - min_angle), 360 - max_angle + min_angle)
r = 1 - diff / 180
pos_key = str(pos_next[0]) + "*" + str(pos_next[1])
r -= (visit[pos_key]) * 0.2
self.walker.learn(s, a, s_, r)
a_his = a
def play(self):
records_step = []
records_r = []
"""
Begin epoch
"""
for epoch in range(self.max_epoch):
print("========== Epoch %d ======" % epoch)
memory = collections.defaultdict(dict)
visit = {}
for i in self.room_grids_x:
for j in self.room_grids_z:
visit[str(i) + "*" + str(j)] = 0
for k in self.walker.action_labels:
memory[str(i) + "*" + str(j)][k] = 0
# init walker position
# fixme, random choose
self.walker.reset_walker_pos(2.0, 1, 3.0)
DONE = False
sum_reward = 0.0
a_his = None
# fixme, lock room zone and room gates
ROOM = [None] * 5
"""
Begin steps
"""
for step in range(self.max_steps):
print("************** step %d" % step)
GUIDE = False
print("x: " + str(self.walker.pos_x))
print("z: " + str(self.walker.pos_z))
s = self.walker.observe_gcc_vector(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
s = np.array(s)[np.newaxis, :]
# fixme, determine whether the walker is inside a room, at a gate, or in the hall
room_type = self.detect_which_room()
# fixme, if already determine this room, not go out
if ROOM[room_type] is not True:
# walker in room
if room_type in [1, 2, 3, 4]:
print("detect walker in room%d " % room_type)
# source is not in the room, GUIDE
# todo, give more obs about binary
if self.walker.sound_in_room(s) is False:
print("source is not in room%d" % room_type)
path = self.walker.find_shortest_path(self.walker.pos_x, self.walker.pos_z,
self.room_gates[int("%d" % room_type) - 1][0],
self.room_gates[int("%d" % room_type) - 1][2])
self.walker.reset_walker_pos(self.room_gates[int("%d" % room_type) - 1][0],
self.walker.pos_y,
self.room_gates[int("%d" % room_type) - 1][2])
print("guide to room gate %d " % room_type)
if room_type == 1 or room_type == 2:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z + self.unit)
else:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z - self.unit)
print("step further to the hall ")
# fixme, based on path generate experiences to learn
self.learn_guide_actions(path, visit)
ROOM[room_type] = False
GUIDE = True
# source in the room
else:
print("find source in room %d" % room_type)
ROOM[room_type] = True
HALL = False
# walker in the gate, GUIDE into room
elif room_type == -1:
f = 0
if self.walker.pos_x == self.room_gates[0][0] and self.walker.pos_z == self.room_gates[0][2]:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z - self.unit)
f = 1
elif self.walker.pos_x == self.room_gates[1][0] and self.walker.pos_z == self.room_gates[1][2]:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z - self.unit)
f = 2
elif self.walker.pos_x == self.room_gates[2][0] and self.walker.pos_z == self.room_gates[2][2]:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z + self.unit)
f = 3
elif self.walker.pos_x == self.room_gates[3][0] and self.walker.pos_z == self.room_gates[3][2]:
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z + self.unit)
f = 4
print("detect walker in gate%d" % f)
print("step further into room%d" % f)
GUIDE = True
elif room_type == 0:
print("detect walker in the hall")
# todo, give more obs when walker in the hall
# else: walker in the hall
# if step == 0 \
# or [self.walker.pos_x, self.walker.pos_y, self.walker.pos_z] in self.room_gates \
# or [self.walker.pos_x, self.walker.pos_y, self.walker.pos_z] in self.hall_center:
# fe = open('first_obs.pkl', 'rb')
# obs = pickle.load(fe)
#
# # ==================== Right obs
# s_r = obs['right']
# s_r = np.array(s_r)[np.newaxis, :]
# a_r, p_r = self.walker.choose_action(s_r, [])
# p_rr = [p_r[len(p_r) - 2], p_r[len(p_r) - 1]]
# p_rr = np.append(p_rr, p_r[:len(p_r) - 2])
#
# # ==================== Left obs
# s_l = obs['left']
# s_l = np.array(s_l)[np.newaxis, :]
# a_l, p_l = self.walker.choose_action(s_l, [])
# p_ll = [p_l[0], p_l[1]]
# p_ll = np.append(p_l[2:], p_ll)
#
# # ==================== Down obs
# s_d = obs['down']
# s_d = np.array(s_d)[np.newaxis, :]
# a_d, p_d = self.walker.choose_action(s_d, [])
# p_dd = [p_d[len(p_d) - 4], p_d[len(p_d) - 3], p_d[len(p_d) - 2], p_d[len(p_d) - 1]]
# p_dd = np.append(p_dd, p_d[:len(p_d) - 4])
#
# # ==================== Decide action
# p_mix = [0] * self.n_actions
# for i in range(self.n_actions):
# if i not in invalids:
# p_mix[i] = p[i] + p_rr[i] + p_ll[i] + p_dd[i]
#
# p_mix = np.array(p_mix)
# p_mix /= p_mix.sum()
# a_mix = np.argmax(p_mix)
#
# fe.close()
#
# a = a_mix
# a_his = a
# p = p_mix
# direction = self.walker.action_labels[a]
# if walker is guided to a new pos
if GUIDE is True:
# fixme, init a_his if guide to a new pos
a_his = None
continue
# detect walls and obstacles
invalids = self.detect_invalids(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z, ROOM)
# fixme, cut down action space, but for the hall part allow more
pos_key = str(self.walker.pos_x) + "*" + str(self.walker.pos_z)
for i in memory[pos_key].keys():
if self.detect_which_room() == 0:
threshold = 5
else:
threshold = 2
if memory[pos_key][i] >= threshold:
invalids.append(self.walker.action_labels.index(i))
visit[pos_key] += 1
a, p = self.walker.choose_action(s, invalids)
if a_his is None:
a_his = a
direction = self.walker.action_labels[a]
# print(p)
print(direction)
memory[pos_key][direction] += 1
# step next
if direction == '0':
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z - self.unit)
elif direction == '45':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z - self.unit)
elif direction == '90':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z)
elif direction == '135':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '180':
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '225':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '270':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z)
elif direction == '315':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z - self.unit)
# fixme, don't have s_ when get source
if self.walker.pos_x == self.src_pos_x and self.walker.pos_z == self.src_pos_z:
print("get source")
DONE = True
r = 5
s_ = np.array([0 for u in range(self.n_features)])[np.newaxis, :]
else:
# fixme, rebuild reward function
# r = self.walker.observe_volume(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
# r = 1 - abs((a + a_his) % self.n_actions - a_his) / (self.n_actions - 1)
pos_key = str(self.walker.pos_x) + "*" + str(self.walker.pos_z)
max_angle = max(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
min_angle = min(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
diff = min(abs(max_angle - min_angle), 360 - max_angle + min_angle)
r = 1 - diff / 180
r -= (visit[pos_key]) * 0.2
# # note action has been performed
# # fixme, give additional reward when in hall
if self.detect_which_room() == 0:
for i in range(1, 5):
path_temp = self.walker.find_shortest_path(self.walker.pos_x, self.walker.pos_z,
self.room_gates[i - 1][0],
self.room_gates[i - 1][2])
locals()['dis%d' % i] = len(path_temp) - 1
sum_dis = 0.0
# todo, need calculate for all grids in hall to get max num
max_dis = 12
for i in range(1, 5):
if ROOM[i] is None:
sum_dis += locals()['dis%d' % i]
# todo, reward should be diff for large distance
if sum_dis >= 10:
addition = 10
else:
addition = 0
r = 1 - (sum_dis + addition) / max_dis
# todo: give a punishment when stepping into the wrong room
# (the walker only steps to the gate first; inner-room guidance then takes over until it is back in the hall)
if self.walker.pos_x == 2 and self.walker.pos_z == -2:
r -= 1
if self.walker.pos_x == 2 and self.walker.pos_z == -1:
r -= 1
print("x: " + str(self.walker.pos_x))
print("z: " + str(self.walker.pos_z))
print("reward: " + str(r))
# give punishment if detect obstacles
# pub = self.detect_invalids(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
# if len(pub) > 0:
# r -= 0.5
s_ = self.walker.observe_gcc_vector(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
s_ =
|
np.array(s_)
|
numpy.array
|
import numpy as np
from pyquaternion import Quaternion
import casadi as cs
from sklearn.metrics import mean_squared_error
def quaternion_to_euler(q):
q = Quaternion(w=q[0], x=q[1], y=q[2], z=q[3])
yaw, pitch, roll = q.yaw_pitch_roll
return [roll, pitch, yaw]
def euler_to_quaternion(roll, pitch, yaw):
qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
return np.array([qw, qx, qy, qz])
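# Sanity check (hypothetical usage): the two conversions should round-trip, e.g.
# quaternion_to_euler(euler_to_quaternion(0.1, -0.2, 0.3)) ~= [0.1, -0.2, 0.3]
# up to floating-point error, assuming pyquaternion's yaw_pitch_roll follows the
# same z-y'-x'' (yaw-pitch-roll) convention used in euler_to_quaternion above.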
def add_measurement_noise(xcurrent):
# Apply measurement noise to the state: zero-mean Gaussian noise with a fixed standard deviation for each state component
qw = xcurrent[3]
qx = xcurrent[4]
qy = xcurrent[5]
qz = xcurrent[6]
quat = np.array([qw,qx,qy,qz])
euler_angles = quaternion_to_euler(quat)
x = xcurrent[0]
y = xcurrent[1]
z = xcurrent[2]
roll = euler_angles[0]
pitch = euler_angles[1]
yaw = euler_angles[2]
vx = xcurrent[7]
vy = xcurrent[8]
vz = xcurrent[9]
# mean of the noise
mean = 0
# scale of noise of each state
std_x = 0.01
std_y = 0.01
std_z = 0.01
std_roll = (np.pi / 180) / 2
std_pitch = (np.pi / 180) / 2
std_yaw = (np.pi / 180) / 2
std_vx = 0.001
std_vy = 0.001
std_vz = 0.001
# create the noisy states
x_noisy = x + np.random.normal(mean, std_x)
y_noisy = y + np.random.normal(mean, std_y)
z_noisy = z + np.random.normal(mean, std_z)
roll_noisy = roll + np.random.normal(mean, std_roll)
pitch_noisy = pitch + np.random.normal(mean, std_pitch)
yaw_noisy = yaw + np.random.normal(mean, std_yaw)
vx_noisy = vx + np.random.normal(mean, std_vx)
vy_noisy = vy + np.random.normal(mean, std_vy)
vz_noisy = vz + np.random.normal(mean, std_vz)
q_noisy = euler_to_quaternion(roll_noisy, pitch_noisy, yaw_noisy)
q_noisy = unit_quat(q_noisy)
qw_noisy = q_noisy[0]
qx_noisy = q_noisy[1]
qy_noisy = q_noisy[2]
qz_noisy = q_noisy[3]
# create new noisy measurement vector
xcurrent_noisy = np.array([x_noisy, y_noisy, z_noisy, qw_noisy, qx_noisy, qy_noisy, qz_noisy, vx_noisy, vy_noisy, vz_noisy])
return xcurrent_noisy
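# Hypothetical usage inside a simulation loop (xcurrent is the 10-element state
# [x, y, z, qw, qx, qy, qz, vx, vy, vz] assumed by the functions above):
#   xcurrent_meas = add_measurement_noise( ensure_unit_quat( xcurrent ) )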
def ensure_unit_quat(xcurrent):
# ensure that the quaternion in the current state is a unit vector
x = xcurrent[0]
y = xcurrent[1]
z = xcurrent[2]
qw = xcurrent[3]
qx = xcurrent[4]
qy = xcurrent[5]
qz = xcurrent[6]
vx = xcurrent[7]
vy = xcurrent[8]
vz = xcurrent[9]
q = np.array([qw, qx, qy, qz])
q = unit_quat(q)
# extracting the elements from q
qw = q[0]
qx = q[1]
qy = q[2]
qz = q[3]
xcurrent = np.array([x, y, z, qw, qx, qy, qz, vx, vy, vz])
return xcurrent
def unit_quat(q):
"""
Normalizes a quaternion to be unit modulus.
:param q: 4-dimensional numpy array or CasADi object
:return: the unit quaternion in the same data format as the original one
"""
if isinstance(q, np.ndarray):
# if (q == np.zeros(4)).all():
# q = np.array([1, 0, 0, 0])
q_norm = np.sqrt(np.sum(q ** 2))
else:
q_norm = cs.sqrt(cs.sumsqr(q))
return 1 / q_norm * q
def R2D(rad):
return rad*180 / np.pi
def add_input_noise(u0,model):
# Apply noise to the inputs: zero-mean Gaussian noise, with a fixed standard deviation for the thrust and a standard deviation set by the spread of the commanded body rates
T = np.array([u0[0]])
w = u0[1:]
mean = 0
std_T = 0.01
std_w = np.std(w)
# std_q = np.std(q)
T_noisy = T +
|
np.random.normal(mean, std_T)
|
numpy.random.normal
|
import pdb, sys, os, glob, pickle, time, re
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
from bayes.pyhm_dev import pyhm
import numexpr
import pysynphot
def loadStellarModel( Teff, MH, logg, stellarModel='k93models' ):
sp = pysynphot.Icat( 'k93models', Teff, MH, logg )
wavA = sp.wave # A
flam = sp.flux # erg s^-1 cm^-2 A^-1
#flam = flam*wavA
sp.convert( 'photlam' )
photlam = sp.flux
c = pysynphot.units.C
h = pysynphot.units.H
ePhot = h*c/wavA
myPhotlam = flam/ePhot
wavMicr = wavA*(1e-10)*(1e6)
ixs = ( wavMicr>0.2 )*( wavMicr<6 )
wavMicr = wavMicr[ixs]
flam = flam[ixs]
photlam = photlam[ixs]
#photlam = myPhotlam[ixs]
return wavMicr, flam, photlam
def checkStellarModel( e1d, bp, Teff, MH, logg, trim_disp_ixs, \
wavsol_dispbound_ixs, stellarModel='k93models' ):
"""
Routine for quickly trialing Teff, MH, logg values to
cross-correlate with model stellar spectrum. Adapted
from GetWavSol() in ClassDefs.py.
Note:
bp = ClassDefs.Bandpass()
bp.config = 'G141'
bp.fpath = '/path/to/bandpass/file'
bp.Read()
"""
#d1, d2 = trim_box[1] # dispersion limits
d1, d2 = trim_disp_ixs # dispersion limits
dwav_max = 0.3 # in micron
nshifts = int( np.round( 2*dwav_max*(1e4)+1 ) ) # 0.0001 micron = 0.1 nm
ndisp = len( e1d )
A2micron = 1e-4
ndisp = e1d.size
wbp = bp.bandpass_wavmicr
ybp = bp.bandpass_thput
dwbp = np.median( np.diff( wbp ) )
wstar, flam, photlam = loadStellarModel( Teff, MH, logg, stellarModel=stellarModel )
ystar = flam # still unsure why this works better than photlam...
#ystar = photlam
# Interpolate the stellar model onto the transmission wavelength grid:
ixs = ( wstar>wbp[0]-0.1 )*( wstar<wbp[-1]+0.1 )
ystar_interp = np.interp( wbp, wstar[ixs], ystar[ixs] )
# Modulate the interpolated stellar model by the throughput to
# simulate a measured spectrum:
ystar = ystar_interp*ybp
ystar /= ystar.max()
wstar = wbp
dwstar = np.median( np.diff( wstar ) )
ix = np.argmax( ystar )
w0 = wstar[ix]
x = np.arange( ndisp )
ix = np.argmax( e1d )
delx = x-x[ix]
wavsol0 = w0 + bp.dispersion_micrppix*delx
#x0 = np.arange( wavsol0.size )
# Smooth the stellar flux and model spectrum, because we use
# the sharp edges of the throughput curve to calibrate the
# wavelength solution:
fwhm_e1d = 4. # FWHM of smoothing kernel in dispersion pixels (converted to stdv below)
sig_e1d = fwhm_e1d/2./np.sqrt( 2.*np.log( 2 ) )
e1d_smth = scipy.ndimage.filters.gaussian_filter1d( e1d, sig_e1d )
sig_star = (sig_e1d*bp.dispersion_micrppix)/dwstar
ystar_smth = scipy.ndimage.filters.gaussian_filter1d( ystar, sig_star )
e1d_smth /= e1d_smth.max()
ystar_smth /= ystar_smth.max()
ix0, ix1 = wavsol_dispbound_ixs
cc = CrossCorrSol( wavsol0, e1d_smth, wstar, ystar_smth, \
ix0, ix1, dx_max=dwav_max, nshifts=nshifts )
wshift = cc[0]
vstretch = cc[1]
wavmicr0 = wavsol0-wshift
nl = np.arange( d1 )[::-1]
nr = np.arange( ndisp-d2-1 )
extl = wavmicr0[0]-(nl+1)*bp.dispersion_micrppix
extr = wavmicr0[-1]+(nr+1)*bp.dispersion_micrppix
wavmicr = np.concatenate( [ extl, wavmicr0, extr ] )
# Plot for checking the spectrum and wavelength solution:
plt.figure( figsize=[12,8] )
#specname = os.path.basename( self.btsettl_fpath )
titlestr = 'Teff={0:.0f}K, [M/H]={1:.2f}, logg={2:.2f}'.format( Teff, MH, logg )
plt.title( titlestr, fontsize=20 )
plt.plot( wbp, ybp/ybp.max(), '-g', \
label='{0} bandpass'.format( bp.config ) )
plt.plot( wavmicr0, e1d/e1d.max(), '-m', lw=2, \
label='cross-correlation' )
plt.plot( wstar, ystar_interp/ystar_interp.max(), '-r', \
label='stellar flux' )
plt.plot( wstar, ystar, '--c', lw=2, label='model spectrum' )
ixs = ( ybp>(1e-3)*ybp.max() )
plt.xlim( [ wbp[ixs].min(), wbp[ixs].max() ] )
plt.ylim( [ -0.1, 1.4 ] )
plt.legend( loc='upper left', ncol=2, fontsize=16 )
plt.xlabel( 'Wavelength (micron)', fontsize=18 )
plt.ylabel( 'Relative Flux/Throughput', fontsize=18 )
return None
def residsRMSVsBinSize( thrs, resids ):
"""
Given an array of residuals, computes the rms as a function
of bin size. For checking how the noise bins down compared
to white noise expectations.
"""
ndat = len( resids )
nbinsMin = 6 # minimum number of binned points for which to compute RMS
nptsMax = int( np.floor( ndat/float( nbinsMin ) ) )
nptsPerBin = 1+np.arange( nptsMax )
nbinSizes = len( nptsPerBin )
oixs = SplitHSTOrbixs( thrs )
norb = len( oixs )
x0 = np.arange( ndat )
rms = np.zeros( nbinSizes )
dthrs = np.median( np.diff( thrs ) )
tmin = np.zeros( nbinSizes )
#import matplotlib.pyplot as plt
#plt.close('all')
#plt.ion()
#plt.figure()
#plt.plot( thrs, resids, 'ok' )
for i in range( nbinSizes ):
npts_i = nptsPerBin[i]
tmin[i] = npts_i*dthrs*60
finished = False
residsb = []
tbin = []
for j in range( norb ):
ixs = oixs[j]
xj = x0[ixs]
nj = len( xj )
ixl = np.arange( 0, nj, npts_i )
ixu = ixl+npts_i#np.arange( npts_i, nj+1 )
nbj = len( ixl )
for k in range( nbj ):
residsb_jk = resids[ixs][ixl[k]:ixu[k]]
if len( residsb_jk )!=npts_i:
residsb_jk = resids[ixs][nj-npts_i:nj]
#print( len( residsb_jk ), npts_i )
#pdb.set_trace()
#print( 'aaaaa', j, len( residsb_jk ), npts_i )
residsb += [ np.mean( residsb_jk ) ]
#print( '\naaaa', j, k, ixl[k], ixu[k] )
#print( 'bbbbb', residsb_jk, resids[ixs][ixl[k]] )
#if npts_i>11:
# plt.plot( thrs[ixs][ixl[k]:ixu[k]], residsb_jk, '-x' )
# print( thrs[ixs][ixl[k]:ixu[k]], residsb_jk, residsb[-1] )
# pdb.set_trace()
residsb = np.array( residsb )
#pdb.set_trace()
#while finished==False:
#nbins_i = int( np.floor( ndat/float( npts_i ) ) )
#residsb = np.zeros( nbins_i )
#for j in range( nbins_i ):
# ix1 = j*npts_i
# ix2 = (j+1)*npts_i+1
# residsb[j] = np.mean( resids[ix1:ix2] )
rms[i] = np.sqrt( np.mean( residsb**2. ) )
#print( tmin )
#pdb.set_trace()
return nptsPerBin, tmin, rms
def residsRMSVsBinSizeBasic( resids ):
"""
Given an array of residuals, computes the rms as a function
of bin size. For checking how the noise bins down compared
to white noise expectations.
"""
ndat = len( resids )
nbinsMin = 6 # minimum number of binned points for which to compute RMS
nptsMax = int( np.floor( ndat/float( nbinsMin ) ) )
nptsPerBin = 1+np.arange( nptsMax )
nbinSizes = len( nptsPerBin )
rms = np.zeros( nbinSizes )
for i in range( nbinSizes ):
npts_i = nptsPerBin[i]
nbins_i = int( np.floor( ndat/float( npts_i ) ) )
residsb = np.zeros( nbins_i )
for j in range( nbins_i ):
ix1 = j*npts_i
ix2 = (j+1)*npts_i+1
residsb[j] = np.mean( resids[ix1:ix2] )
rms[i] = np.sqrt( np.mean( residsb**2. ) )
#pdb.set_trace()
return nptsPerBin, rms
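# Example check against the white-noise expectation (hypothetical usage; `resids`
# is a 1D array of unbinned residuals):
#   nppb, rms = residsRMSVsBinSizeBasic( resids )
#   rms_white = rms[0]/np.sqrt( nppb )
# correlated (red) noise shows up as rms decreasing more slowly than rms_white.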
def rvFunc( t, a1, a2 ):
"""
Within the context of the detector charge-trapping model,
a reasonable physical constraint is a1<0 and a2>0. These
restrictions are not applied within this routine, but can
be easily be built into whatever optimization metric is used
for determining the unknown parameters of the ramp model.
"""
return 1 + a1*np.exp( -a2*t )
def r0Func( torb, rvt, a3, a4, a5 ):
"""
Within the context of the detector charge-trapping model,
a reasonable physical constraint is a3<0, a4>0, and
a5<torb.min(). These restrictions are not applied within
this routine, but can be easily be built into whatever
optimization metric is used for determining the unknown
parameters of the ramp model.
"""
return 1 + a3*np.exp( -( torb-a5 )/(a4*rvt) )
def DERampNoBase( t, torb, pars ):
"""
Implementation of the double-exponential ramp model for WFC3 systematics.
Taken from Eq 1-3 of de Wit et al (2018).
"""
a1 = pars[0]
a2 = pars[1]
a3 = pars[2]
a4 = pars[3]
a5 = pars[4]
rvt = rvFunc( t, a1, a2 )
r0t = r0Func( torb, rvt, a3, a4, a5 )
bline = np.ones( t.size )
return bline, rvt*r0t
def DERampLinBase( bvar, t, torb, pars ):
"""
Implementation of the double-exponential ramp model for WFC3 systematics.
Taken from Eq 1-3 of de Wit et al (2018).
"""
a1 = pars[0]
a2 = pars[1]
a3 = pars[2]
a4 = pars[3]
a5 = pars[4]
b0 = pars[5]
b1 = pars[6]
rvt = rvFunc( t, a1, a2 )
r0t = r0Func( torb, rvt, a3, a4, a5 )
bline = b0 + b1*bvar # linear-time baseline trend
return bline, rvt*r0t
def DERampQuadBase( bvar, t, torb, pars ):
"""
Implementation of the double-exponential ramp model for WFC3 systematics.
Taken from Eq 1-3 of de Wit et al (2018).
"""
a1 = pars[0]
a2 = pars[1]
a3 = pars[2]
a4 = pars[3]
a5 = pars[4]
b0 = pars[5]
b1 = pars[6]
b2 = pars[7]
rvt = rvFunc( t, a1, a2 )
r0t = r0Func( torb, rvt, a3, a4, a5 )
bline = b0 + b1*bvar + b2*(bvar**2.) # quadratic-time baseline trend
return bline, rvt*r0t
def DERampExpBase( bvar, t, torb, pars ):
"""
Implementation of the double-exponential ramp model for WFC3 systematics.
Taken from Eq 1-3 of de Wit et al (2018).
"""
a1 = pars[0]
a2 = pars[1]
a3 = pars[2]
a4 = pars[3]
a5 = pars[4]
b0 = pars[5]
b1 = pars[6]
b2 = pars[7]
rvt = rvFunc( t, a1, a2 )
r0t = r0Func( torb, rvt, a3, a4, a5 )
bline = b0 + b1*np.exp( -b2*bvar ) # exponential-time baseline trend
return bline, rvt*r0t
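# Taken together, the DERamp* routines evaluate the systematics model
#   model = baseline * rv(t) * r0(t, torb)
# with (as implemented in rvFunc and r0Func above)
#   rv(t) = 1 + a1*exp( -a2*t )
#   r0(t, torb) = 1 + a3*exp( -( torb - a5 )/( a4*rv(t) ) )
# and baseline = 1, b0 + b1*bvar, b0 + b1*bvar + b2*bvar**2, or b0 + b1*exp( -b2*bvar )
# for the NoBase/LinBase/QuadBase/ExpBase variants respectively.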
def Zap2D( ecounts2d, nsig_transient=8, nsig_static=10, niter=1 ):
"""
Routine for identifying static and transient bad pixels in a 2d spectroscopic data cube.
Inputs:
ecounts2d - NxMxK data cube where N is cross-dispersion, M is dispersion, K is frame number.
nsig_transient - threshold (in sigma) for flagging transient bad pixels.
nsig_static - threshold (in sigma) for flagging static bad pixels.
niter - number of iterations to be used
Outputs:
e2d_zapped - NxMxK cube containing the data with bad pixels corrected.
transient_bad_pixs - NxMxK cube containing 1's for transient bad pixels and 0's otherwise
static_bad_pixs - NxMxK cube containing 1's for static bad pixels and 0's otherwise
e2d_medfilt - NxMxK cube containing nominal PSF for each frame made using median filter
"""
print( '\nCleaning cosmic rays:' )
# Initialise arrays to hold all the outputs:
ndisp, ncross, nframes = np.shape( ecounts2d )
e2d_zapped = np.zeros( [ ndisp, ncross, nframes ] )
e2d_medfilt = np.zeros( [ ndisp, ncross, nframes ] )
transient_bpixs = np.zeros( [ ndisp, ncross, nframes ] )
static_bpixs = np.zeros( [ ndisp, ncross, nframes ] )
############ DIFFERENCE?
# First apply a Gaussian filter to the pixel values
# along the time axis of the data cube:
e2d_smth = scipy.ndimage.filters.gaussian_filter1d( ecounts2d, sigma=5, axis=2 )
e2d_smthsub = ecounts2d - e2d_smth # pixel deviations from smoothed time series
############ DOES THE ABOVE ACTUALLY HELP?
med2d = np.median( e2d_smthsub, axis=2 ) # median deviation for each pixel
stdv2d = np.std( e2d_smthsub, axis=2 ) # standard deviation in the deviations for each pixel
# Loop over the data frames:
for i in range( nframes ):
e2d_zapped[:,:,i] = ecounts2d[:,:,i].copy()
# Identify and replace transient bad pixels, possibly iterating more than once:
for k in range( niter ):
# Find the deviations of each pixel in the current frame in terms of
# number-of-sigma relative to the corresponding smoothed time series for
# each pixel:
e2d_smthsub = e2d_zapped[:,:,i] - e2d_smth[:,:,i]
dsig_transient = np.abs( ( e2d_smthsub-med2d )/stdv2d )
# Flag the outliers:
ixs_transient = ( dsig_transient>nsig_transient )
# Create a median-filter frame by taking the median of 5 pixels along the
# cross-dispersion axis for each pixel, to be used as a nominal PSF:
medfilt_ik = scipy.ndimage.filters.median_filter( e2d_zapped[:,:,i], size=[5,1] )
# Interpolate any flagged pixels:
e2d_zapped[:,:,i][ixs_transient] = medfilt_ik[ixs_transient]
# Record the pixels that were flagged in the transient bad pixel map:
transient_bpixs[:,:,i][ixs_transient] = 1
ntransient = transient_bpixs[:,:,i].sum() # number of transient bad pixels for current frame
######## FLAGGING STATIC BAD PIXELS LIKE THIS SEEMS TO PRODUCE PROBLEMATIC
######## RESULTS SO I'M NULLING IT OUT FOR NOW BY SETTING THE NSIG REALLY HIGH:
nsig_static = 1e9 # delete/change eventually...
# Identify and replace static bad pixels, possibly iterating more than once:
for k in range( niter ):
# Create a median-filter frame by taking the median of 5 pixels along the
# cross-dispersion axis for each pixel, to be used as a nominal PSF:
medfilt_ik = scipy.ndimage.filters.median_filter( e2d_zapped[:,:,i], size=[5,1] )
# Find the deviations of each pixel in the current frame in terms of
# number-of-sigma relative to the nominal PSF:
dcounts_static = e2d_zapped[:,:,i] - medfilt_ik
stdv_static = np.std( dcounts_static )
dsig_static = np.abs( dcounts_static/stdv_static )
# Flag the outliers:
ixs_static = ( dsig_static>nsig_static )
# Interpolate any flagged pixels:
e2d_zapped[:,:,i][ixs_static] = medfilt_ik[ixs_static]
# Record the pixels that were flagged in the static bad pixel map:
static_bpixs[:,:,i][ixs_static] = 1
nstatic = static_bpixs[:,:,i].sum() # number of static bad pixels for current frame
e2d_medfilt[:,:,i] = medfilt_ik # record the nominal PSF for the current frame
print( '... frame {0} of {1}: ntransient={2}, nstatic={3}'.format( i+1, nframes, ntransient, nstatic ) )
return e2d_zapped, transient_bpixs, static_bpixs, e2d_medfilt
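# Typical usage (hypothetical; ecounts2d is the cross-dispersion x dispersion x frame
# cube described in the docstring):
#   e2d_clean, tbad, sbad, e2d_psf = Zap2D( ecounts2d, nsig_transient=8, niter=1 )
#   frac_flagged = tbad.sum()/float( tbad.size )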
def Zap1D( ecounts1d, nsig_transient=5, niter=2 ):
# todo=adapt this routine; NOTE THAT I NOW PASS IN THE TRIMMED ECOUNTS1D ARRAY
nframes, ndisp = np.shape( ecounts1d )
x = np.arange( ndisp )
bad_pixs = np.zeros( [ nframes, ndisp ] )
y = np.median( ecounts1d[:-5,:], axis=0 )
ecounts1d_zapped = ecounts1d.copy()
for k in range( niter ):
zk = ecounts1d_zapped.copy()
# Create the normalised common-mode lightcurve:
x0 = np.mean( zk, axis=1 )
x0 /= x0[-1]
# Remove the common-mode signal:
for j in range( ndisp ):
zk[:,j] /= x0
# Update the master spectrum:
#y0 = np.median( zk[-5:,:], axis=0 )
y0 = np.median( zk, axis=0 )
# Compute the relative variations for each individual
# spectrum relative to the master spectrum:
for i in range( nframes ):
zk[i,:] /= y0
zkmed = np.median( zk, axis=0 )
zksig = np.std( zk, axis=0 )
dsig = np.zeros( [ nframes, ndisp ] )
for i in range( nframes ):
dsig[i,:] = np.abs( zk[i,:]-zkmed )/zksig
cixs = dsig>nsig_transient
bad_pixs[cixs] = 1
medfilt = scipy.ndimage.filters.median_filter( ecounts1d_zapped, size=[5,1] )
ecounts1d_zapped[cixs] = medfilt[cixs]
print( 'Iter {0}: max(dsig)={1:.2f}'.format( k+1, dsig.max() ) )
print( 'Zap1D flagged {0:.0f} bad pixels.\n'.format( bad_pixs.sum() ) )
return ecounts1d_zapped, bad_pixs
def WFC3Nreads( hdu ):
nreads = int( ( len( hdu )-1 )/5 )
if nreads!=hdu[0].header['NSAMP']:
nreads = -1 # must be a corrupt/incomplete file
else:
nreads -= 1 # exclude the zeroth read
return nreads
def WFC3JthRead( hdu, nreads, j ):
ix = 1+nreads*5-j*5
read = hdu[ix].data
sampt = hdu[ix].header['SAMPTIME']
# Get as electrons:
if hdu[1].header['BUNIT'].lower()=='electrons':
ecounts = read
elif hdu[1].header['BUNIT'].lower()=='electrons/s':
ecounts = read*sampt
else:
pdb.set_trace()
return ecounts
def SplitHSTOrbixs( thrs ):
tmins = thrs*60.0
n = len( tmins )
ixs = np.arange( n )
dtmins = np.diff( tmins )
a = 1 + np.arange( n-1 )[dtmins>5*np.median( dtmins )]
a = np.concatenate( [ [0], a, [n] ] )
norb = len( a ) - 1
orbixs = []
for i in range( norb ):
orbixs += [ np.arange( a[i], a[i + 1] ) ]
return orbixs
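# SplitHSTOrbixs groups the exposures into HST orbits by looking for gaps in the
# time stamps larger than 5x the median cadence; it returns a list of index
# arrays, one per orbit, which the ramp-fitting routines below rely on.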
def DERampOLD( thrs, torb, pars ):
"""
Double-exponential ramp function from de Wit et al (2018),
"""
a1 = pars[0]
a2 = pars[1]
a3 = pars[2]
a4 = pars[3]
a5 = pars[4]
a6 = pars[5]
a7 = pars[6]
rvt = rvFuncOLD( thrs, a1, a2 )
r0t = r0FuncOLD( thrs, torb, rvt, a3, a4, a5 )
lintrend = a6+a7*thrs
return rvt*r0t*lintrend
def rvFuncOLD( thrs, a1, a2 ):
return 1+a1*np.exp( -thrs/a2 )
def r0FuncOLD( thrs, torb, rvt, a3, a4, a5 ):
return 1+a3*np.exp( -( torb-a5 )/(a4*rvt) )
def BTSettlBinDown( fpathu, fpathb ):
d = np.loadtxt( fpathu )
wav = d[:,0]*(1e-4) # convert A to micr
flux = d[:,1] # leave flux per A for consistency
# Restrict to wavelengths shorter than some threshold:
wu = 20
ixs = ( wav<wu )
wav = wav[ixs]
flux = flux[ixs]
# Define the number of bins per micron:
nbin_per_micron = 300
nbins = wu*nbin_per_micron
# Bin the high-res spectrum:
wavb, fluxb, stdvs, npb = Bin1D( wav, flux, nbins=nbins )
ixs = npb>0
wav_A = wavb[ixs]*(1e4) # convert back to A for consistency
flux = fluxb[ixs]
# Save the binned spectrum:
output = np.column_stack( [ wav_A, flux ] )
np.savetxt( fpathb, output )
print( 'Saved:\n{0}'.format( fpathb ) )
return None
def Bin1D( x, y, nbins=10, shift_left=0.0, shift_right=0.0 ):
"""
shift_left and shift_right are optional arguments that allow you to shift the
bins either to the left or right by a specified fraction of a bin width.
"""
x = x.flatten()
y = y.flatten()
if len( x )!=len( y ):
raise ValueError( 'vector dimensions do not match' )
binw = (x.max()-x.min())/float(nbins)
if nbins>1:
# Have half-bin overlap at start:
wmin = x.min()-binw/2.-binw*shift_left+binw*shift_right
elif nbins==1:
wmin = x.min()
else:
pdb.set_trace()
wmax = x.max()
wrange = wmax - wmin
if nbins>1:
xbin = list(np.r_[wmin+binw/2.:wmax-binw/2.:nbins*1j])
elif nbins==1:
xbin = [wmin+0.5*wrange]
else:
pdb.set_trace()
ybin = list(np.zeros(nbins))
ybinstdvs = np.zeros(nbins)
nperbin = np.zeros(nbins)
for j in range(nbins):
l = (abs(x - xbin[j]) <= binw/2.)
if l.any():
nperbin[j] = len(y[l])
ybin[j] = np.mean(y[l])
ybinstdvs[j] = np.std(y[l])
return np.array(xbin), np.array(ybin), np.array(ybinstdvs), np.array(nperbin)
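# Example (hypothetical arrays x, y): bin into 20 bins and keep only bins that
# received at least one point, mirroring the usage in BTSettlBinDown above:
#   xb, yb, yerr, npb = Bin1D( x, y, nbins=20 )
#   ok = npb>0
#   xb, yb, yerr = xb[ok], yb[ok], yerr[ok]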
def GetStrs( prelim_fit, beta_free ):
if prelim_fit==True:
prelimstr = 'prelim'
else:
prelimstr = 'final'
if beta_free==True:
betastr = 'beta_free'
else:
betastr = 'beta_fixed'
return prelimstr, betastr
def GetLDKey( ld ):
if ld=='ldatlas_nonlin_fixed':
ldkey = 'ld_nonlin_fixed'
elif ld=='ldatlas_nonlin_free':
ldkey = 'ld_nonlin_free'
elif ld=='ldatlas_linear_fixed':
ldkey = 'ld_linear_fixed'
elif ld=='ldatlas_linear_free':
ldkey = 'ld_linear_free'
elif ld=='ldatlas_quad_free':
ldkey = 'ld_quad_free'
elif ld=='ldatlas_quad_fixed':
ldkey = 'ld_quad_fixed'
elif ld=='ldsing_nonlin_fixed':
ldkey = 'ldsing_nonlin_fixed'
elif ld=='ldtk_free':
ldkey = 'ldtk_quad'
elif ld=='ldsing_free':
ldkey = 'ldsing_quad'
else:
pdb.set_trace()
return ldkey
def LinTrend( jd, tv, flux ):
delt = jd-jd[0]
orbixs = SplitHSTOrbixs( delt*24 )
#t1 = tv[orbixs[0]][-1]
#t2 = tv[orbixs[-1]][-1]
#f1 = flux[orbixs[0]][-1]
#f2 = flux[orbixs[-1]][-1]
#t1 = np.mean( tv[orbixs[0]] )
#t2 = np.mean( tv[orbixs[-1]] )
#f1 = np.mean( flux[orbixs[0]] )
#f2 = np.mean( flux[orbixs[-1]] )
n1 = int( np.floor( 0.5*len( orbixs[0] ) ) )
n2 = int( np.floor( 0.5*len( orbixs[-1] ) ) )
t1 = np.mean( tv[orbixs[0][n1:]] )
t2 = np.mean( tv[orbixs[-1][n2:]] )
f1 = np.median( flux[orbixs[0][n1:]] )
f2 = np.median( flux[orbixs[-1][n2:]] )
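# Solve the 2x2 system [1 t1; 1 t2] z = [f1 f2]^T for z = [offset, slope], i.e.
# the straight line through the first- and last-orbit (time, flux) medians.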
v1 = [ 1, t1 ]
v2 = [ 1, t2 ]
A = np.row_stack( [ v1, v2 ] )
c = np.reshape( [f1,f2], [2,1] )
z = np.linalg.lstsq( A, c, rcond=None )[0].flatten()
return z
def MVNormalWhiteNoiseLogP( r, u, n ):
term1 = -np.sum( numexpr.evaluate( 'log( u )' ) )
term2 = -0.5*np.sum( numexpr.evaluate( '( r/u )**2.' ) )
return term1 + term2 - 0.5*n*np.log( 2*np.pi )
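# i.e. the Gaussian log-likelihood for residuals r with independent per-point
# uncertainties u:
#   ln L = -sum_i ln(u_i) - 0.5*sum_i (r_i/u_i)**2 - 0.5*n*ln(2*pi)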
def NormalLogP( x, mu, sig ):
term1 = -0.5*np.log( 2*np.pi*( sig**2. ) )
term2 = -0.5*( ( ( x-mu )/sig )**2. )
return term1+term2
def GetVarKey( y ):
if y=='hstphase':
return 'hstphase', 'phi'
if y=='loghstphase':
return 'hstphase', 'logphi'
if y=='wavshift':
return 'wavshift_pix', 'wavshift'
if y=='cdshift':
return 'cdcs', 'cdshift'
if y=='t':
return 'tv', 't'
def GetWalkerState( mcmc ):
keys = list( mcmc.model.free.keys() )
walker_state = {}
for k in keys:
walker_state[k] = mcmc.walker_chain[k][-1,:]
return walker_state
def GetGPStr( gpinputs ):
gpstr = ''
for k in gpinputs:
gpstr += '{0}_'.format( k )
gpstr = gpstr[:-1]
if gpstr=='hstphase_t_wavshift_cdshift':
gpstr = 'gp1111'
elif gpstr=='hstphase_wavshift_cdshift':
gpstr = 'gp1011'
elif gpstr=='hstphase_wavshift':
gpstr = 'gp1010'
elif gpstr=='hstphase':
gpstr = 'gp1000'
elif gpstr=='hstphase_t':
gpstr = 'gp1100'
else:
pdb.set_trace()
return gpstr
def MaxLogLikePoint( walker_chain, mbundle ):
ixs0 = np.isfinite( walker_chain['logp'] )
ix = np.argmax( walker_chain['logp'][ixs0] )
ix = np.unravel_index( ix, walker_chain['logp'][ixs0].shape )
print( '\nLocating maximum likelihood values...' )
parVals = {}
mp = pyhm.MAP( mbundle )
for key in mp.model.free.keys():
parVals[key] = walker_chain[key][ixs0][ix]
logLikeMax = walker_chain['logp'][ixs0][ix]
return parVals, logLikeMax
def RefineMLE( walker_chain, mbundle ):
"""
Takes a walker group chain and refines the MLE.
"""
#ixs0 = np.isfinite( walker_chain['logp'] )
#ix = np.argmax( walker_chain['logp'][ixs0] )
#ix = np.unravel_index( ix, walker_chain['logp'][ixs0].shape )
parVals, logLikeMax = MaxLogLikePoint( walker_chain, mbundle )
print( '\nRefining the best-fit solution...' )
mp = pyhm.MAP( mbundle )
for key in mp.model.free.keys():
#mp.model.free[key].value = walker_chain[key][ixs0][ix]
mp.model.free[key].value = parVals[key]
mp.fit( xtol=1e-4, ftol=1e-4, maxfun=10000, maxiter=10000 )
print( 'Done.\nRefined MLE values:' )
mle_refined = {}
for key in mp.model.free.keys():
mle_refined[key] = mp.model.free[key].value
print( '{0} = {1}'.format( key, mp.model.free[key].value ) )
return mle_refined
def RefineMLE_PREVIOUS( walker_chain, mbundle ):
"""
Takes a walker group chain and refines the MLE.
"""
ix = np.argmax( walker_chain['logp'] )
ix = np.unravel_index( ix, walker_chain['logp'].shape )
print( '\nRefining the best-fit solution...' )
mp = pyhm.MAP( mbundle )
for key in mp.model.free.keys():
mp.model.free[key].value = walker_chain[key][ix]
mp.fit( xtol=1e-4, ftol=1e-4, maxfun=10000, maxiter=10000 )
print( 'Done.\nRefined MLE values:' )
mle_refined = {}
for key in mp.model.free.keys():
mle_refined[key] = mp.model.free[key].value
print( '{0} = {1}'.format( key, mp.model.free[key].value ) )
return mle_refined
def DefineLogiLprior( z, vark, label, priortype='uniform' ):
if priortype=='uniform':
nrupp = 100
zrange = z.max()-z.min()
#dz = np.median( np.abs( np.diff( z ) ) )
dzarr = np.abs( np.diff( z ) )
dz = np.min( dzarr[dzarr>0] )
#zlow = -10
zlow = np.log( 1./( nrupp*zrange ) )
if vark!='t':
#zupp = 10
zupp = np.log( 1/( dz ) )
else: # prevent t having short correlation length scale
zupp = np.log( 1./zrange )
#zupp = np.log( 1/( dz ) )
logiL_prior = pyhm.Uniform( label, lower=zlow, upper=zupp )
#print( '\n', label )
#print( 'zlow = ', zlow )
#print( 'zupp = ', zupp )
#print( ' dz = ', dz )
#print( np.shape( z ) )
#pdb.set_trace()
elif priortype=='gamma':
logiL_prior = pyhm.Gamma( label, alpha=1, beta=1 )
#print( label, zlow, zupp )
#print( z.max()-z.min() )
#pdb.set_trace()
return logiL_prior
def GetChainFromWalkers( walker_chains, nburn=0 ):
ngroups = len( walker_chains )
for i in range( ngroups ):
walker_chain = walker_chains[i]
keys = list( walker_chain.keys() )
keys.remove( 'logp' )
npar = len( keys )
chain_dicts = []
chain_arrs = []
for i in range( ngroups ):
chain_i = pyhm.collapse_walker_chain( walker_chains[i], nburn=nburn )
try:
chain_i['incl'] = np.rad2deg( np.arccos( chain_i['b']/chain_i['aRs'] ) )
except:
pass
chain_dicts += [ chain_i ]
grs = pyhm.gelman_rubin( chain_dicts, nburn=0, thin=1 )
chain = pyhm.combine_chains( chain_dicts, nburn=nburn, thin=1 )
return chain, grs
def BestFitsEval( mle, evalmodels ):
dsets = list( evalmodels.keys() )
bestfits = {}
batpars = {}
pmodels = {}
for k in dsets:
scankeys = list( evalmodels[k].keys() )
bestfits[k] = {}
batpars[k] = {}
pmodels[k] = {}
for j in scankeys:
z = evalmodels[k][j][0]( mle )
bestfits[k][j] = z['arrays']
batpars[k][j] = z['batpar']
pmodels[k][j] = z['pmodel']
#print( 'rrrr', z['arrays'].keys() )
#pdb.set_trace()
return bestfits, batpars, pmodels
def MultiColors():
z = [ [31,120,180], \
[166,206,227], \
[178,223,138], \
[51,160,44], \
[251,154,153], \
[227,26,28], \
[253,191,111], \
[255,127,0], \
[202,178,214], \
[106,61,154], \
[177,89,40] ]
rgbs = []
for i in range( len( z ) ):
rgbs += [ np.array( z[i] )/256. ]
return rgbs
def NaturalSort( iterable, key=None, reverse=False):
"""
Return a new naturally sorted list from the items in *iterable*.
The returned list is in natural sort order. The string is ordered
lexicographically (using the Unicode code point number to order individual
characters), except that multi-digit numbers are ordered as a single
character.
Has two optional arguments which must be specified as keyword arguments.
*key* specifies a function of one argument that is used to extract a
comparison key from each list element: ``key=str.lower``. The default value
is ``None`` (compare the elements directly).
*reverse* is a boolean value. If set to ``True``, then the list elements are
sorted as if each comparison were reversed.
The :func:`natural_sorted` function is guaranteed to be stable. A sort is
stable if it guarantees not to change the relative order of elements that
compare equal --- this is helpful for sorting in multiple passes (for
example, sort by department, then by salary grade).
Taken from:
https://github.com/bdrung/snippets/blob/master/natural_sorted.py
"""
prog = re.compile(r"(\d+)")
def alphanum_key(element):
"""Split given key in list of strings and digits"""
return [int(c) if c.isdigit() else c for c in prog.split(key(element)
if key else element)]
return sorted(iterable, key=alphanum_key, reverse=reverse)
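# Example (hypothetical filenames):
#   NaturalSort( [ 'ima10.fits', 'ima2.fits', 'ima1.fits' ] )
# returns [ 'ima1.fits', 'ima2.fits', 'ima10.fits' ], whereas the built-in
# sorted() would place 'ima10.fits' before 'ima2.fits'.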
def ScanVal( x ):
if x=='f':
return 1
elif x=='b':
return -1
else:
return None
def RefineMLEfromGroups( walker_chains, mbundle ):
ngroups = len( walker_chains )
# Identify which walker group hits the highest logp:
logp = np.zeros( ngroups )
for i in range( ngroups ):
logp[i] = np.max( walker_chains[i]['logp'] )
ix = np.argmax( logp )
# Restrict to this walker group:
return RefineMLE( walker_chains[ix], mbundle )
def MaxLogLikefromGroups( walker_chains, mbundle ):
ngroups = len( walker_chains )
# Identify which walker group hits the highest logp:
parVals = []
logp = np.zeros( ngroups )
for i in range( ngroups ):
z = MaxLogLikePoint( walker_chains[i], mbundle )
parVals += [ z[0] ]
logp[i] = z[1]
ix = np.argmax( logp )
# Restrict to this walker group:
#return RefineMLE( walker_chains[ix], mbundle )
return parVals[ix]
def GetInitWalkers( mcmc, nwalkers, init_par_ranges ):
init_walkers = {}
for key in list( mcmc.model.free.keys() ):
init_walkers[key] = np.zeros( nwalkers )
for i in range( nwalkers ):
for key in mcmc.model.free.keys():
startpos_ok = False
counter = 0
while startpos_ok==False:
startpos = init_par_ranges[key].random()
mcmc.model.free[key].value = startpos
if np.isfinite( mcmc.model.free[key].logp() )==True:
startpos_ok = True
else:
counter += 1
if counter>100:
print( '\n\nTrouble initialising walkers!\n\n' )
for key in mcmc.model.free.keys():
print( key, mcmc.model.free[key].value, \
mcmc.model.free[key].parents, \
mcmc.model.free[key].logp() )
pdb.set_trace()
init_walkers[key][i] = startpos
return init_walkers
#def CrossCorrSol( self, x0, ymeas, xtarg, ytarg, ix0, ix1, dx_max=1, nshifts=1000 )
def CrossCorrSol( x0, ymeas, xtarg, ytarg, ix0, ix1, dx_max=1, nshifts=1000 ):
"""
The mapping is: [ x0-shift, ymeas ] <--> [ xtarg, ytarg ]
[ix0,ix1] are the indices defining where to compute residuals along dispersion axis.
"""
dw = np.median( np.diff( xtarg ) )
wlow = x0.min()-dx_max-dw
wupp = x0.max()+dx_max+dw
# Extend the target array at both edges:
dwlow = np.max( [ xtarg.min()-wlow, 0 ] )
dwupp = np.max( [ wupp-xtarg.max(), 0 ] )
wbuff_lhs = np.r_[ xtarg.min()-dwlow:xtarg.min():dw ]
wbuff_rhs = np.r_[ xtarg.max()+dw:xtarg.max()+dwupp:dw ]
xtarg_ext = np.concatenate( [ wbuff_lhs, xtarg, wbuff_rhs ] )
fbuff_lhs = np.zeros( len( wbuff_lhs ) )
fbuff_rhs = np.zeros( len( wbuff_rhs ) )
ytarg_ext = np.concatenate( [ fbuff_lhs, ytarg, fbuff_rhs ] )
# Interpolate the extended target array:
interpf = scipy.interpolate.interp1d( xtarg_ext, ytarg_ext )
shifts = np.linspace( -dx_max, dx_max, nshifts )
vstretches = np.zeros( nshifts )
rms = np.zeros( nshifts )
# Loop over the wavelength shifts, where for each shift we move
# the target array and compare it to the measured array:
A = np.ones( [ ymeas.size, 2 ] )
b = np.reshape( ymeas/ymeas.max(), [ ymeas.size, 1 ] )
ss_fits = []
diffsarr = []
for i in range( nshifts ):
# Assuming the default x-solution is x0, shift the model
# array by dx. If this provides a good match to the data,
# it means that the default x-solution x0 is off by dx.
ytarg_shifted_i = interpf( x0 - shifts[i] )
A[:,1] = ytarg_shifted_i/ytarg_shifted_i.max()
res = np.linalg.lstsq( A, b, rcond=None )
c = res[0].flatten()
vstretches[i] = c[1]
fit = np.dot( A, c )
diffs = b.flatten() - fit.flatten()
rms[i] = np.mean( diffs[ix0:ix1+1]**2. )
ss_fits +=[ fit.flatten() ]
diffsarr += [ diffs ]
ss_fits = np.row_stack( ss_fits )
diffsarr = np.row_stack( diffsarr )
rms -= rms.min()
# Because the rms versus shift is well-approximated as parabolic,
# refine the shift corresponding to the minimum rms by fitting
# a parabola to the shifts evaluated above:
offset = np.ones( nshifts )
phi = np.column_stack( [ offset, shifts, shifts**2. ] )
nquad = min( [ nshifts, 15 ] )
ixmax = np.arange( nshifts )[np.argsort( rms )][nquad]
ixs = rms<rms[ixmax]
coeffs = np.linalg.lstsq( phi[ixs,:], rms[ixs], rcond=None )[0]
nshiftsf = 100*nshifts
offsetf = np.ones( nshiftsf )
shiftsf = np.linspace( shifts.min(), shifts.max(), nshiftsf )
phif = np.column_stack( [ offsetf, shiftsf, shiftsf**2. ] )
rmsf = np.dot( phif, coeffs )
vstretchesf = np.interp( shiftsf, shifts, vstretches )
ixf = np.argmin( rmsf )
ix = np.argmin( rms )
ix0 = ( shifts==0 )
diffs0 = diffsarr[ix0,:].flatten()
return shiftsf[ixf], vstretchesf[ixf], ss_fits[ix,:], diffsarr[ix,:], diffs0
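# The first two return values are the refined wavelength shift and vertical
# stretch; checkStellarModel() above shows the intended usage, applying the
# shift as wavmicr0 = wavsol0 - wshift.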
def PrepRampPars( datasets, data, data_ixs, scankeys, baseline, \
rampScanShare ):
# For each scan direction, the systematics model consists of a
# double-exponential ramp (a1,a2,a3,a4,a5):
rlabels0 = [ 'a1', 'a2', 'a3', 'a4', 'a5' ]
# Initial values for systematics parameters:
rlabels = []
rfixed = []
rinit = []
rixs = {}
fluxc = {}
c = 0 # counter
ndsets = len( datasets )
# fluxc is split by dataset, not scan direction; however, it isn't
# actually used for specLC fits as a good estimate is already
# available for the psignal from the whiteLC fit:
for k in range( ndsets ):
rparsk, fluxck = PrelimRPars( datasets[k], data, data_ixs, scankeys, \
baseline, rampScanShare )
fluxc[datasets[k]] = fluxck
#print( fluxck.keys() )
#pdb.set_trace()
rlabels += [ rparsk['rlabels'] ]
rfixed = np.concatenate( [ rfixed, rparsk['rfixed'] ] )
rinit = np.concatenate( [ rinit, rparsk['rpars_init'] ] )
for i in list( rparsk['rixs'].keys() ):
rixs[i] = rparsk['rixs'][i]+c
c += len( rparsk['rlabels'] )
rlabels = np.concatenate( rlabels ).flatten()
r = { 'labels':rlabels, 'fixed':rfixed, 'pars_init':rinit, 'ixs':rixs }
return r, fluxc
def PrelimRPars( dataset, data, data_ixs, scankeys, baseline, rampScanShare ):
"""
"""
if len( scankeys[dataset] )>1:
if rampScanShare==True:
r, fluxc = PrelimRParsScanShared( dataset, data, data_ixs, \
scankeys, baseline )
else:
r, fluxc = PrelimRParsScanSeparate( dataset, data, data_ixs, \
scankeys, baseline )
else:
r, fluxc = PrelimRParsScanSeparate( dataset, data, data_ixs, \
scankeys, baseline )
if 0: # DELETE
plt.figure()
for k in scankeys[dataset]:
idkey = '{0}{1}'.format( dataset, k )
plt.plot( data[:,1][data_ixs[dataset][k]], fluxc[k], 'o' )
pdb.set_trace()
return r, fluxc
def PrelimRParsScanShared( dataset, data, data_ixs, scankeys, baseline ):
ixsd = data_ixs
thrs = data[:,1]
torb = data[:,2]
dwav = data[:,3]
bvar = thrs # TODO allow this to be another variable
flux = data[:,4]
# Must loop over scan directions to get fluxc right
# for each scan direction:
rpars0 = {}
fluxFit = flux*np.ones_like( flux )
for k in scankeys[dataset]:
ixsk = ixsd[dataset][k] # data ixs for current dataset + scan direction
fluxFit[ixsk] = fluxFit[ixsk]/np.median( fluxFit[ixsk][-3:] )
# Run a quick double-exponential ramp fit on the first and last HST
# orbits to get reasonable starting values for the parameters:
rpars0, bfit, rfit = PrelimDEFit( dataset, bvar, thrs, \
torb, fluxFit, baseline )
# Note that the above ramp fit 'rfit' will perform fit with self.baseline,
# then return only the ramp parameters, but fluxc will be the flux
# corrected by model=ramp*baseline, which is then used for a
# preliminary planet signal fit.
# For dataset, one set of ramp parameters for both scan directions:
rlabels = [ 'a1_{0}'.format( dataset ), 'a2_{0}'.format( dataset ), \
'a3_{0}'.format( dataset ), 'a4_{0}'.format( dataset ), \
'a5_{0}'.format( dataset ) ]
rlabels = np.array( rlabels, dtype=str )
# The ramp parameter ixs are split by scan direction, however:
nrpar = len( rpars0 )
rfixed = np.zeros( nrpar ) # all parameters free
rixs = {}
fluxc = {}
for k in scankeys[dataset]:
idkey = '{0}{1}'.format( dataset, k )
rixs[idkey] = np.arange( 0, 0+nrpar )
ixsk = ixsd[dataset][k]
#fluxc[idkey] = flux[ixsk]/( bfit[ixsk]*rfit[ixsk] )
fluxc[k] = flux[ixsk]/rfit[ixsk] # only remove ramp; preserve offset
r = { 'rlabels':rlabels, 'rfixed':rfixed, 'rpars_init':rpars0, 'rixs':rixs }
return r, fluxc
def PrelimRParsScanSeparate( dataset, data, data_ixs, scankeys, baseline ):
rlabels0 = [ 'a1', 'a2', 'a3', 'a4', 'a5' ]
rlabels = []
rfixed = []
rinit = []
rixs = {}
fluxc = {}
c = 0 # counter
ixsd = data_ixs
for k in scankeys[dataset]:
ixsdk = ixsd[dataset][k] # data ixs for current dataset + scan direction
thrsdk = data[:,1][ixsdk]
torbdk = data[:,2][ixsdk]
dwavdk = data[:,3][ixsdk]
bvardk = thrsdk # TODO allow this to be another variable
fluxdk = data[:,4][ixsdk]
idkey = '{0}{1}'.format( dataset, k )
# Run a quick double-exponential ramp fit on the first
# and last HST orbits to get reasonable starting values
# for the parameters:
rpars0k, bfitk, rfitk = PrelimDEFit( dataset, bvardk, thrsdk, torbdk, \
fluxdk, baseline )
rinit = np.concatenate( [ rinit, rpars0k ] )
nrpar = len( rpars0k )
rixs[idkey] = np.arange( c*nrpar, (c+1)*nrpar )
fluxc[idkey] = fluxdk/rfitk # only remove ramp; preserve offset (cf. PrelimRParsScanShared)
rfixed = np.concatenate( [ rfixed, np.zeros( nrpar ) ] )
rlabels_ik = []
for j in range( nrpar ):
rlabels_ik += [ '{0}_{1}{2}'.format( rlabels0[j], dataset, k ) ]
rlabels += [ np.array( rlabels_ik, dtype=str ) ]
c += 1
r = { 'rlabels':rlabels, 'rfixed':rfixed, 'rpars_init':rinit, 'rixs':rixs }
# NOTE: This hasn't been tested in current format (2020-Nov-10th).
return r, fluxc
def PrelimDEFit( dset, bvar, thrs, torb, flux, baseline ):
"""
Performs preliminary fit for the ramp systematics, only
fitting to the first and last HST orbits.
"""
print( '\nRunning preliminary DE ramp fit for {0}'.format( dset ) )
print( '(using only the first and last orbits)' )
if ( baseline=='linearT' )+( baseline=='linearX' ):
rfunc = DERampLinBase
nbase = 2
elif baseline=='quadratic':
rfunc = DERampQuadBase
nbase = 3
elif baseline=='exponential':
rfunc = DERampExpBase
nbase = 3
else:
pdb.set_trace()
orbixs = SplitHSTOrbixs( thrs )
ixs = np.concatenate( [ orbixs[0], orbixs[-1] ] )
def CalcRMS( pars ):
baseline, ramp = rfunc( bvar[ixs], thrs[ixs], torb[ixs], pars )
resids = flux[ixs]-baseline*ramp
rms = np.sqrt( np.mean( resids**2. ) )
return rms
ntrials = 30
rms = np.zeros( ntrials )
pfit = []
for i in range( ntrials ):
print( '... trial {0:.0f} of {1:.0f}'.format( i+1, ntrials ) )
b0i = flux[-1]
#b0i = np.median( flux )
b1i = 0
# These starting values seem to produce reasonable results:
a1b = 1e-3
a2i = 1
a3b = 1e-3
a4i = 0.01
a5i = 0.001
bb = 0.1
pinit = [ a1b*np.random.randn(), a2i*( 1+bb*np.random.randn() ), \
a3b*np.random.randn(), a4i*( 1+bb*np.random.randn() ), \
a5i*( 1+bb*np.random.randn() ), b0i, b1i ]
#pinit = [ (1e-3)*np.random.randn(), 0.1+0.005*np.random.random(), \
# (1e-3)*np.random.randn(), 0.1+0.005*np.random.random(), \
# (1.+0.005*np.random.random() )/60., flux[-1], 0 ]
if nbase==3:
pinit += [ 0 ]
pfiti = scipy.optimize.fmin( CalcRMS, pinit, maxiter=1e4, xtol=1e-3, \
ftol=1e-4, disp=False )
rms[i] = CalcRMS( pfiti )
pfit += [ pfiti ]
pbest = pfit[
|
np.argmin(rms)
|
numpy.argmin
|
'''
ssc_cvx_test.py
@author agitlin
Unit tests for the spider.cluster.ssc_cvx module
'''
import unittest
import numpy as np
import numpy.matlib as ml
from sklearn.preprocessing import normalize
from scipy.linalg import orth
from spider.cluster.ssc_cvx import SSC_CVX
class Test_SSC_CVX(unittest.TestCase):
def test_ssc_cvx(self):
""" Test runnability and verify clustering instance
"""
# parameters
D = 100 # dimension of ambient space
K = 5 # number of subspaces
Nk = 100 # points per subspace
d = 1 # dimension of subspace
varn = 0.01 # noise variance
B = 10 # number of base clusterings
q = 10 # threshold parameter
N = K * Nk
# generate data
X = np.zeros((D, N))
true_labels = np.zeros(N)
true_U = np.zeros((K, D, d))
for kk in range(K):
true_U[kk] = orth(
|
np.random.randn(D, d)
|
numpy.random.randn
|
# -*- coding: utf-8 -*-
__all__ = ["StellarPopulation"]
import os
import numpy as np
from ._fsps import driver
from .filters import FILTERS
class StellarPopulation(object):
r"""
This is the main interface to use when interacting with FSPS from Python.
Most of the Fortran API is exposed through Python hooks with various
features added for user friendliness. It is recommended to only
instantiate one StellarPopulation object in a given program. When
initializing, you can set any of the parameters of the system using keyword
arguments. Below, you'll find a list of the options that you can include
(with the comments taken directly from the `FSPS docs
<https://github.com/cconroy20/fsps/blob/master/doc/MANUAL.pdf>`_). Unless
otherwise noted, you can change these values later using the ``params``
property—which is ``dict``-like. For example:
::
sp = StellarPopulation(imf_type=2, zcontinuous=1)
sp.params["imf_type"] = 1
sp.params["logzsol"] = -0.3
sp.params["sfh"] = 1
:param compute_vega_mags: (default: False)
A switch that sets the zero points of the magnitude system: ``True``
uses Vega magnitudes versus AB magnitudes. Can only be changed during
initialization.
:param vactoair_flag: (default: False)
If ``True``, output wavelengths in air (rather than vac). Can only be
changed during initialization.
:param zcontinuous: (default: 0)
Flag specifying how interpolation in metallicity of the simple stellar
populations (SSPs) is performed before computing composite stellar
population (CSP) models:
* 0: No interpolation, use the metallicity index specified by ``zmet``.
* 1: The SSPs are interpolated to the value of ``logzsol`` before the
spectra and magnitudes are computed, and the value of ``zmet`` is
ignored.
* 2: The SSPs are convolved with a metallicity distribution function
specified by the ``logzsol`` and ``pmetals`` parameters. The value of
``zmet`` is ignored.
* 3: Use all available SSP metallicities when computing the composite
model, for use exclusively with tabular SFHs where the metallicity
evolution as function of age is given (see `set_tabular_sfh()`). The
values of ``zmet`` and ``logzsol`` are ignored. Furthermore
``add_neb_emission`` must be set to False.
Can only be changed during initialization.
:param add_agb_dust_model: (default: True)
Switch to turn on/off the AGB circumstellar dust model presented in
Villaume (2014). NB: The AGB dust emission is scaled by the parameter
`agb_dust`.
:param add_dust_emission: (default: True)
Switch to turn on/off the Draine & Li 2007 dust emission model.
:param add_igm_absorption: (default: False)
Switch to include IGM absorption via Madau (1995). The ``zred``
parameter must be non-zero for this switch to have any effect. The
optical depth can be scaled using the ``igm_factor`` parameter.
:param add_neb_emission: (default: False)
Switch to turn on/off a nebular emission model (both continuum and line
emission), based on Cloudy models from Nell Byler. Contrary to FSPS,
this option is turned off by default.
:param add_neb_continuum: (default: True)
Switch to turn on/off the nebular continuum component (automatically
turned off if ``add_neb_emission`` is ``False``).
:param add_stellar_remnants: (default: True)
Switch to add stellar remnants in the stellar mass computation.
:param redshift_colors: (default: False)
Flag specifying how to compute magnitudes. This has no effect in
python-FSPS. Magnitudes are always computed at a fixed redshift
specified by ``zred`` or the ``redshift`` parameter of ``get_mags``.
See `get_mags` for details.
:param compute_light_ages: (default: False)
Flag specifying whether to compute light- and mass-weighted ages. If
``True`` then the returned spectra are actually light-weighted ages (in
Gyr) at every wavelength, the returned magnitudes are filter
transmission weighted averages of these, the ``log_lbol`` attribute is
the bolometric luminosity weighted age, and the ``stellar_mass``
attribute gives the mass-weighted age.
:param nebemlineinspec: (default: True)
Flag to include the emission line fluxes in the spectrum. Turning this off
is a significant speedup in model calculation time. If not set, the line luminosities
are still computed.
:param smooth_velocity: (default: True)
Switch to choose smoothing in velocity space (``True``) or wavelength
space.
:param smooth_lsf: (default: False)
Switch to apply smoothing of the SSPs by a wavelength dependent line
spread function. See the ``set_lsf()`` method for details. Only takes
effect if ``smooth_velocity`` is True.
:param cloudy_dust: (default: False)
Switch to include dust in the Cloudy tables.
:param agb_dust: (default: 1.0)
Scales the circumstellar AGB dust emission.
:param tpagb_norm_type: (default: 2)
Flag specifying TP-AGB normalization scheme:
* 0: default Padova 2007 isochrones
* 1: Conroy & Gunn 2010 normalization
* 2: Villaume, Conroy, Johnson 2015 normalization
:param dell: (default: 0.0)
Shift in :math:`\log L_\mathrm{bol}` of the TP-AGB isochrones. Note
that the meaning of this parameter and the one below has changed to
reflect the updated calibrations presented in Conroy & Gunn (2009).
That is, these parameters now refer to a modification about the
calibrations presented in that paper. Only has effect if
``tpagb_norm_type=1``.
:param delt: (default: 0.0)
Shift in :math:`\log T_\mathrm{eff}` of the TP-AGB isochrones. Only
has effect if ``tpagb_norm_type=1``.
:param redgb: (default: 1.0)
Modify weight given to RGB. Only available with BaSTI isochrone set.
:param agb: (default: 1.0)
Modify weight given to TP-AGB. This only has effect for FSPS v3.1 or
higher.
:param fcstar: (default: 1.0)
Fraction of stars that the Padova isochrones identify as Carbon stars
that FSPS assigns to a Carbon star spectrum. Set this to 0.0 if, for
example, the user wishes to turn all Carbon stars into regular M-type
stars.
:param sbss: (default: 0.0)
Specific frequency of blue straggler stars. See Conroy et al. (2009a)
for details and a plausible range.
:param fbhb: (default: 0.0)
Fraction of horizontal branch stars that are blue. The blue HB stars
are uniformly spread in :math:`\log T_\mathrm{eff}` to :math:`10^4`
K. See Conroy et al. (2009a) for details and a plausible range.
:param pagb: (default: 1.0)
Weight given to the post-AGB phase. A value of 0.0 turns off post-AGB
stars; a value of 1.0 implies that the Vassiliadis & Wood (1994) tracks
are implemented as-is.
:param zred: (default: 0.0)
Redshift. If this value is non-zero and if ``redshift_colors=1``, the
magnitudes will be computed for the spectrum placed at redshift
``zred``.
:param zmet: (default: 1)
The metallicity is specified as an integer ranging between 1 and nz. If
``zcontinuous > 0`` then this parameter is ignored.
:param logzsol: (default: 0.0)
Parameter describing the metallicity, given in units of :math:`\log
(Z/Z_\odot)`. Only used if ``zcontinuous > 0``.
:param pmetals: (default: 2.0)
The power for the metallicity distribution function. The MDF is given by
:math:`(Z \, e^{-Z})^{\mathrm{pmetals}}` where :math:`Z =
z/(z_\odot \, 10^{\mathrm{logzsol}})` and z is the metallicity in
linear units (i.e., :math:`z_\odot = 0.019`). Using a negative value
will result in smoothing of the SSPs by a three-point triangular kernel
before linear interpolation (in :math:`\log Z`) to the requested
metallicity. Only used if ``zcontinuous = 2``.
:param imf_type: (default: 2)
Common variable defining the IMF type:
* 0: Salpeter (1955)
* 1: Chabrier (2003)
* 2: Kroupa (2001)
* 3: van Dokkum (2008)
* 4: Dave (2008)
* 5: tabulated piece-wise power law IMF, specified in ``imf.dat`` file
located in the data directory
:param imf_upper_limit: (default: 120)
The upper limit of the IMF, in solar masses. Note that if this is
above the maximum mass in the isochrones then those stars will not
contribute to the spectrum but will affect the overall IMF
normalization.
:param imf_lower_limit: (default: 0.08)
The lower limit of the IMF, in solar masses. Note that if this is
below the minimum mass in the isochrones then those stars will not
contribute to the spectrum but will affect the overall IMF
normalization.
:param imf1: (default: 1.3)
Logarithmic slope of the IMF over the range :math:`0.08 < M < 0.5
M_\odot`. Only used if ``imf_type=2``.
:param imf2: (default: 2.3)
Logarithmic slope of the IMF over the range :math:`0.5 < M < 1
M_\odot`. Only used if ``imf_type=2``.
:param imf3: (default: 2.3)
Logarithmic slope of the IMF over the range :math:`1.0 < M < \mathrm{imf\_upper\_limit}
M_\odot`. Only used if ``imf_type=2``.
:param vdmc: (default: 0.08)
IMF parameter defined in van Dokkum (2008). Only used if
``imf_type=3``.
:param mdave: (default: 0.5)
IMF parameter defined in Dave (2008). Only used if ``imf_type=4``.
:param evtype: (default: -1)
Compute SSPs for only the given evolutionary type. All phases used when
set to -1.
:param use_wr_spectra: (default: 1)
Turn on/off the WR spectral library. If off (0), will use the main
default library instead
:param logt_wmb_hot: (default: 0.0)
Use the Eldridge (2017) WMBasic hot star library above this value of
:math:`\log T_\mathrm{eff}` or 25,000K, whichever is larger.
:param masscut: (default: 150.0)
Truncate the IMF above this value.
:param sigma_smooth: (default: 0.0)
If smooth_velocity is True, this gives the velocity dispersion in km/s.
Otherwise, it gives the width of the gaussian wavelength smoothing in
Angstroms. These widths are in terms of :math:`\sigma`, *not* FWHM.
:param min_wave_smooth: (default: 1e3)
Minimum wavelength to consider when smoothing the spectrum.
:param max_wave_smooth: (default: 1e4)
Maximum wavelength to consider when smoothing the spectrum.
:param gas_logu: (default: -2)
Log of the gas ionization parameter; relevant only for the nebular
emission model.
:param gas_logz: (default: 0.0)
Log of the gas-phase metallicity; relevant only for the nebular
emission model. In units of :math:`\log (Z/Z_\odot)`.
:param igm_factor: (default: 1.0)
Factor used to scale the IGM optical depth.
:param sfh: (default: 0)
Defines the type of star formation history, normalized such that one
solar mass of stars is formed over the full SFH. Default value is 0.
* 0: Compute a simple stellar population (SSP).
* 1: Tau-model. A six parameter SFH (tau model plus a constant
component and a burst) with parameters ``tau``, ``const``,
``sf_start``, ``sf_trunc``, ``tburst``, and ``fburst`` (see below).
* 2: This option is not supported in Python-FSPS.
* 3: Compute a tabulated SFH, which is supplied through the
``set_tabular_sfh`` method. See that method for details.
* 4: Delayed tau-model. This is the same as option 1 except that the
tau-model component takes the form :math:`t\,e^{-t/\tau}`.
* 5: Delayed tau-model with a transition at a time ``sf_trunc`` to a
linearly decreasing SFH with the slope specified by ``sf_slope``. See
Simha et al. 2014 for details.
:param tau: (default: 1.0)
Defines e-folding time for the SFH, in Gyr. Only used if ``sfh=1`` or
``sfh=4``. The range is :math:`0.1 < \tau < 10^2`.
:param const: (default: 0.0)
Defines the constant component of the SFH. This quantity is defined as
the fraction of mass formed in a constant mode of SF; the range is
therefore :math:`0 \le C \le 1`. Only used if ``sfh=1`` or ``sfh=4``.
:param sf_start: (default: 0.0)
Start time of the SFH, in Gyr. Only used if ``sfh=1`` or ``sfh=4`` or
``sfh=5``.
:param sf_trunc: (default: 0.0)
Truncation time of the SFH, in Gyr. If set to 0.0, there is no
truncation. Only used if ``sfh=1`` or ``sfh=4`` or ``sfh=5``.
:param tage: (default: 0.0)
If set to a non-zero value, the
:func:`fsps.StellarPopulation.compute_csp` method will compute the
spectra and magnitudes only at this age, and will therefore only output
one age result. The units are Gyr. (The default is to compute and
return results from :math:`t \approx 0` to the maximum age in the
isochrones).
:param fburst: (default: 0.0)
Defines the fraction of mass formed in an instantaneous burst of star
formation. Only used if ``sfh=1`` or ``sfh=4``.
:param tburst: (default: 11.0)
Defines the age of the Universe when the burst occurs. If ``tburst >
tage`` then there is no burst. Only used if ``sfh=1`` or ``sfh=4``.
:param sf_slope: (default: 0.0)
For ``sfh=5``, this is the slope of the SFR after time ``sf_trunc``.
:param dust_type: (default: 0)
Common variable defining the attenuation curve for dust around 'old' stars:
* 0: power law with index dust index set by ``dust_index``.
* 1: Milky Way extinction law (with the :math:`R = A_V /E(B - V)` value
given by ``mwr``) parameterized by Cardelli et al. (1989), with
variable UV bump strength (see ``uvb`` below).
* 2: Calzetti et al. (2000) attenuation curve. Note that if this value
is set then the dust attenuation is applied to all starlight equally
(not split by age), and therefore the only relevant parameter is
``dust2``, which sets the overall normalization (you must set
``dust1=0.0`` for this to work correctly).
* 3: allows the user to access a variety of attenuation curve models
from Witt & Gordon (2000) using the parameters ``wgp1`` and
``wgp2``. In this case the parameters ``dust1`` and ``dust2`` have no
effect because the WG00 models specify the full attenuation curve.
* 4: Kriek & Conroy (2013) attenuation curve. In this model the slope
of the curve, set by the parameter ``dust_index``, is linked to the
strength of the UV bump and is the *offset* in slope from Calzetti.
* 5: The SMC bar extinction curve from Gordon et al. (2003)
* 6: The Reddy et al. (2015) attenuation curve.
:param dust_tesc: (default: 7.0)
Stars younger than ``dust_tesc`` are attenuated by both ``dust1`` and
``dust2``, while stars older are attenuated by ``dust2`` only. Units
are :math:`\log (\mathrm{yrs})`.
:param dust1: (default: 0.0)
Dust parameter describing the attenuation of young stellar light,
i.e. where ``t <= dust_tesc`` (for details, see Conroy et al. 2009a).
:param dust2: (default: 0.0)
Dust parameter describing the attenuation of old stellar light,
i.e. where ``t > dust_tesc`` (for details, see Conroy et al. 2009a).
:param dust_clumps: (default: -99.)
Dust parameter describing the dispersion of a Gaussian PDF density
distribution for the old dust. Setting this value to -99.0 sets the
distribution to a uniform screen. See Conroy et al. (2009b) for
details. Values other than -99 are no longer supported.
:param frac_nodust: (default: 0.0)
Fraction of starlight that is not attenuated by the diffuse dust
component (i.e. that is not affected by ``dust2``).
:param frac_obrun: (default: 0.0)
Fraction of the young stars (age < dust_tesc) that are not attenuated
by ``dust1``, representing runaway OB stars. These stars are still
attenuated by ``dust2``.
:param dust_index: (default: -0.7)
Power law index of the attenuation curve. Only used when
``dust_type=0``.
:param dust1_index: (default: -1.0)
Power law index of the attenuation curve affecting stars younger than
dust_tesc corresponding to ``dust1``. Used for all dust types.
:param mwr: (default: 3.1)
The ratio of total to selective absorption which characterizes the MW
extinction curve: :math:`R = A_V /E(B - V)`. Only used when
``dust_type=1``.
:param uvb: (default: 1.0)
Parameter characterizing the strength of the 2175A extinction feature
with respect to the standard Cardelli et al. determination for the
MW. Only used when ``dust_type=1``.
:param wgp1: (default: 1)
Integer specifying the optical depth in the Witt & Gordon (2000)
models. Values range from 1 − 18, corresponding to optical depths of
0.25, 0.50, 0.75, 1.00, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 4.50,
5.00, 5.50, 6.00, 7.00, 8.00, 9.00, 10.0. Note that these optical
depths are defined differently from the optical depths defined by
the parameters ``dust1`` and ``dust2``. See Witt & Gordon (2000)
for details.
:param wgp2: (default: 1)
Integer specifying the type of large-scale geometry and extinction
curve. Values range from 1-6, corresponding to MW+dusty, MW+shell,
MW+cloudy, SMC+dusty, SMC+shell, SMC+cloudy. Dusty, shell, and cloudy
specify the geometry and are described in Witt & Gordon (2000).
:param wgp3: (default: 1)
Integer specifying the local geometry for the Witt & Gordon (2000)
dust models. A value of 0 corresponds to a homogeneous distribution,
and a value of 1 corresponds to a clumpy distribution. See Witt &
Gordon (2000) for details.
:param duste_gamma: (default: 0.01)
Parameter of the Draine & Li (2007) dust emission model. Specifies the
relative contribution of dust heated at a radiation field strength of
:math:`U_\mathrm{min}` and dust heated at :math:`U_\mathrm{min} < U \le
U_\mathrm{max}`. Allowable range is 0.0 – 1.0.
:param duste_umin: (default: 1.0)
Parameter of the Draine & Li (2007) dust emission model. Specifies the
minimum radiation field strength in units of the MW value. Valid range
is 0.1 – 25.0.
:param duste_qpah: (default: 3.5)
Parameter of the Draine & Li (2007) dust emission model. Specifies the
grain size distribution through the fraction of grain mass in
PAHs. This parameter has units of % and a valid range of 0.0 − 10.0.
:param fagn: (default: 0.0)
The total luminosity of the AGN, expressed as a fraction of the
bolometric stellar luminosity (so it can be greater than 1). The shape
of the AGN SED is from the Nenkova et al. 2008 templates.
:param agn_tau: (default: 10)
Optical depth of the AGN dust torus, which affects the shape of the AGN
SED. Outside the range (5, 150) the AGN SED is an
extrapolation.
"""
def __init__(
self, compute_vega_mags=False, vactoair_flag=False, zcontinuous=0, **kwargs
):
# Set up the parameters to their default values.
self.params = ParameterSet(
add_agb_dust_model=True,
add_dust_emission=True,
add_igm_absorption=False,
add_neb_emission=False,
add_neb_continuum=True,
add_stellar_remnants=True,
redshift_colors=False,
compute_light_ages=False,
nebemlineinspec=True,
smooth_velocity=True,
smooth_lsf=False,
cloudy_dust=False,
agb_dust=1.0,
tpagb_norm_type=2,
dell=0.0,
delt=0.0,
redgb=1.0,
agb=1.0,
fcstar=1.0,
fbhb=0.0,
sbss=0.0,
pagb=1.0,
zred=0.0,
zmet=1,
logzsol=0.0,
pmetals=2.0,
imf_type=2,
imf_upper_limit=120,
imf_lower_limit=0.08,
imf1=1.3,
imf2=2.3,
imf3=2.3,
vdmc=0.08,
mdave=0.5,
evtype=-1,
use_wr_spectra=1,
logt_wmb_hot=0.0,
masscut=150.0,
sigma_smooth=0.0,
min_wave_smooth=1e3,
max_wave_smooth=1e4,
gas_logu=-2,
gas_logz=0.0,
igm_factor=1.0,
sfh=0,
tau=1.0,
const=0.0,
sf_start=0.0,
sf_trunc=0.0,
tage=0.0,
dust_tesc=7.0,
fburst=0.0,
tburst=11.0,
sf_slope=0.0,
dust_type=0,
dust1=0.0,
dust2=0.0,
dust_clumps=-99.0,
frac_nodust=0.0,
frac_obrun=0.0,
dust_index=-0.7,
dust1_index=-1.0,
mwr=3.1,
uvb=1.0,
wgp1=1,
wgp2=1,
wgp3=1,
duste_gamma=0.01,
duste_umin=1.0,
duste_qpah=3.5,
fagn=0.0,
agn_tau=10.0,
)
# Parse any input options.
for k, v in self.params.iteritems():
self.params[k] = kwargs.pop(k, v)
# Make sure that we didn't get any unknown options.
if len(kwargs):
raise TypeError(
"__init__() got an unexpected keyword argument "
"'{0}'".format(list(kwargs)[0])
)
# Before the first time we interact with the FSPS driver, we need to
# run the ``setup`` method.
if not driver.is_setup:
driver.setup(compute_vega_mags, vactoair_flag)
else:
cvms, vtaflag = driver.get_setup_vars()
assert compute_vega_mags == bool(cvms)
assert vactoair_flag == bool(vtaflag)
self._zcontinuous = zcontinuous
# Caching.
self._wavelengths = None
self._emwavelengths = None
self._zlegend = None
self._ssp_ages = None
self._stats = None
self._libraries = None
def _update_params(self):
if self.params.dirtiness == 2:
driver.set_ssp_params(*[self.params[k] for k in self.params.ssp_params])
if self.params.dirtiness >= 1:
driver.set_csp_params(*[self.params[k] for k in self.params.csp_params])
self.params.dirtiness = 0
def _compute_csp(self):
self._update_params()
NSPEC = driver.get_nspec()
NTFULL = driver.get_ntfull()
driver.compute_zdep(NSPEC, NTFULL, self._zcontinuous)
self._stats = None
def get_spectrum(self, zmet=None, tage=0.0, peraa=False):
r"""
Return spectra for the current CSP.
:param zmet: (default: None)
The (integer) index of the metallicity to use. By default, use
the current value of ``self.params["zmet"]``.
:param tage: (default: 0.0)
The age of the stellar population (in Gyr) for which to obtain a
spectrum. By default, this will compute a grid of ages from
:math:`t \approx 0` to the maximum age in the isochrones.
:param peraa: (default: False)
If ``True``, return the spectrum in :math:`L_\odot/A`. Otherwise,
return the spectrum in the FSPS standard
:math:`L_\odot/\mathrm{Hz}`.
:returns wavelengths:
The wavelength grid in Angstroms.
:returns spectrum:
The spectrum in :math:`L_\odot/\mathrm{Hz}` or :math:`L_\odot/A`.
If an age was provided by the ``tage`` parameter then the result
is a 1D array with ``NSPEC`` values. Otherwise, it is a 2D array
with shape ``(NTFULL, NSPEC)``.
"""
self.params["tage"] = tage
if zmet is not None:
self.params["zmet"] = zmet
if self.params.dirty:
self._compute_csp()
wavegrid = self.wavelengths
if peraa:
factor = 3e18 / wavegrid ** 2
else:
factor = np.ones_like(wavegrid)
NSPEC = driver.get_nspec()
if (tage > 0.0) or (tage == -99):
return wavegrid, driver.get_spec(NSPEC, 1)[0] * factor
NTFULL = driver.get_ntfull()
return wavegrid, driver.get_spec(NSPEC, NTFULL) * factor[None, :]
def get_mags(self, zmet=None, tage=0.0, redshift=None, bands=None):
r"""
Get the magnitude of the CSP.
:param zmet: (default: None)
The (integer) index of the metallicity to use. By default, use the
current value of ``self.params["zmet"]``.
:param tage: (default: 0.0)
The age of the stellar population in Gyr. By default, this will
compute a grid of ages from :math:`t \approx 0` to the maximum age
in the isochrones.
:param redshift: (default: None)
Optionally redshift the spectrum first. If not supplied, the
redshift given by ``StellarPopulation.params["zred"]`` is assumed.
If supplied, the value of ``zred`` is ignored (and IGM attenuation
will not work properly).
:param bands: (default: None)
The names of the filters that you would like to compute the
magnitude for. This should correspond to the result of
:func:`fsps.find_filter`.
:returns mags:
The magnitude grid. If an age was provided by the ``tage``
parameter then the result is a 1D array with ``NBANDS`` values.
Otherwise, it is a 2D array with shape ``(NTFULL, NBANDS)``. If a
particular set of bands was requested then this return value will
be properly compressed along that axis, ordered according to the
``bands`` argument. If ``redshift`` is not 0, the units are
apparent observed frame magnitude :math:`m` assuming
:math:`\Omega_m=0.3, \Omega_\Lambda=0.7`
"""
if redshift is None:
zr = self.params["zred"]
elif (self.params["zred"] > 0) & (redshift != self.params["zred"]):
zr = redshift
print("Warning: redshift is different than 'zred'.")
else:
zr = redshift
self.params["tage"] = tage
if zmet is not None:
self.params["zmet"] = zmet
if self.params.dirty:
self._compute_csp()
if tage > 0.0:
NTFULL = 1
else:
NTFULL = driver.get_ntfull()
NBANDS = driver.get_nbands()
NSPEC = driver.get_nspec()
band_array = np.ones(NBANDS, dtype=bool)
if bands is not None:
user_sorted_inds = np.array([FILTERS[band.lower()].index for band in bands])
band_array[
np.array(
[i not in user_sorted_inds for i in range(NBANDS)],
dtype=bool,
)
] = False
inds = np.array(band_array, dtype=int)
mags = driver.get_mags(NSPEC, NTFULL, zr, inds)
if tage > 0.0:
if bands is not None:
return mags[0, user_sorted_inds]
else:
return mags[0, :]
else:
if bands is not None:
return mags[:, user_sorted_inds]
else:
return mags
def _ztinterp(self, zpos, tpos, peraa=False):
r"""
Return an SSP spectrum, mass, and luminosity interpolated to a target
metallicity and age. This effectively wraps the ZTINTERP subroutine.
Only the SSPs bracketing a given metallicity will be regenerated, if
parameters are dirty.
:param zpos:
The metallicity, in units of :math:`\log(Z/Z_\odot)`
:param tpos:
The desired age, in Gyr.
:param peraa: (default: False)
If true, return spectra in units of :math:`L_\odot/A`, otherwise
:math:`L_\odot/\mathrm{Hz}`
:returns spec:
The SSP spectrum, interpolated to zpos and tpos.
:returns mass:
The stellar mass of the SSP at tpos.
:returns lbol:
The bolometric luminosity of the returned SSP.
"""
if self.params.dirtiness == 2:
self._update_params()
NSPEC = driver.get_nspec()
spec, mass, lbol = np.zeros(NSPEC), np.zeros(1), np.zeros(1)
logt_yrs = np.log10(tpos * 1e9)
driver.interp_ssp(zpos, logt_yrs, spec, mass, lbol)
if peraa:
wavegrid = self.wavelengths
factor = 3e18 / wavegrid ** 2
spec *= factor
return spec, mass, lbol
def _all_ssp_spec(self, update=True, peraa=False):
r"""
Return the contents of the ssp_spec_zz array.
:param update: (default: True)
If True, forces an update of the SSPs if the ssp parameters have
changed. Otherwise simply dumps the current contents of the
``ssp_spec_zz`` array.
:param peraa: (default: False)
If true, return spectra in units of :math:`L_\odot/A`, otherwise
:math:`L_\odot/\mathrm{Hz}`
:returns spec:
The spectra of the SSPs, having shape (nspec, ntfull, nz).
:returns mass:
The mass of the SSPs, having shape (ntfull, nz).
:returns lbol:
The bolometric luminosity of the SSPs, having shape (ntfull, nz).
"""
if (self.params.dirtiness == 2) and update:
self._update_params()
NSPEC = driver.get_nspec()
NTFULL = driver.get_ntfull()
NZ = driver.get_nz()
spec = np.zeros([NSPEC, NTFULL, NZ], order="F")
mass = np.zeros([NTFULL, NZ], order="F")
lbol = np.zeros([NTFULL, NZ], order="F")
driver.get_ssp_spec(spec, mass, lbol)
if peraa:
wavegrid = self.wavelengths
factor = 3e18 / wavegrid ** 2
spec *= factor[:, None, None]
return spec, mass, lbol
def _get_stellar_spectrum(
self,
mact,
logt,
lbol,
logg,
phase,
comp,
mdot=0,
weight=1,
zmet=None,
peraa=True,
):
r"""
Get the spectrum of a star with a given set of physical parameters.
This uses the metallicity given by the current value of ``zmet``.
:param mact:
Actual stellar mass (after taking into account mass loss). Used to
calculate surface gravity.
:param logt:
The log of the effective temperature.
:param lbol:
Stellar luminosity, in units of :math:`L_\odot`
:param logg:
Log of the surface gravity g. Note that this variable is actually
ignored, and logg is calculated internally using ``mact``,
``lbol``, and ``logt``.
:param phase:
The evolutionary phase, 0 through 6.
:param comp:
Composition, in terms of C/O ratio. Only used for AGB stars
(phase=5), where the division between carbon and oxygen rich stars
is :math:`C/O = 1`.
:param mdot:
The log of the mass loss rate.
:param weight:
The IMF weight
:returns outspec:
The spectrum of the star, in :math:`L_\odot/\mathrm{Hz}`
"""
if zmet is not None:
self.params["zmet"] = zmet
if self.params.dirty:
self._update_params()
NSPEC = driver.get_nspec()
outspec = np.zeros(NSPEC)
driver.stellar_spectrum(
mact, logt, lbol, logg, phase, comp, mdot, weight, outspec
)
if peraa:
wavegrid = self.wavelengths
factor = 3e18 / wavegrid ** 2
outspec *= factor
return outspec
def isochrones(self, outfile="pyfsps_tmp"):
r"""
Write the isochrone data (age, mass, weights, phases, magnitudes, etc.)
to a .cmd file, then read it into a huge numpy array. Only parameters
listed in ``StellarPopulation.params.ssp_params`` affect the output of
this method.
:param outfile: (default: 'pyfsps_tmp')
The file root name of the .cmd file, which will be placed in the
$SPS_HOME/OUTPUTS/ directory
:returns dat:
A huge numpy structured array containing information about every
isochrone point for the current metallicity. In general the
columns may be isochrone specific, but for Padova they are
* age: log age, yrs
* log(Z): log metallicity
* mini: initial stellar mass in solar masses
* mact: actual stellar mass (accounting for mass loss)
* logl: log bolometric luminosity, solar units
* logt: log temperature (K)
* logg: log gravity
* phase: (see evtype)
* log(weight): IMF weight corresponding to a total of 1 Msol formed.
* log(mdot): mass loss rate (Msol/yr)
"""
if self.params.dirty:
self._compute_csp()
from . import list_filters
absfile = os.path.join(os.environ["SPS_HOME"], "OUTPUTS", outfile + ".cmd")
driver.write_isoc(outfile)
with open(absfile, "r") as f:
# drop the comment hash and mags field
header = f.readline().split()[1:-1]
header += list_filters()
cmd_data = np.loadtxt(
absfile,
comments="#",
dtype=np.dtype([(n, float) for n in header]),
)
return cmd_data
def set_tabular_sfh(self, age, sfr, Z=None):
r"""
Set a tabular SFH for use with the ``sfh=3`` option. See the FSPS
documentation for information about tabular SFHs. This SFH will be
piecewise linearly interpolated.
:param age:
Time since the beginning of the universe in Gyr. Must be
increasing. ndarray of shape (ntab,)
:param sfr:
The SFR at each ``age``, in Msun/yr. Must be an ndarray same
length as ``age``, and contain at least one non-zero value.
:param Z: (optional)
The metallicity at each age, in units of absolute metallicity
(e.g. Z=0.019 for solar with the Padova isochrones and MILES
stellar library).
"""
assert len(age) == len(sfr), "age and sfr have different size."
assert
|
np.all(age[1:] > age[:-1])
|
numpy.all
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 10:38:44 2020
@author: jsalm
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# from sklearn import svm, datasets
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy.ndimage import convolve,distance_transform_edt,label, find_objects
from sklearn.metrics import auc
from skimage.feature import hog
from matplotlib.colors import ListedColormap
from matplotlib.patches import Patch, Rectangle
import os
import time
import csv
from ..datmgmt import DataManager
from ..disp import LabelMaker
from ..preproc import Filters
# import xlwings as xw
from IPython import get_ipython
# plt.rcParams['figure.dpi'] = 100
# plt.rcParams['figure.figsize'] = (10,10)
# get_ipython().run_line_magic('matplotlib','qt5')
dirname = os.path.dirname(__file__)
save_bin = os.path.join(dirname,"save-bin")
global data,labels
def generate_train_sert_ID(boolim,image):
if type(boolim[0,0]) != np.bool_:
raise TypeError("args need to be type bool and tuple respectively")
'end if'
count = 0
data = np.zeros((2,boolim.shape[0]*boolim.shape[1]))
point_data = np.zeros((2,boolim.shape[0]*boolim.shape[1]))
#generate list of points
for i,row in enumerate(boolim):
for j,col in enumerate(row):
if col == True:
data[0,count] = image[i,j]
data[1,count] = 1
point_data[0,count] = i
point_data[1,count] = j
count+=1
else:
data[0,count] = image[i,j]
data[1,count] = 0
point_data[0,count] = i
point_data[1,count] = j
count+=1
'end if'
'end for'
'end for'
return data,point_data
'end def'
def generate_test_sert_ID(boolim,image):
if type(boolim[0,0]) != np.bool_:
raise TypeError("args need to be type bool and tuple respectively")
'end if'
count = 0
t_data = np.sum(boolim)
data = np.zeros((2,t_data))
point_data = np.zeros((2,t_data))
for i,row in enumerate(boolim):
for j,col in enumerate(row):
if col == True:
data[0,count] = image[i,j]
data[1,count] = 0
point_data[0,count] = i
point_data[1,count] = j
count+=1
return data,point_data
'end def'
def get_coef(generator):
weights = []
for clf in generator:
weights.append(clf.coef_)
'end for'
return weights
'end def'
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
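# Illustrative sketch of how these two helpers combine (``clf`` is an assumed,
# already-fitted two-feature classifier and X, y are assumed training data,
# neither defined in this file):
#
#     xx, yy = make_meshgrid(X[:, 0], X[:, 1], h=0.02)
#     fig, ax = plt.subplots()
#     plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
#     ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')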
def gen_point_vector(image):
point_data = np.zeros((image.shape[0]*image.shape[1],2))
count = 0
for i in range(0,image.shape[0]):
for j in range(0,image.shape[1]):
point_data[count,:] = [i,j]
count += 1
'end for'
'end for'
return point_data
'end def'
def img_to_data(image,mask,keep_all = True,*kwargs):
"""
Parameters
----------
image : TYPE
DESCRIPTION.
**params : image data type float32[:,:]
DESCRIPTION.
Returns
-------
array of data of shape [image.shape[0]*image.shape[1], number_of_parameters + image_data] representing
all the parameters to be entered into SVM image analysis
"""
#initialize with original image data
img_d = image.ravel()
img_d = img_d.reshape(img_d.shape[0],1)
con_data = img_d
param_c = 0
for data in kwargs:
new_d = data.ravel()
new_d = new_d.reshape(new_d.shape[0],1)
con_data = np.concatenate((con_data,new_d),axis = 1)
param_c += 1
'end for'
nonzero = np.sum(mask)
mask_r = mask.ravel()
mask_r = mask_r.reshape(mask_r.shape[0],1)
point_data = gen_point_vector(image)
if keep_all:
data = con_data
bool_set = mask_r.astype(int)
else:
masked = np.multiply(con_data,mask_r)
masked_new = np.zeros((nonzero,con_data.shape[1]))
point_new = np.zeros((nonzero,2))
bool_set = np.zeros((nonzero,con_data.shape[1]))
count = 0
for i,x in enumerate(masked):
if x.any() != 0:
masked_new[count,:] = x
bool_set[count,:] = mask_r[i,:]
point_new[count,:] = point_data[i,:]
count += 1
'end if'
'end for'
data = masked_new
bool_set = bool_set.astype(int)
point_data = point_new
return data,bool_set,point_data
'end def'
def data_to_img(mask,predictions,positions):
newim = np.zeros((mask.shape[0],mask.shape[1]))
count = 0
for i,row in enumerate(mask):
for j,col in enumerate(row):
if col == True:
newim[i,j] = predictions[count]
count += 1
'end if'
'end for'
'end for'
return newim
def get_nonzeros(image,val_vector,mask,tru_type = True):
mask = mask.ravel()
mask = mask.reshape(mask.shape[0],1)
masklen = np.sum(mask.astype(int))
mask_new =
|
np.zeros((masklen,mask.shape[1]))
|
numpy.zeros
|
import numpy as np
from outputs import *
def zero_pad_test(target):
# Test 1
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = target(x, 3)
print ("x.shape =\n", x.shape)
print ("x_pad.shape =\n", x_pad.shape)
print ("x[1,1] =\n", x[1, 1])
print ("x_pad[1,1] =\n", x_pad[1, 1])
assert type(x_pad) == np.ndarray, "Output must be a np array"
assert x_pad.shape == (4, 9, 9, 2), f"Wrong shape: {x_pad.shape} != (4, 9, 9, 2)"
print(x_pad[0, 0:2,:, 0])
assert np.allclose(x_pad[0, 0:2,:, 0], [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 1e-15), "Rows are not padded with zeros"
assert np.allclose(x_pad[0, :, 7:9, 1].transpose(), [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 1e-15), "Columns are not padded with zeros"
assert np.allclose(x_pad[:, 3:6, 3:6, :], x, 1e-15), "Internal values are different"
# Test 2
np.random.seed(1)
x = np.random.randn(5, 4, 4, 3)
pad = 2
x_pad = target(x, pad)
assert type(x_pad) == np.ndarray, "Output must be a np array"
assert x_pad.shape == (5, 4 + 2 * pad, 4 + 2 * pad, 3), f"Wrong shape: {x_pad.shape} != {(5, 4 + 2 * pad, 4 + 2 * pad, 3)}"
assert np.allclose(x_pad[0, 0:2,:, 0], [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], 1e-15), "Rows are not padded with zeros"
assert np.allclose(x_pad[0, :, 6:8, 1].transpose(), [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], 1e-15), "Columns are not padded with zeros"
assert np.allclose(x_pad[:, 2:6, 2:6, :], x, 1e-15), "Internal values are different"
print("\033[92mAll tests passed!")
def conv_single_step_test(target):
np.random.seed(3)
a_slice_prev = np.random.randn(5, 5, 3)
W = np.random.randn(5, 5, 3)
b = np.random.randn(1, 1, 1)
Z = target(a_slice_prev, W, b)
expected_output = np.float64(-3.5443670581382474)
assert (type(Z) == np.float64 or type(Z) == np.float32), "You must cast the output to float"
assert np.isclose(Z, expected_output), f"Wrong value. Expected: {expected_output} got: {Z}"
print("\033[92mAll tests passed!")
def conv_forward_test(target):
# Test 1
np.random.seed(3)
A_prev = np.random.randn(2, 5, 7, 4)
W = np.random.randn(3, 3, 4, 8)
b = np.random.randn(1, 1, 1, 8)
Z, cache_conv = target(A_prev, W, b, {"pad" : 3, "stride": 1})
Z_shape = Z.shape
assert Z_shape[0] == A_prev.shape[0], f"m is wrong. Current: {Z_shape[0]}. Expected: {A_prev.shape[0]}"
assert Z_shape[1] == 9, f"n_H is wrong. Current: {Z_shape[1]}. Expected: 9"
assert Z_shape[2] == 11, f"n_W is wrong. Current: {Z_shape[2]}. Expected: 11"
assert Z_shape[3] == W.shape[3], f"n_C is wrong. Current: {Z_shape[3]}. Expected: {W.shape[3]}"
# Test 2
Z, cache_conv = target(A_prev, W, b, {"pad" : 0, "stride": 2})
assert(Z.shape == (2, 2, 3, 8)), "Wrong shape. Don't hard code the pad and stride values in the function"
# Test 3
W =
|
np.random.randn(5, 5, 4, 8)
|
numpy.random.randn
|
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""
Markov Chain Montecarlo Simulator of the daily customer flux in a supermarket
""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
# import built-in libraries
import os
import datetime as dt
import time
from colorama import Fore, Style, init
init()
import logging
logging.basicConfig(level=logging.WARNING, format="%(message)s")
# import other libraries
import numpy as np
import pandas as pd
from faker import Faker
from pyfiglet import Figlet
# import scripts
import proba
class Customer:
"""
a single customer that moves through the supermarket
in a MCMC simulation
"""
def __init__(self, name, state="entrance", budget=100):
self.name = name
self.state = state
self.budget = budget
def __repr__(self):
return f"{self.name} is in {self.state}."
def next_state(self):
"""
Propagates the customer to the next state.
Returns nothing.
"""
# WARNING: CHECK THE ORDER OF THE AISLES WHEN THE ACTUAL MATRIX ARRIVES
aisles = ["checkout", "dairy", "drinks", "fruit", "spices"]
if self.state in aisles:
if self.state == "dairy":
initial_state = np.array([0.0, 1.0, 0.0, 0.0, 0.0])
elif self.state == "drinks":
initial_state = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
elif self.state == "fruit":
initial_state = np.array([0.0, 0.0, 0.0, 1.0, 0.0])
elif self.state == "spices":
initial_state =
|
np.array([0.0, 0.0, 0.0, 0.0, 1.0])
|
numpy.array
|
# Created by <NAME>
# Date: 11/09/2020
import pygame as pg
import numpy as np
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
GRAY = (200, 200, 200)
# The goal of this class is to just collect all the screen variables at one place,
# that all other classes can refer to and update.
class Basic:
def __init__(self, frameside=400, frame_grid=(1, 1), title='', id_gui=-1, buttons=()):
pg.init()
screen = pg.display.set_mode((frame_grid[0] * frameside, frame_grid[1] * frameside))
screen.fill(WHITE)
self.n_frames = frame_grid[0] * frame_grid[1]
self.has_canvas = np.zeros(self.n_frames)
self.screen = screen # A shared screen everyone can update
self.frameside = frameside # The length of the side of a square frame
self.frame_grid = frame_grid #
self.canvas_prop = 0.9013 # Canvas side size relative to frame side
self.circ_thick = 5 # How thick line to draw around matrixes
if len(buttons) != 0:
self.buttons = buttons
self.n_buttons = len(self.buttons)
self.w_button = 0.5 * self.frameside
self.h_button = 0.5 * self.frameside * 0.9 / self.n_buttons
self.b_offset = (0.5, 0.25)
self.draw_gui()
pg.display.set_caption(title)
pg.display.update()
def get_displacement(self, frame_id):
i_x = frame_id % self.frame_grid[0]
i_y = frame_id // self.frame_grid[0]  # integer division so the row index stays an int
return (i_x * self.frameside, i_y * self.frameside)
def get_canvas_displacement(self, frame_id):
frame_displace = np.array(self.get_displacement(frame_id))
canvas_displace = np.array([(1. - self.canvas_prop) * self.frameside / 2, self.circ_thick])
return np.int32(frame_displace + canvas_displace + 0.5)
def get_x_from_frame(self, frame_pos):
frame_pos = np.array(frame_pos)
canvas_displace = np.array([(1. - self.canvas_prop) * self.frameside / 2, self.circ_thick])
x = 1. * (frame_pos - canvas_displace) / (self.canvas_prop * self.frameside)
return x
def clean_frame(self, frame_id):
# Constants
circ_thick = self.circ_thick
canvas_side = 1. * self.canvas_prop * self.frameside
# Erase old
frame_displace = self.get_displacement(frame_id)
canvas_displace = np.array([(1. - self.canvas_prop) * self.frameside / 2, circ_thick])
# Clear up old
framerect = np.append(frame_displace, np.array([self.frameside, self.frameside]))
pg.draw.rect(self.screen, WHITE, framerect)
# Draw background
circumfence = np.append(frame_displace + canvas_displace - circ_thick, np.array([canvas_side, canvas_side]) + circ_thick * 2)
inside = np.append(frame_displace + canvas_displace, np.array([canvas_side, canvas_side]))
pg.draw.rect(self.screen, BLACK, circumfence)
pg.draw.rect(self.screen, WHITE, inside)
#
# Draw a 2D image - Add 3D capability later
def draw_matrix(self, matrix, frame_id, v_min=1337, v_max=1337, matrix_text=''):
frame_displace = self.get_displacement(frame_id)
# Clear up old
self.clean_frame(frame_id)
# Draw matrix
i_max = matrix.shape[0]
j_max = matrix.shape[1]
if v_min == 1337:
min_value = matrix.min()
else:
min_value = v_min
if v_max == 1337:
max_value = matrix.max()
else:
max_value = v_max
if min_value == max_value:
max_value += 1
# compute the size of a pixel
circ_thick = self.circ_thick
canvas_side = 1. * self.canvas_prop * self.frameside
pixel_side = 1. * self.canvas_prop * self.frameside / max(i_max, j_max)
canvas_displace = np.array([(1. - self.canvas_prop) * self.frameside / 2, circ_thick])
rectangle = np.array([0, 0, pixel_side, pixel_side])
for i in range(i_max):
for j in range(j_max):
grid_displace = np.array([i * pixel_side, j * pixel_side])
# Sum upp all displacements
displace = frame_displace + canvas_displace + grid_displace
# Change to rectangle format
rec_disp = np.append(displace, np.array([0, 0]))
# The shape to draw: [x_start, y_start, width, height]
rec_xywh = np.int32(rec_disp + rectangle + 0.5)
# Color of shape
rel_value = 1. * (matrix[i, j] - min_value) / (max_value - min_value)
rec_color = np.uint8(rel_value * np.array([255, 255, 255]) + 0.5)
# Draw it!
pg.draw.rect(self.screen, rec_color, rec_xywh)
# Plot titles
text_center = np.array([self.frameside / 2, (2 * self.frameside + canvas_side) / 3])
text_center = np.int32(frame_displace + text_center)
self.display_message(matrix_text, text_center)
# pg.display.update()
# state_coords: array with 2D x-coord of every state-center
# state_color: array with gray scale value of corresponding state
def draw_free_states(self, frame_id, state_coord, state_color, size=4):
new_color = 1. * state_color - np.min(state_color)
max_value = np.max(new_color)
if max_value > 0:
new_color /= max_value
for i in range(len(state_coord)):
color = np.uint8(new_color[i] * np.array(WHITE))
self.draw_x_coord(frame_id, state_coord[i], color, size)
def draw_x_states(self, frame_id, x_now=[], x_goal=[], color_goal=RED):
if len(x_now) > 0:
self.draw_x_coord(frame_id, x_now, BLUE, 8)
if len(x_goal) > 0:
self.draw_x_coord(frame_id, x_goal, color_goal, 4)
pg.display.update()
def draw_x_coord(self, frame_id, x, color, size=4):
frame_displace = self.get_displacement(frame_id)
canvas_displace = np.array([(1. - self.canvas_prop) * self.frameside / 2, self.circ_thick])
pos_now_relative = x * self.frameside * self.canvas_prop
pos_now = frame_displace + canvas_displace + pos_now_relative
pos_now = np.int32(pos_now + 0.5) # round
pg.draw.circle(self.screen, color, pos_now, size)
def draw_gui(self):
n_buttons = self.n_buttons
w_button = self.w_button
h_button = self.h_button
x_c = self.b_offset[0] * self.frameside
y_cs = np.linspace(self.b_offset[1], 1. - self.b_offset[1], n_buttons) * self.frameside
for i in range(n_buttons):
y_c = y_cs[i]
# Plot box
x = x_c - 0.5 * w_button
y = y_c - 0.5 * h_button
pg.draw.rect(self.screen, GRAY, (x, y, w_button, h_button))
# Plot text
text_center = np.int32((x_c, y_c))
text_size = np.uint8(min(0.8 * h_button, 0.1 * self.frameside))
self.display_message(self.buttons[i], text_center, size=text_size)
# Some methods for handling text
def text_objects(self, text, font):
text_surface = font.render(text, True, BLACK)
return text_surface, text_surface.get_rect()
def display_message(self, text, text_center, size=30):
font = pg.font.SysFont(None, size)
text_surface, text_rectangle = self.text_objects(text, font)
text_rectangle.center = text_center
self.screen.blit(text_surface, text_rectangle)
def get_frame_and_pos(self, window_target):
frame_position =
|
np.array(window_target)
|
numpy.array
|
import pandas as pd
import glob, ipaddress
import numpy as np, os
pd.set_option('display.max_colwidth', None)
import argparse
import configparser
from utils.data import read_data
from utils.ip_replace import remap_ip_dataframe
from utils.distribution import distribute_dataframe_np
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from multiprocessing import Process, Manager
# Add functions to compute TPR, TNR etc. and prec./recall
def count_correct(y,y_hat):
count_correct=0.0
tp=0.0
fp=0.0
fn=0.0
total_positives=len(np.where(y!=0)[0])
total_negatives=len(np.where(y==0)[0])
for i in range(len(y)):
if y_hat[i]==y[i]:
count_correct+=1
if y_hat[i]==y[i] and y[i]!=0:
tp+=1
if y_hat[i]!=y[i] and y[i]!=0:
fn+=1
if y_hat[i]!=y[i] and y[i]==0:
fp+=1
recall=tp/(tp+fn)
precision=tp/(tp+fp)
accuracy=count_correct/len(y)
print('True positives: %s' % (tp))
print('False positives: %s' % (fp))
print('Recall: %s' % (recall))
print('Precision: %s' %(precision))
print('Accuracy: %s' % (accuracy))
return accuracy,precision,recall
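# Illustrative sketch (toy labels, not from the dataset): class 0 is treated
# as benign and any non-zero label as an attack, so for
#
#     y     = np.array([0, 0, 1, 1, 2])
#     y_hat = np.array([0, 1, 1, 0, 2])
#     acc, prec, rec = count_correct(y, y_hat)
#
# tp=2, fp=1, fn=1, giving accuracy 0.6 and precision = recall = 2/3.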
def classify_get_performance(item, i, X_test, y_test, classifier, return_dict, seed=None):
print('Agent num: %s' % i)
X=item[0]
y=item[1]
if i==0:
print('Size of split:%s' % len(X))
if classifier == 'RF':
clf = RandomForestClassifier(max_depth=3, random_state=seed)
elif classifier == 'SVM':
clf = SGDClassifier(loss='hinge', penalty='l2',max_iter=10000,shuffle=True,random_state=seed)
elif classifier == 'NB':
clf = GaussianNB()
elif classifier == 'DT':
clf = DecisionTreeClassifier(random_state=seed)
clf.fit(X, y)
y_test_predict=clf.predict(X_test)
accuracy,precision,recall=count_correct(y_test,y_test_predict)
return_dict[str(i)]=[accuracy,precision,recall]
return
if __name__ == "__main__":
seed = 777
rng = np.random.default_rng(seed) # can be called without a seed
config = configparser.ConfigParser()
config.read_file(open('configs/default.cfg'))
parser = argparse.ArgumentParser()
# Input args
if 'CIC-IDS-2017' in config['DATA']['input_type'] or 'CIC-IDS-2018' in config['DATA']['input_type']:
parser.add_argument('--day_name',type=str,default='Friday')
parser.add_argument('--attack_types',nargs='+')
# Classification args
parser.add_argument('--classifier', type=str, default='RF')
# Federation args
parser.add_argument('--num_agents', type=int,default=1)
parser.add_argument('--maintain_ratio', dest='maintain_ratio', action='store_true')
parser.add_argument('--seq_select', dest='seq_select', action='store_true')
args = parser.parse_args()
if 'CIC-IDS-2017' in config['DATA']['input_type']:
input_file=config['DATA']['input_dir']+'/'+config['DATA']['input_type']+'/'+args.day_name
for item in args.attack_types:
input_file+='_'+item
input_file+='.csv'
elif 'CIC-IDS-2018' in config['DATA']['input_type']:
input_file=config['DATA']['input_dir_large']+'/'+args.day_name
for item in args.attack_types:
input_file+='_'+item
input_file+='.csv'
# Load data
print('Loading data')
X_train, y_train, X_test, y_test = read_data(config['DATA']['input_type'], input_file, float(config['DATA']['train_test_split']),rng,attack_types=args.attack_types)
if config['REMAP']['remap_mode']!='None':
print("remapping ip addresses")
ips_to_remap_list = ["192.168.1.1", "192.168.1.2"]
# print(df.head(10))
df = remap_ip_dataframe(df, ips_to_remap_list, args)
# print(df.head(10)[["sa", "da", "sa_remapped", "da_remapped"]])
if args.num_agents>1:
df_list=distribute_dataframe_np(X_train, y_train, args.num_agents, args.maintain_ratio, args.seq_select, rng)
else:
df_list=[(X_train,y_train)]
# Run classification
manager = Manager()
return_dict = manager.dict()
print('Classifying data')
processes=[Process(target=classify_get_performance, args=(item, i, X_test, y_test, args.classifier, return_dict, seed)) for i, item in enumerate(df_list)]
for p in processes:
p.start()
for p in processes:
p.join()
test_accs=[]
test_precs=[]
test_recalls=[]
for k,v in return_dict.items():
test_accs.append(v[0])
test_precs.append(v[1])
test_recalls.append(v[2])
# test_accs,test_precs,test_recalls=classify_get_performance(df_list, X_test, y_test,args.classifier,seed)
if 'CIC-IDS-2017' in config['DATA']['input_type'] or 'CIC-IDS-2018' in config['DATA']['input_type']:
output_dir_name=config['DATA']['output_dir']+'/'+config['DATA']['input_type']+'/'+args.day_name
for item in args.attack_types:
output_dir_name+='_'+item
if not os.path.exists(output_dir_name):
os.makedirs(output_dir_name)
out_file_name=args.classifier
if args.maintain_ratio:
out_file_name+='_mr'
if args.seq_select:
out_file_name+='_seq'
out_file_name=output_dir_name+'/'+out_file_name+'.txt'
f = open(out_file_name, mode='a')
if os.path.getsize(out_file_name) == 0:
f.write('Num agents, acc_mean, acc_var, prec_mean, prec_var, recall_mean, recall_var \n')
f.write('{}, {}, {}, {}, {}, {}, {} \n'.format(args.num_agents, np.mean(test_accs), np.sqrt(np.var(test_accs)), np.mean(test_precs), np.sqrt(
|
np.var(test_precs)
|
numpy.var
|
"""Trim audio clips based on a VAD algorithm. One algorithm implemented
is adapted from [1]. The other uses the librosa.effects.trim() method.
[1] <NAME> and <NAME>, "A simple but efficient
real-time Voice Activity Detection algorithm," 2009 17th European Signal
Processing Conference, Glasgow, 2009, pp. 2549-2553.
https://ieeexplore.ieee.org/abstract/document/7077834
"""
from pathlib import Path
import click
import librosa
import numpy as np
import soundfile
from click_option_group import optgroup
from ertk.dataset import get_audio_paths
from ertk.utils import PathlibPath
def filter_silence_speech(
speech: np.ndarray, min_speech: int = 5, min_silence: int = 10
):
silence = True
i = 0
while i < len(speech):
if silence and speech[i]:
j = next(
(j for j in range(i + 1, len(speech)) if not speech[j]), len(speech)
)
if j - i >= min_speech:
silence = False
else:
speech[i:j] = False
elif not silence and not speech[i]:
j = next((j for j in range(i + 1, len(speech)) if speech[j]), len(speech))
if j - i >= min_silence:
silence = True
else:
speech[i:j] = True
else:
j = i + 1
i = j
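# Illustrative sketch (toy frame counts, thresholds are the defaults): the
# boolean mask is smoothed in place, dropping speech runs shorter than
# min_speech and filling silence gaps shorter than min_silence, e.g.
#
#     speech = np.array([0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0], dtype=bool)
#     filter_silence_speech(speech, min_speech=5, min_silence=10)
#     # the 2-frame burst is zeroed, the 6-frame burst is kept, and the
#     # trailing 1-frame gap is filled in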
def mh2009_vad(
path: Path,
energy_thresh: float,
freq_thresh: float,
sf_thresh: float,
window: float,
debug: bool,
):
audio, sr = librosa.load(path, sr=16000)
print(path)
window_samples = int(sr * window)
s = librosa.stft(
audio, n_fft=512, win_length=window_samples, hop_length=window_samples
)
sxx = np.abs(s) ** 2
e = np.log(sxx.mean(0))
min_e = e.min()
freq = librosa.fft_frequencies(sr, 512)[1:]
f = freq[sxx[1:, :].argmax(0)]
min_f = f.min()
sf = librosa.feature.spectral_flatness(audio, S=sxx, power=1)[0]
sf = -10 *
|
np.log10(sf / 10)
|
numpy.log10
|
#!/usr/bin/python3
# put most of our eggs in the gms matching basket:
# https://github.com/JiawangBian/GMS-Feature-Matcher/blob/master/python/gms_matcher.py
import copy
import cv2
import math
from matplotlib import pyplot as plt
import numpy as np
import time
from tqdm import tqdm
import random
import time
from props import getNode
from . import camera
from .find_obj import explore_match
from . import image_list
from .logger import log, qlog
from . import project
from . import smart
from . import transformations
detector_node = getNode('/config/detector', True)
matcher_node = getNode('/config/matcher', True)
detect_scale = 0.40
the_matcher = None
max_distance = None
min_pairs = 25
d2r = math.pi / 180.0
# the flann based matcher uses random starting points so some
# borderline matching results may change from one run to the next.
random.seed(time.time())
# Configure the matching session (set up the values in the property
# tree and call this function before any others). Note, putting the
# parameters in the property tree simplifies the parameter list and
# lets us save a record of these in the config.json file.
def configure():
global detect_scale
global the_matcher
global max_distance
global min_pairs
detect_scale = detector_node.getFloat('scale')
detector_str = detector_node.getString('detector')
if detector_str == 'SIFT' or detector_str == 'SURF':
norm = cv2.NORM_L2
max_distance = 270.0
elif detector_str == 'ORB' or detector_str == 'Star':
norm = cv2.NORM_HAMMING
max_distance = 64
else:
log("Detector not specified or not known:", detector_str)
quit()
# work around a feature/bug: flann enums don't exist
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
if norm == cv2.NORM_L2:
flann_params = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
else:
flann_params = {
'algorithm': FLANN_INDEX_LSH,
'table_number': 6, # 12
'key_size': 12, # 20
'multi_probe_level': 1 #2
}
search_params = {
'checks': 100
}
the_matcher = cv2.FlannBasedMatcher(flann_params, search_params)
min_pairs = matcher_node.getFloat('min_pairs')
# Iterate through all the matches for the specified image and
# delete keypoints that don't satisfy the homography (or
# fundamental) relationship. Returns true if match set is clean, false
# if keypoints were removed.
#
# Notice: this tends to eliminate matches that aren't all on the
# same plane, so if the scene has a lot of depth, this could knock
# out a lot of good matches.
def filter_by_transform(K, i1, i2, transform):
clean = True
# tol = float(i1.width) / 200.0 # rejection range in pixels
tol = math.pow(i1.width, 0.25)
if tol < 1.0:
tol = 1.0
# print "tol = %.4f" % tol
matches = i1.match_list[i2.name]
if len(matches) < min_pairs:
i1.match_list[i2.name] = []
return True
p1 = []
p2 = []
for k, pair in enumerate(matches):
use_raw_uv = False
if use_raw_uv:
p1.append( i1.kp_list[pair[0]].pt )
p2.append( i2.kp_list[pair[1]].pt )
else:
# undistorted uv points should be better if the camera
# calibration is known, right?
p1.append( i1.uv_list[pair[0]] )
p2.append( i2.uv_list[pair[1]] )
p1 = np.float32(p1)
p2 = np.float32(p2)
#print "p1 = %s" % str(p1)
#print "p2 = %s" % str(p2)
method = cv2.RANSAC
#method = cv2.LMEDS
if transform == "homography":
M, status = cv2.findHomography(p1, p2, method, tol)
elif transform == "fundamental":
M, status = cv2.findFundamentalMat(p1, p2, method, tol)
elif transform == "essential":
M, status = cv2.findEssentialMat(p1, p2, K, method, threshold=tol)
elif transform == "none":
status = np.ones(len(matches))
else:
# fail
M, status = None, None
log(" %s vs %s: %d / %d inliers/matched" % (i1.name, i2.name, np.sum(status), len(status)))
# remove outliers
for k, flag in enumerate(status):
if not flag:
# print(" deleting: " + str(matches[k]))
clean = False
matches[k] = (-1, -1)
for pair in reversed(matches):
if pair == (-1, -1):
matches.remove(pair)
return clean
# return a count of unique matches
def count_unique(i1, i2, matches_fit):
idx_pairs = []
for m in matches_fit:
idx_pairs.append( [m.queryIdx, m.trainIdx] )
idx_pairs = filter_duplicates(i1, i2, idx_pairs)
return len(idx_pairs)
# Filter duplicate features. SIFT (for example) can detect the same
# feature at different scales/orientations which can lead to duplicate
# match pairs, or possibly one feature in image1 matching two or more
# features in images2. Find and remove these from the set.
def filter_duplicates(i1, i2, idx_pairs):
count = 0
result = []
kp1_dict = {}
kp2_dict = {}
for pair in idx_pairs:
kp1 = i1.kp_list[pair[0]]
kp2 = i2.kp_list[pair[1]]
key1 = "%.2f-%.2f" % (kp1.pt[0], kp1.pt[1])
key2 = "%.2f-%.2f" % (kp2.pt[0], kp2.pt[1])
if key1 in kp1_dict and key2 in kp2_dict:
# print("image1 and image2 key point already used:", key1, key2)
count += 1
elif key1 in kp1_dict:
# print("image1 key point already used:", key1)
count += 1
elif key2 in kp2_dict:
# print( "image2 key point already used:", key2)
count += 1
else:
kp1_dict[key1] = True
kp2_dict[key2] = True
result.append(pair)
if count > 0:
qlog(" removed %d/%d duplicate features" % (count, len(idx_pairs)))
return result
# iterate through idx_pairs1 and mark/remove any pairs that don't
# exist in idx_pairs2. Then recreate idx_pairs2 as the inverse of
# idx_pairs1
def filter_cross_check(idx_pairs1, idx_pairs2):
new1 = []
new2 = []
for k, pair in enumerate(idx_pairs1):
rpair = [pair[1], pair[0]]
for r in idx_pairs2:
#print "%s - %s" % (rpair, r)
if rpair == r:
new1.append( pair )
new2.append( rpair )
break
if len(idx_pairs1) != len(new1) or len(idx_pairs2) != len(new2):
qlog(" cross check: (%d, %d) => (%d, %d)" % (len(idx_pairs1), len(idx_pairs2), len(new1), len(new2)))
return new1, new2
# run the knn matcher for the two sets of keypoints
def raw_matches(i1, i2, k=2):
# sanity check
if i1.des_list is None or i2.des_list is None:
return []
if len(i1.des_list.shape) == 0 or i1.des_list.shape[0] <= 1:
return []
if len(i2.des_list.shape) == 0 or i2.des_list.shape[0] <= 1:
return []
matches = the_matcher.knnMatch(np.array(i1.des_list),
np.array(i2.des_list),
k=k)
qlog(" raw matches:", len(matches))
return matches
def basic_pair_matches(i1, i2):
matches = raw_matches(i1, i2)
match_ratio = matcher_node.getFloat('match_ratio')
sum = 0.0
max_good = 0
sum_good = 0.0
count_good = 0
for m in matches:
sum += m[0].distance
if m[0].distance <= m[1].distance * match_ratio:
sum_good += m[0].distance
count_good += 1
if m[0].distance > max_good:
max_good = m[0].distance
qlog(" avg dist:", sum / len(matches))
if count_good:
qlog(" avg good dist:", sum_good / count_good, "(%d)" % count_good)
qlog(" max good dist:", max_good)
if False:
# filter by absolute distance (for ORB, statistically all real
# matches will have a distance < 64, for SIFT I don't know,
# but I'm guessing anything more than 270.0 is a bad match.
matches_thresh = []
for m in matches:
if m[0].distance < max_distance and m[0].distance <= m[1].distance * match_ratio:
matches_thresh.append(m[0])
qlog(" quality matches:", len(matches_thresh))
if True:
# generate a quality metric for each match, sort and only
# pass along the best 'n' matches that pass the distance
# ratio test. (Testing the idea that 2000 matches aren't
# better than 20 if they are good matches with respect to
# optimizing the fit.)
by_metric = []
for m in matches:
ratio = m[0].distance / m[1].distance # smaller is better
metric = m[0].distance * ratio
by_metric.append( [metric, m[0]] )
by_metric = sorted(by_metric, key=lambda fields: fields[0])
matches_thresh = []
for line in by_metric:
if line[0] < max_distance * match_ratio:
matches_thresh.append(line[1])
qlog(" quality matches:", len(matches_thresh))
# fixme, make this a command line option or parameter?
mymax = 2000
if len(matches_thresh) > mymax:
# clip list to n best rated matches
matches_thresh = matches_thresh[:mymax]
qlog(" clipping to:", mymax)
if len(matches_thresh) < min_pairs:
# just quit now
return []
w, h = camera.get_image_params()
if not w or not h:
log("Zero image sizes will crash matchGMS():", w, h)
log("Recommend removing all meta/*.feat files and")
log("rerun the matching step.")
log("... or do some coding to add this information to the")
log("ImageAnalysis/meta/<image_name>.json files")
quit()
size = (w, h)
matchesGMS = cv2.xfeatures2d.matchGMS(size, size, i1.kp_list, i2.kp_list, matches_thresh, withRotation=True, withScale=False, thresholdFactor=5.0)
#matchesGMS = cv2.xfeatures2d.matchGMS(size, size, i1.uv_list, i2.uv_list, matches_thresh, withRotation=True, withScale=False)
#print('matchesGMS:', matchesGMS)
idx_pairs = []
for i, m in enumerate(matchesGMS):
idx_pairs.append( [m.queryIdx, m.trainIdx] )
# check for duplicate matches (based on different scales or attributes)
idx_pairs = filter_duplicates(i1, i2, idx_pairs)
qlog(" initial matches =", len(idx_pairs))
if len(idx_pairs) < min_pairs:
# so sorry
return []
else:
return idx_pairs
# do initial feature matching (both ways) for the specified image
# pair.
def bidirectional_pair_matches(i1, i2, review=False):
if i1 == i2:
log("We shouldn't see this, but i1 == i2", i1.name, i2.name)
return [], []
# all vs. all match between overlapping i1 keypoints and i2
# keypoints (forward match)
idx_pairs1 = basic_pair_matches(i1, i2)
if len(idx_pairs1) >= min_pairs:
idx_pairs2 = basic_pair_matches(i2, i1)
else:
# save some time
idx_pairs2 = []
idx_pairs1, idx_pairs2 = filter_cross_check(idx_pairs1, idx_pairs2)
if False:
plot_matches(i1, i2, idx_pairs1)
plot_matches(i2, i1, idx_pairs2)
if review:
if len(idx_pairs1):
status, key = self.showMatchOrient(i1, i2, idx_pairs1)
# remove deselected pairs
for k, flag in enumerate(status):
if not flag:
print(" deleting: " + str(idx_pairs1[k]))
idx_pairs1[k] = (-1, -1)
for pair in reversed(idx_pairs1):
if pair == (-1, -1):
idx_pairs1.remove(pair)
if len(idx_pairs2):
status, key = self.showMatchOrient(i2, i1, idx_pairs2)
# remove deselected pairs
for k, flag in enumerate(status):
if not flag:
print(" deleting: " + str(idx_pairs2[k]))
idx_pairs2[k] = (-1, -1)
for pair in reversed(idx_pairs2):
if pair == (-1, -1):
idx_pairs2.remove(pair)
return idx_pairs1, idx_pairs2
def gen_grid(w, h, steps):
grid_list = []
u_list = np.linspace(0, w, steps + 1)
v_list = np.linspace(0, h, steps + 1)
for v in v_list:
for u in u_list:
grid_list.append( [u, v] )
return grid_list
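# Illustrative sketch (the image size is an assumption): gen_grid(4000, 3000, 8)
# yields (8+1)**2 = 81 [u, v] points spanning the image, which
# smart_pair_matches() projects onto the estimated ground plane to seed a
# starting homography between the two views.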
def smart_pair_matches(i1, i2, review=False, est_rotation=False):
# common camera parameters
K = camera.get_K()
IK = np.linalg.inv(K)
dist_coeffs = camera.get_dist_coeffs()
w, h = camera.get_image_params()
diag = int(math.sqrt(h*h + w*w))
print("h:", h, "w:", w, "diag:", diag)
grid_steps = 8
grid_list = gen_grid(w, h, grid_steps)
# consider estimated yaw error and estimated surface elevation
# from previous successful matches.
if matcher_node.hasChild("ground_m"):
ground_m = matcher_node.getFloat("ground_m")
log("Forced ground:", ground_m)
else:
ground_m = smart.get_surface_estimate(i1, i2)
# if ground_m is None:
# g1 = i1.node.getFloat("srtm_surface_m")
# g2 = i2.node.getFloat("srtm_surface_m")
# ground_m = (g1 + g2) * 0.5
# qlog(" SRTM ground (no triangulation yet): %.1f" % ground_m)
# else:
log(" Ground estimate: %.1f" % ground_m)
i1_yaw_error = smart.get_yaw_error_estimate(i1)
i2_yaw_error = smart.get_yaw_error_estimate(i2)
# inherit partner yaw error if none computed yet.
if abs(i1_yaw_error) < 0.0001 and abs(i2_yaw_error) > 0.0001:
i1_yaw_error = i2_yaw_error
if abs(i1_yaw_error) > 0.0001 and abs(i2_yaw_error) < 0.0001:
i2_yaw_error = i1_yaw_error
print("smart yaw errors:", i1_yaw_error, i2_yaw_error)
R2 = transformations.rotation_matrix(i2_yaw_error*d2r, [1, 0, 0])[:3,:3]
print("R2:\n", R2)
match_ratio = matcher_node.getFloat("match_ratio")
if review:
rgb1 = i1.load_rgb()
rgb2 = i2.load_rgb()
# project a grid of uv coordinates from image 2 out onto the
# supposed ground plane. Then back project these 3d world points
# into image 1 uv coordinates. Compute an estimated 'ideal'
# homography relationship between the two images as a starting
# search point for feature matches.
if est_rotation:
print("body2ned:\n", i2.get_body2ned())
smart_body2ned = np.dot(i2.get_body2ned(), R2)
print("smart body2ned:\n", smart_body2ned)
else:
smart_body2ned = i2.get_body2ned()
proj_list = project.projectVectors( IK, smart_body2ned,
i2.get_cam2body(),
grid_list )
ned2, ypr2, quat2 = i2.get_camera_pose()
if -ned2[2] < ground_m:
ground_m = -ned2[2] - 2
pts_ned = project.intersectVectorsWithGroundPlane(ned2, ground_m,
proj_list)
if False and review:
plot_list = []
for p in pts_ned:
plot_list.append( [p[1], p[0]] )
plot_list = np.array(plot_list)
plt.figure()
plt.plot(plot_list[:,0], plot_list[:,1], 'ro')
plt.show()
if est_rotation:
rvec1, tvec1 = i1.get_proj(opt=False, yaw_error_est=i1_yaw_error)
else:
rvec1, tvec1 = i1.get_proj(opt=False)
reproj_points, jac = cv2.projectPoints(np.array(pts_ned), rvec1, tvec1,
K, dist_coeffs)
reproj_list = reproj_points.reshape(-1,2).tolist()
# print("reprojected points:", reproj_list)
# print("Should filter points outside of 2nd image space here and now!")
# affine, status = \
# cv2.estimateAffinePartial2D(np.array([reproj_list]).astype(np.float32),
# np.array([grid_list]).astype(np.float32))
# (rot, tx, ty, sx, sy) = decomposeAffine(affine)
# print("Affine:")
# print("Rotation (deg):", rot)
# print("Translation (pixels):", tx, ty)
# print("Skew:", sx, sy)
H, status = cv2.findHomography(np.array([reproj_list]).astype(np.float32),
np.array([grid_list]).astype(np.float32),
0)
if review:
# draw what we estimated
print("Preliminary H:", H)
i1_new = cv2.warpPerspective(rgb1, H, (rgb1.shape[1], rgb1.shape[0]))
blend = cv2.addWeighted(i1_new, 0.5, rgb2, 0.5, 0)
blend = cv2.resize(blend, (int(w*detect_scale), int(h*detect_scale)))
cv2.imshow('blend', blend)
print("Press a key:")
cv2.waitKey()
matches = raw_matches(i1, i2, k=3)
print("Raw matches:", len(matches))
best_fitted_matches = 20 # don't proceed if we can't beat this value
matches_best = []
src_pts = np.float32([i1.kp_list[i].pt for i in range(len(i1.kp_list))]).reshape(-1, 1, 2)
dst_pts = np.float32([i2.kp_list[i].pt for i in range(len(i2.kp_list))]).reshape(-1, 1, 2)
while True:
# print('H:', H)
trans_pts = cv2.perspectiveTransform(src_pts, H)
print("collect stats...")
match_stats = []
for i, m in enumerate(matches):
best_index = -1
best_metric = 9
best_angle = 0
best_size = 0
best_dist = 0
for j in range(len(m)):
if m[j].distance >= 300:
break
ratio = m[0].distance / m[j].distance
if ratio < match_ratio:
break
p1 = trans_pts[m[j].queryIdx]
p2 = dst_pts[m[j].trainIdx]
#print(p1, p2)
raw_dist = np.linalg.norm(p2 - p1)
s1 = np.array(i1.kp_list[m[j].queryIdx].size)
s2 = np.array(i2.kp_list[m[j].trainIdx].size)
if s1 > s2:
size_diff = s1 / s2
else:
size_diff = s2 / s1
if size_diff > 1.25:
continue
metric = raw_dist * size_diff / ratio
#print(" ", j, m[j].distance, size_diff, metric)
if best_index < 0 or metric < best_metric:
best_metric = metric
best_index = j
best_dist = raw_dist
if best_index >= 0:
match_stats.append( [ m[best_index], best_dist ] )
tol = int(diag*0.005)
if tol < 5: tol = 5
cutoffs = [ 32, 64, 128, 256, 512, 1024, 2048 ]
dist_bins = [[] for i in range(len(cutoffs))]
print("bins:", len(dist_bins))
for line in match_stats:
m = line[0]
best_dist = line[1]
for i, d in enumerate(cutoffs):
if best_dist < cutoffs[i]:
dist_bins[i].append(m)
done = True
for i, dist_matches in enumerate(dist_bins):
print("bin:", i, "cutoff:", cutoffs[i], "len:", len(dist_matches))
if len(dist_matches) >= min_pairs:
src = np.float32([src_pts[m.queryIdx] for m in dist_matches]).reshape(1, -1, 2)
dst = np.float32([dst_pts[m.trainIdx] for m in dist_matches]).reshape(1, -1, 2)
H_test, status = cv2.findHomography(src, dst, cv2.RANSAC, tol)
num_fit = np.count_nonzero(status)
matches_fit = []
matches_dist = []
for i, m in enumerate(dist_matches):
if status[i]:
matches_fit.append(m)
matches_dist.append(m.distance)
num_unique = count_unique(i1, i2, matches_fit)
print(" fit:", num_fit, "unique:", num_unique)
if num_unique > best_fitted_matches:
done = False
# affine, astatus = \
# cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
# np.array([dst]).astype(np.float32))
# (rot, tx, ty, sx, sy) = decomposeAffine(affine)
# print("Affine:")
# print("Rotation (deg):", rot)
# print("Translation (pixels):", tx, ty)
# print("Skew:", sx, sy)
H = np.copy(H_test)
matches_best = list(matches_fit) # copy
# print("H:", H)
best_fitted_matches = num_unique
print("Filtered matches:", len(dist_matches),
"Fitted matches:", len(matches_fit),
"Unique matches:", num_unique)
#print("metric cutoff:", best_metric)
matches_dist = np.array(matches_dist)
print("avg match quality:", np.average(matches_dist))
print("max match quality:", np.max(matches_dist))
if review:
i1_new = cv2.warpPerspective(rgb1, H, (rgb1.shape[1], rgb1.shape[0]))
blend = cv2.addWeighted(i1_new, 0.5, rgb2, 0.5, 0)
blend = cv2.resize(blend, (int(w*detect_scale), int(h*detect_scale)))
#draw_inlier(rgb1, rgb2, i1.kp_list, i2.kp_list, matches_fit, 'ONLY_LINES', args.scale)
cv2.imshow('blend', blend)
# check for diminishing returns and bail early
#print(best_fitted_matches)
#if best_fitted_matches > 50:
# break
if review:
print("Press a key:")
cv2.waitKey()
if done:
break
if len(matches_best) >= min_pairs:
idx_pairs = []
for m in matches_best:
idx_pairs.append( [m.queryIdx, m.trainIdx] )
idx_pairs = filter_duplicates(i1, i2, idx_pairs)
if len(idx_pairs) >= min_pairs:
rev_pairs = []
for p in idx_pairs:
rev_pairs.append( [p[1], p[0]] )
qlog(" found matches =", len(idx_pairs))
return idx_pairs, rev_pairs
return [], []
def ratio_pair_matches(i1, i2, review=False, est_rotation=False):
if review:
rgb1 = i1.load_rgb()
rgb2 = i2.load_rgb()
matches = raw_matches(i1, i2, k=2)
print("Raw matches:", len(matches))
best_fitted_matches = 20 # don't proceed if we can't beat this value
matches_best = []
w, h = camera.get_image_params()
diag = int(math.sqrt(h*h + w*w))
print("h:", h, "w:", w, "diag:", diag)
tol = int(round(diag*0.005))
if tol < 5: tol = 5
src_pts = np.float32([i1.kp_list[i].pt for i in range(len(i1.kp_list))]).reshape(-1, 1, 2)
dst_pts = np.float32([i2.kp_list[i].pt for i in range(len(i2.kp_list))]).reshape(-1, 1, 2)
print("collect stats...")
match_stats = []
for i, m in enumerate(matches):
ratio = m[0].distance / m[1].distance
match_stats.append( [ m[0], ratio ] )
cutoffs = [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85 ]
dist_bins = [[] for i in range(len(cutoffs))]
print("bins:", len(dist_bins))
for line in match_stats:
m = line[0]
ratio = line[1]
for i, d in enumerate(cutoffs):
if ratio <= cutoffs[i]:
dist_bins[i].append(m)
for i, dist_matches in enumerate(dist_bins):
print("bin:", i, "cutoff:", cutoffs[i], "len:", len(dist_matches))
if len(dist_matches) >= min_pairs:
src = np.float32([src_pts[m.queryIdx] for m in dist_matches]).reshape(1, -1, 2)
dst = np.float32([dst_pts[m.trainIdx] for m in dist_matches]).reshape(1, -1, 2)
H_test, status = cv2.findHomography(src, dst, cv2.RANSAC, tol)
num_fit = np.count_nonzero(status)
matches_fit = []
matches_dist = []
for i, m in enumerate(dist_matches):
if status[i]:
matches_fit.append(m)
matches_dist.append(m.distance)
num_unique = count_unique(i1, i2, matches_fit)
print(" fit:", num_fit, "unique:", num_unique)
if num_unique > best_fitted_matches:
# affine, astatus = \
# cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
# np.array([dst]).astype(np.float32))
# (rot, tx, ty, sx, sy) = decomposeAffine(affine)
# print("Affine:")
# print("Rotation (deg):", rot)
# print("Translation (pixels):", tx, ty)
# print("Skew:", sx, sy)
H = np.copy(H_test)
matches_best = list(matches_fit) # copy
# print("H:", H)
best_fitted_matches = num_unique
print("Filtered matches:", len(dist_matches),
"Fitted matches:", len(matches_fit),
"Unique matches:", num_unique)
#print("metric cutoff:", best_metric)
matches_dist = np.array(matches_dist)
print("avg match quality:", np.average(matches_dist))
print("max match quality:", np.max(matches_dist))
if review:
i1_new = cv2.warpPerspective(rgb1, H, (rgb1.shape[1], rgb1.shape[0]))
blend = cv2.addWeighted(i1_new, 0.5, rgb2, 0.5, 0)
blend = cv2.resize(blend, (int(w*detect_scale), int(h*detect_scale)))
#draw_inlier(rgb1, rgb2, i1.kp_list, i2.kp_list, matches_fit, 'ONLY_LINES', args.scale)
cv2.imshow('blend', blend)
# check for diminishing returns and bail early
#print(best_fitted_matches)
#if best_fitted_matches > 50:
# break
if review:
print("Press a key:")
cv2.waitKey()
if len(matches_best) >= min_pairs:
idx_pairs = []
for m in matches_best:
idx_pairs.append( [m.queryIdx, m.trainIdx] )
idx_pairs = filter_duplicates(i1, i2, idx_pairs)
if len(idx_pairs) >= min_pairs:
rev_pairs = []
for p in idx_pairs:
rev_pairs.append( [p[1], p[0]] )
qlog(" found matches =", len(idx_pairs))
return idx_pairs, rev_pairs
return [], []
def bruteforce_pair_matches(i1, i2, review=False):
match_ratio = matcher_node.getFloat('match_ratio')
w, h = camera.get_image_params()
diag = int(math.sqrt(h*h + w*w))
print("h:", h, "w:", w)
print("scaled diag:", diag)
if review:
rgb1 = i1.load_rgb()
rgb2 = i2.load_rgb()
matches = raw_matches(i1, i2, k=3)
qlog(" collect stats...")
match_stats = []
for i, m in enumerate(matches):
best_index = -1
best_metric = 9
best_angle = 0
best_size = 0
best_dist = 0
best_vangle = 0
for j in range(len(m)):
if m[j].distance >= 290:
break
ratio = m[0].distance / m[j].distance
if ratio < match_ratio:
break
p1 = np.float32(i1.kp_list[m[j].queryIdx].pt)
p2 = np.float32(i2.kp_list[m[j].trainIdx].pt)
v = p2 - p1
raw_dist = np.linalg.norm(v)
vangle = math.atan2(v[1], v[0])
if vangle < 0: vangle += 2*math.pi
# angle difference mapped to +/- 90
a1 = np.array(i1.kp_list[m[j].queryIdx].angle)
a2 = np.array(i2.kp_list[m[j].trainIdx].angle)
angle_diff = abs((a1-a2+90) % 180 - 90)
s1 = np.array(i1.kp_list[m[j].queryIdx].size)
s2 = np.array(i2.kp_list[m[j].trainIdx].size)
if s1 > s2:
size_diff = s1 / s2
else:
size_diff = s2 / s1
if size_diff > 1.25:
continue
metric = size_diff / ratio
#print(" ", j, m[j].distance, size_diff, metric)
if best_index < 0 or metric < best_metric:
best_metric = metric
best_index = j
best_angle = angle_diff
best_size = size_diff
best_dist = raw_dist
best_vangle = vangle
if best_index >= 0:
#print(i, best_index, m[best_index].distance, best_size, best_metric)
match_stats.append( [ m[best_index], best_index, ratio, best_metric,
best_angle, best_size, best_dist, best_vangle ] )
maxdist = int(diag*0.55)
maxrange = int(diag*0.02)
divs = 40
step = maxdist / divs # 0.1
tol = int(diag*0.005)
if tol < 5: tol = 5
best_fitted_matches = 0
dist_bins = [[] for i in range(divs + 1)]
print("bins:", len(dist_bins))
for line in match_stats:
best_dist = line[6]
bin = int(round(best_dist / step))
if bin < len(dist_bins):
dist_bins[bin].append(line)
if bin > 0:
dist_bins[bin-1].append(line)
if bin < len(dist_bins) - 1:
dist_bins[bin+1].append(line)
matches_fit = []
for i, dist_matches in enumerate(dist_bins):
print("bin:", i, "len:", len(dist_matches))
best_of_bin = 0
divs = 20
step = 2*math.pi / divs
angle_bins = [[] for i in range(divs + 1)]
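# same overlapping-bin trick applied to the match vector angle, with wrap-around
# at 0 / 2*pi because the angle is circular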
for line in dist_matches:
match = line[0]
vangle = line[7]
bin = int(round(vangle / step))
angle_bins[bin].append(match)
if bin == 0:
angle_bins[-1].append(match)
angle_bins[bin+1].append(match)
elif bin == divs:
angle_bins[bin-1].append(match)
angle_bins[0].append(match)
else:
angle_bins[bin-1].append(match)
angle_bins[bin+1].append(match)
for angle_matches in angle_bins:
if len(angle_matches) >= min_pairs:
src = []
dst = []
for m in angle_matches:
src.append( i1.kp_list[m.queryIdx].pt )
dst.append( i2.kp_list[m.trainIdx].pt )
H, status = cv2.findHomography(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32),
cv2.RANSAC,
tol)
num_fit = np.count_nonzero(status)
if num_fit > best_of_bin:
best_of_bin = num_fit
if num_fit > best_fitted_matches:
matches_fit = []
matches_dist = []
for k, m in enumerate(angle_matches): # k, not i: keep the outer bin index intact for the print below
if status[k]:
matches_fit.append(m)
matches_dist.append(m.distance)
best_fitted_matches = num_fit
print("Filtered matches:", len(angle_matches),
"Fitted matches:", num_fit)
matches_dist = np.array(matches_dist)
print("avg match quality:", np.average(matches_dist))
print("max match quality:", np.max(matches_dist))
if review:
i1_new = cv2.warpPerspective(rgb1, H, (rgb1.shape[1], rgb1.shape[0]))
blend = cv2.addWeighted(i1_new, 0.5, rgb2, 0.5, 0)
blend = cv2.resize(blend, (int(w*detect_scale), int(h*detect_scale)))
cv2.imshow('blend', blend)
#draw_inlier(rgb1, rgb2, i1.kp_list, i2.kp_list, matches_fit, 'ONLY_LINES', detect_scale)
# check for diminishing returns and bail early
print("bin:", i, "len:", len(dist_matches),
best_fitted_matches, best_of_bin)
if best_fitted_matches > 50 and best_of_bin < 10:
break
if review:
cv2.waitKey()
if len(matches_fit) >= min_pairs:
idx_pairs = []
for m in matches_fit:
idx_pairs.append( [m.queryIdx, m.trainIdx] )
idx_pairs = filter_duplicates(i1, i2, idx_pairs)
if len(idx_pairs) >= min_pairs:
rev_pairs = []
for p in idx_pairs:
rev_pairs.append( [p[1], p[0]] )
qlog(" initial matches =", len(idx_pairs))
return idx_pairs, rev_pairs
return [], []
def find_matches(proj, K, strategy="smart", transform="homography",
sort=False, review=False):
n = len(proj.image_list) - 1
n_work = float(n*(n+1)/2)
t_start = time.time()
intervals = []
for i in range(len(proj.image_list)-1):
ned1, ypr1, q1 = proj.image_list[i].get_camera_pose()
ned2, ypr2, q2 = proj.image_list[i+1].get_camera_pose()
dist = np.linalg.norm(np.array(ned2) - np.array(ned1))
intervals.append(dist)
print(i, dist)
median = np.median(intervals)
log("Median pair interval: %.1f m" % median)
median_int = int(round(median))
if median_int == 0:
median_int = 1
if matcher_node.hasChild("min_dist"):
min_dist = matcher_node.getFloat("min_dist")
else:
min_dist = 0
if matcher_node.hasChild("max_dist"):
max_dist = matcher_node.getFloat("max_dist")
else:
max_dist = median_int * 4
log('Generating work list for range:', min_dist, '-', max_dist)
work_list = []
for i, i1 in enumerate(tqdm(proj.image_list, smoothing=0.05)):
ned1, ypr1, q1 = i1.get_camera_pose()
for j, i2 in enumerate(proj.image_list):
if j <= i:
continue
# camera pose distance check
ned2, ypr2, q2 = i2.get_camera_pose()
dist = np.linalg.norm(
|
np.array(ned2)
|
numpy.array
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 22:53:43 2018
@author: weixijia
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas
import math
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, concatenate, LSTM, TimeDistributed
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA,TruncatedSVD
from sklearn.metrics import mean_squared_error
from keras.optimizers import Adam, RMSprop
from keras.utils import plot_model
from keras.callbacks import EarlyStopping, Callback, TensorBoard
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
class BatchTensorBoard(TensorBoard):
def __init__(self,log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False):
super(BatchTensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
self.batch = 0
self.batch_queue = set()
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_end(self,batch,logs=None):
logs = logs or {}
self.batch = self.batch + 1
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = float(value)
summary_value.tag = "batch_" + name
if (name,self.batch) in self.batch_queue:
continue
self.writer.add_summary(summary, self.batch)
self.batch_queue.add((name,self.batch))
self.writer.flush()
def moving_average(x, n, type='simple'):
x = np.asarray(x)
if type == 'simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
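# usage sketch (assumption, not part of the original script): smooth the
# per-batch losses recorded by LossHistory with a 5-point simple window;
# note the first n outputs are back-filled with a[n] to hide the convolution warm-up
#   history = LossHistory()   # passed to model.fit(callbacks=[history])
#   smoothed = moving_average(history.losses, 5, type='simple')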
def load_file(filepath, time_step):
dataframe = pandas.read_csv(filepath, usecols=[2,3,4,5,6,7,8,9,10,11,12,13,14,15], engine='python',skipfooter=0)
skipfooter = len(dataframe)-((len(dataframe)//time_step)*time_step)
dataframe = pandas.read_csv(filepath, usecols=[2,3,4,5,6,7,8,9,10,11,12,13,14,15], engine='python',skipfooter=skipfooter)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
#sample_num=dataframe.shape[0]//time_step
if time_step==1:
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
else:
lat=(dataframe.lat.unique())[:(dataframe.shape[0]//time_step)].reshape(-1, 1)
lng=(dataframe.lng.unique())[:(dataframe.shape[0]//time_step)].reshape(-1, 1)
location=np.column_stack((lat,lng))
return sensordata, location
def normolization(sensordata, location, time_step):
scaler = MinMaxScaler(feature_range=(0, 1))
sensordata = scaler.fit_transform(sensordata)
#lat = scaler.fit_transform(location[:,0].reshape(-1,1)) #lat=location[:,0].reshape(-1,1)
#lng = scaler.fit_transform(location[:,1].reshape(-1,1)) #lng=location[:,1].reshape(-1,1)
location=scaler.fit_transform(location)
#sensordata = dataset[:,0:(dataset.shape[1]-2)]#get acc,gyr,mag
SensorTrain=np.reshape(sensordata, ((sensordata.shape[0]//time_step),time_step,sensordata.shape[1]))
return SensorTrain, location
def dataprocessing(filepath, feature_num, time_step):#integrate load_file and normolization functions together, just for convenience
dataframe = pandas.read_csv(filepath, engine='python')
skipfooter = len(dataframe)-((len(dataframe)//time_step)*time_step)
df_length = dataframe.shape[1]
if feature_num==0:
usecols = [i for i in range(2,df_length)]
dataframe = pandas.read_csv(filepath, usecols=usecols, engine='python', skipfooter=skipfooter)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
location=np.column_stack((lat,lng))
scaler = MinMaxScaler(feature_range=(0, 1))
sensordata = scaler.fit_transform(sensordata)
location=scaler.fit_transform(location)
SensorTrain=np.reshape(sensordata, ((sensordata.shape[0]//time_step),time_step,sensordata.shape[1]))
elif feature_num==1:
dataframe = pandas.read_csv(filepath, engine='python',skipfooter=0)
skipfooter = len(dataframe)-((len(dataframe)//time_step)*time_step)
df_length = dataframe.shape[1]
usecols = [i for i in range(2,df_length)]
dataframe = pandas.read_csv(filepath, usecols=usecols, engine='python',skipfooter=skipfooter)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
location=np.column_stack((lat,lng))
scaler = MinMaxScaler(feature_range=(0, 1))
sensordata = scaler.fit_transform(sensordata)
location=scaler.fit_transform(location)
SensorTrain=np.reshape(sensordata, ((sensordata.shape[0]//time_step), time_step,sensordata.shape[1]))
location=np.reshape(location, ((location.shape[0]//time_step), time_step, location.shape[1]))
else:
if feature_num==3:
usecols=[13,14,15]
elif feature_num==9:
usecols=[2,3,4,5,6,7,8,9,10,11,12]
elif feature_num==12:
usecols=[2,3,4,5,6,7,8,9,10,11,12,13,14,15]
dataframe = pandas.read_csv(filepath, usecols=usecols, engine='python', skipfooter=skipfooter)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
if time_step==1:
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
else:
lat=(dataframe.lat.unique())[:(dataframe.shape[0]//time_step)].reshape(-1, 1)
lng=(dataframe.lng.unique())[:(dataframe.shape[0]//time_step)].reshape(-1, 1)
location=np.column_stack((lat,lng))
scaler = MinMaxScaler(feature_range=(0, 1))
sensordata = scaler.fit_transform(sensordata)
location=scaler.fit_transform(location)
SensorTrain=np.reshape(sensordata, ((sensordata.shape[0]//time_step),time_step,sensordata.shape[1]))
return SensorTrain, location
#include return sequence=True which reshape the label to 3d
def dataprocessing_overlap(filepath, time_step):#integrate load_file and normolization functions together, just for convenience
dataframe = pandas.read_csv(filepath, engine='python',skipfooter=0)
skipfooter = len(dataframe)-((len(dataframe)//time_step)*time_step)
df_length = dataframe.shape[1]
usecols = [i for i in range(2,df_length)]
dataframe = pandas.read_csv(filepath, usecols=usecols, engine='python',skipfooter=skipfooter)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
location=np.column_stack((lat,lng))
scaler = MinMaxScaler(feature_range=(0, 1))
sensordata = scaler.fit_transform(sensordata)
location=scaler.fit_transform(location)
lat=scaler.fit_transform(lat)
lng=scaler.fit_transform(lng)
SensorTrain=np.reshape(sensordata, ((sensordata.shape[0]//time_step), time_step,sensordata.shape[1]))
location=np.reshape(location, ((location.shape[0]//time_step), time_step, location.shape[1]))
lat=np.reshape(lat, ((lat.shape[0]//time_step), time_step, lat.shape[1]))
lng=np.reshape(lng, ((lng.shape[0]//time_step), time_step, lng.shape[1]))
return SensorTrain, location, lat, lng
def overlapping(filepath,feature_num, time_step):
dataframe = pandas.read_csv(filepath, engine='python',skipfooter=0)
df_length = dataframe.shape[1]
usecols = [i for i in range(2,df_length)]
dataframe = pandas.read_csv(filepath, usecols=usecols, engine='python',skipfooter=0)
dataset = dataframe.values
dataset = dataset.astype('float64')
sensordata = dataset[:,0:(dataset.shape[1]-2)]
lat=np.array(dataframe['lat']).reshape(-1, 1)
lng=np.array(dataframe['lng']).reshape(-1, 1)
location=np.column_stack((lat,lng))
ttt=np.zeros(feature_num)
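# each CSV row concatenates time_step consecutive samples per channel
# (ch0[t0..t(N-1)], ch1[t0..t(N-1)], ...); the loops below slice those blocks out
# and restack them so every row of ttt is one time sample with feature_num columns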
if feature_num==3:
for i in range (len(sensordata)):
k=sensordata[i,time_step*9:time_step*10];l=sensordata[i,time_step*10:time_step*11];m=sensordata[i,time_step*11:time_step*12]
k=k.reshape(-1,1);l=l.reshape(-1,1);m=m.reshape(-1,1);
abc=np.column_stack((k,l,m))
ttt=np.vstack((ttt,abc))
ttt=ttt[1:,:]
elif feature_num==12:
for i in range (len(sensordata)):
a=sensordata[i,0:time_step];b=sensordata[i,time_step:time_step*2];c=sensordata[i,time_step*2:time_step*3]
d=sensordata[i,time_step*3:time_step*4];e=sensordata[i,time_step*4:time_step*5];f=sensordata[i,time_step*5:time_step*6]
g=sensordata[i,time_step*6:time_step*7];h=sensordata[i,time_step*7:time_step*8];j=sensordata[i,time_step*8:time_step*9]
k=sensordata[i,time_step*9:time_step*10];l=sensordata[i,time_step*10:time_step*11];m=sensordata[i,time_step*11:time_step*12]
a=a.reshape(-1,1);b=b.reshape(-1,1);c=c.reshape(-1,1);
d=d.reshape(-1,1);e=e.reshape(-1,1);f=f.reshape(-1,1);
g=g.reshape(-1,1);h=h.reshape(-1,1);j=j.reshape(-1,1);
k=k.reshape(-1,1);l=l.reshape(-1,1);m=m.reshape(-1,1);
abc=np.column_stack((a,b,c,d,e,f,g,h,j,k,l,m))
ttt=np.vstack((ttt,abc))
ttt=ttt[1:,:]
scaler = MinMaxScaler(feature_range=(0, 1))
ttt = scaler.fit_transform(ttt)
location=scaler.fit_transform(location)
lat=scaler.fit_transform(lat)
lng=scaler.fit_transform(lng)
SensorTrain=np.reshape(ttt, ((ttt.shape[0]//time_step), time_step, ttt.shape[1]))
return SensorTrain, location
def PCA_compress(SensorTrain,n_components):
pca = PCA(n_components=n_components)
sensor1=SensorTrain[:,:,0]
sensor2=SensorTrain[:,:,1]
sensor3=SensorTrain[:,:,2]
newData1=pca.fit_transform(sensor1)
newData2=pca.fit_transform(sensor2)
newData3=pca.fit_transform(sensor3)
SensorTrain=
|
np.dstack((newData1,newData2,newData3))
|
numpy.dstack
|
# ------------------------------- Information ------------------------------- #
# Author: <NAME> <<EMAIL>> #
# Created: Nov. 15th, 2016 #
# Description: We analyze the output of the WavMixer. #
# We compute the number of photons generated in different #
# experimental configurations: #
# - On-axis parabolic mirrors (HNA) #
# - On-axis parabolic mirrors with hole (HNA-h) #
# - Off-axis parabolic mirrors (OFF) #
# - Transmission parabolic mirrors (TRA) #
# - Transmission parabolic mirrors with hole (TRA-h) #
# Dependencies: - NumPy #
# - SciPy #
# - H5Py #
# - matplotlib #
# --------------------------------------------------------------------------- #
# --------------------------- Modules Importation --------------------------- #
import numpy as np
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
from matplotlib import ticker
import scipy.signal as signal
import scipy.integrate as integration
import scipy.interpolate as interp
import argparse
import h5py
import time
import math
import configparser
from mpl_toolkits.axes_grid1 import make_axes_locatable
import vphys
# ------------------------------ Configuration ------------------------------ #
pgf_with_pdflatex = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": [
r"\usepackage{amsmath}",
r"\usepackage{siunitx}",
#r"\usepackage{mathspec}",
r"\usepackage[charter]{mathdesign}",
r"\usepackage{fontspec}",
#r"\setmathfont{Fira Sans}",
r"\setmainfont{Oswald}",
]
}
matplotlib.rcParams.update(pgf_with_pdflatex)
# -- Fonts
matplotlib.rcParams['font.size'] = 8
matplotlib.rcParams['font.family'] = 'serif'
# -- Plots
#matplotlib.rcParams['axes.labelsize'] = 'large'
#matplotlib.rcParams['xtick.labelsize'] = 'large'
#matplotlib.rcParams['ytick.labelsize'] = 'large'
#matplotlib.rcParams['legend.numpoints'] = 5
#matplotlib.rcParams['figure.figsize'] = '4,2'
matplotlib.rcParams['axes.grid'] = True
# -------------------------------- Functions ------------------------------- #
def _infunc(x,func,gfun,hfun,more_args):
a = gfun(x)
b = hfun(x)
myargs = (x,) + more_args
return integration.quad(func,a,b,args=myargs)[0]
def custom_dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8, maxp1=100, limit=100):
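# _infunc/custom_dblquad appear to reproduce the nested-quad pattern used inside
# scipy.integrate.dblquad, while exposing maxp1 and limit so the caller can raise
# the subdivision limits (plain dblquad keeps them at their defaults)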
return integration.quad(_infunc, a, b, (func, gfun, hfun, args),epsabs=epsabs, epsrel=epsrel, maxp1=maxp1, limit=limit)
def fmt(x, pos):
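# tick formatter, presumably for matplotlib's ticker.FuncFormatter on colorbars:
# e.g. fmt(3e-05, None) -> '$3 \times\, 10^{-5}$'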
a, b = '{:1.0e}'.format(x).split('e')
b = int(b)
return r'${} \times\, 10^{{{}}}$'.format(a, b)
# -------------------------------- Constants -------------------------------- #
UNIT_MASS = 9.109382914e-31
UNIT_LENGTH = 3.86159e-13
UNIT_TIME = 1.2880885e-21
SPEED_OF_LIGHT = 299792458
EPSILON_0 = 8.85418782e-12
MU_0 = 4*np.pi*1.0e-7
ALPHA = 1.0/137.035999074
UNIT_E_FIELD = 1.3e18*np.sqrt(4*np.pi*ALPHA)
UNIT_B_FIELD = UNIT_E_FIELD/SPEED_OF_LIGHT
# -------------------- Analysis of the Number of Photons -------------------- #
# -- We analyze the number of photons generated in a given geometry. -- #
# --------------------------------------------------------------------------- #
# -- We parse the arguments.
parser = argparse.ArgumentParser()
parser.add_argument("min", type=int, help="Minimum index of simulation to analyze.")
parser.add_argument("max", type=int, help="Maximum index of simulation to analyze.")
parser.add_argument("dim", type=int, help='Dimension of the focal region.')
parser.add_argument("--geometry", dest='geom', help="Geometry under consideration.")
parser.add_argument("--prefix", dest='prefix', help="Folder prefix.")
parser.add_argument("--config", dest='configFile', help="INI file containing the parameters of the simualtion.")
args = parser.parse_args()
# We analyze the simulations with indices between min and max.
simu_dir = args.prefix+"_{0:05}.BQ".format(1)+"/../"
# -- Global analysis.
n_photons_file = open(simu_dir+args.geom+"_data.txt", 'w')
max_angle_file = open(simu_dir+args.geom+"_max_angle.txt", 'w')
# We determine if we analyze the shadow.
analyze_shadow_bool = (args.geom=="hna-h-artifical" or args.geom=="tra-h" or args.geom=="hna-h" or args.geom == "off-axis-hole")
if (analyze_shadow_bool):
n_photons_shadow_file = open(simu_dir+args.geom+"_shadow_data.txt", 'w+')
for i in range(args.min,args.max+1):
# -- We open the files.
simu_prefix = args.prefix+"_{0:05d}.BQ/{0:05d}.BQ/".format(i)
try:
n_photons_first_file = h5py.File(simu_prefix+"number_of_photons_first_harmonic.hdf5", 'r')
spatial_dist_first_file = h5py.File(simu_prefix+"spatial_dist_first_harmonic.hdf5", 'r')
n_photons_third_file = h5py.File(simu_prefix+"number_of_photons_third_harmonic.hdf5", 'r')
spatial_dist_third_file = h5py.File(simu_prefix+"spatial_dist_third_harmonic.hdf5", 'r')
config = configparser.ConfigParser(inline_comment_prefixes=";")
config.read(simu_prefix+"/"+args.configFile)
except:
continue
focal_length = float(config['Parabola']['focal_length'])
rmax = float(config['Parabola']['r_max'])
# -- We plot the total spectrum of photons for both harmonics.
n_photons_first = n_photons_first_file['/spectrum/Number of photons'][:]
wavelengths_first = n_photons_first_file['/spectrum/wavelength (m)'][:]
freqs_first = n_photons_first_file['/spectrum/frequency (Hz)'][:]
n_photons_third = n_photons_third_file['/spectrum/Number of photons'][:]
wavelengths_third = n_photons_third_file['/spectrum/wavelength (m)'][:]
freqs_third = n_photons_third_file['/spectrum/frequency (Hz)'][:]
phi_first = spatial_dist_first_file['/coordinates/phi'][:]
phi_first_deg = np.degrees(phi_first)
if args.dim == 3:
theta_first = spatial_dist_first_file['/coordinates/theta'][:]
theta_first_deg = np.degrees(theta_first)
# -- Support older versions of the WaveMixer.
try:
n_density_first = spatial_dist_first_file['/field/Component0'][:]
except:
n_density_first = spatial_dist_first_file['/field/ScalarField'][:]
phi_third = spatial_dist_third_file['/coordinates/phi'][:]
phi_third_deg = np.degrees(phi_third)
if args.dim == 3:
theta_third = spatial_dist_third_file['/coordinates/theta'][:]
theta_third_deg = np.degrees(theta_third)
# -- Support older versions of the WaveMixer.
try:
n_density_third = spatial_dist_third_file['/field/Component0'][:]
except:
n_density_third = spatial_dist_third_file['/field/ScalarField'][:]
# -- Determine the phi at which the emission is maximum.
max_idx_f = np.argmax(n_density_first)
if args.dim == 3:
max_phi = phi_first_deg[
|
np.unravel_index(max_idx_f, n_density_first.shape)
|
numpy.unravel_index
|
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import h5py
from ..h5_utils import read_basic_element
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def read_conm2(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'G', 'CID', 'M', 'X1', 'X2', 'X3', 'I1', 'I2', 'I3', 'DOMAIN_ID')
EID = group['EID']
NID = group['G']
CID = group['CID']
MASS = group['M']
X1 = group['X1']
X2 = group['X2']
X3 = group['X3']
I1 = group['I1']
I2 = group['I2']
I3 = group['I3']
X = np.stack([X1, X2, X3], axis=1)
DOMAIN_ID = group['DOMAIN_ID']
for eid, nid, cid, mass, x, i1, i2, i3 in zip(EID, NID, CID, MASS, X, I1, I2, I3):
i11 = i1
i21, i22 = i2
i31, i32, i33 = i3
i = [i11, i21, i22, i31, i32, i33]
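# the inertia terms above are assembled in CONM2 field order [I11, I21, I22, I31, I32, I33]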
obj = geom_model.add_conm2(eid, nid, mass, cid=cid, X=x, I=i, comment='')
obj.validate()
def read_celas1(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
PID = group['PID']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, c1, c2 in zip(EID, PID, NIDS, C1, C2):
obj = geom_model.add_celas1(eid, pid, nids, c1=c1, c2=c2, comment='')
obj.validate()
def read_celas2(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'K', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
K = group['K']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS =
|
np.stack([G1, G2], axis=1)
|
numpy.stack
|
import tensorflow as tf
import numpy as np
from PIL import Image
import dataset
# Creates a dataset from images in the "earth_engine_good" directory, using the first 4 landslides
# as the evaluation set; create_dataset does not need to be run every time.
eval_sets = [i for i in range(0,4)]
dataset.create_dataset("earth_engine_good", eval_sets)
train_xs, train_ys, eval_xs, eval_ys = dataset.load()
def print_statistics(curr_l, curr_preds, curr_y):
'''Prints accuracy on each class as well as overall accuracy and balanced accuracy'''
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(len(curr_preds)):
if curr_preds[i] == 1 and curr_y[i] == 1:
tp += 1
elif curr_preds[i] == 1 and curr_y[i] == 0:
fp += 1
elif curr_preds[i] == 0 and curr_y[i] == 1:
fn += 1
elif curr_preds[i] == 0 and curr_y[i] == 0:
tn += 1
try:
prec = tp/(tp+fp)
except(ZeroDivisionError):
prec = 0
try:
recall = tp/(tp+fn)
except(ZeroDivisionError):
recall = 0
try:
f1 = 2*prec*recall/(prec+recall)
except(ZeroDivisionError):
f1 = 0
print("Eval: Loss:{:.3f}, landslide:{}/{}, no landslide:{}/{}, accur:{:.3f}, Mean accuracy:{:.3f}".format(curr_l,
tp, tp+fn, tn, fp+tn,
(tp+tn)/(tp+tn+fp+fn),
0.5*(tp/(tp+fn)+tn/(fp+tn))))
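# worked example (not from the original): curr_preds=[1,1,0,0], curr_y=[1,0,0,1]
# gives tp=1, fp=1, tn=1, fn=1, so accuracy (tp+tn)/4 = 0.5 and the balanced
# accuracy 0.5*(tp/(tp+fn)+tn/(fp+tn)) = 0.5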
def rotate_flip_batch(x, noise_factor=0):
"""
Randomly rotates and flips examples in given batch
X is 5D array where the x and y image axes are axes 2 and 3,
noise_factor is a multiplier of how much random noise we want to add to the image,
using nonzero values of noise_factor significantly reduces performance
Return augmented 5D array
"""
#high = np.amax(x)
#print("High: ", high)
#print(x[0,0,:,:,0])
#print(x[0,0,:,:,0]*float(256/high))
#im = Image.fromarray(x[0,0,:,:,0]*256/float(high))
#im.show()
batch = x.shape[0]
rotate_degree = np.random.choice([0,1,2,3])
flip_axis = np.random.choice([0,2,3])
to_select =
|
np.random.randint(batch, size=batch//2)
|
numpy.random.randint
|
from tensorboardX import SummaryWriter
import airsim
from collections import deque
import torch
import random
import numpy as np
import time
import os
import pickle
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
from model.baseline import ResNet34, ResNet50
import copy
from tqdm import tqdm
from ddpg import DDPG, OUNoise
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def compute_center_distance(pt, car_obs_num):
pt_obs = client.simGetObjectPose("Car_"+car_obs_num).position
dist = np.sqrt((pt.x_val-pt_obs.x_val)**2 + (pt.y_val-pt_obs.y_val)**2)
return dist
def compute_distance_element(pt, A, B):
v1 = B - A
v2 = pt - A
u1 = v1/(np.sqrt(v1[0]**2+v1[1]**2))
projection_dis = v2[0]*u1[0] + v2[1]*u1[1]
AB_dis =
|
np.sqrt(v1[0]**2+v1[1]**2)
|
numpy.sqrt
|
from __future__ import absolute_import
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets.mnist import \
extract_images, extract_labels
from tensorflow.python.framework import dtypes
from .dataset import DataSet
def process_mnist(images, dtype=dtypes.float32, reshape=True):
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float32)
images =
|
np.multiply(images, 1.0 / 255.0)
|
numpy.multiply
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import wcs
from astropy import units as u
from pyregion.parser_helper import Shape
from astropy.tests.helper import pytest, remote_data
from .. import utils
def test_pyregion_subset():
header = dict(naxis=2, crpix1=15, crpix2=15, crval1=0.1, crval2=0.1,
cdelt1=-1./3600, cdelt2=1./3600., ctype1='GLON-CAR',
ctype2='GLAT-CAR')
mywcs = wcs.WCS(header)
# circle with radius 10" at 0.1, 0.1
shape = Shape('circle', (0.1, 0.1, 10./3600.))
shape.coord_format = 'galactic'
shape.coord_list = (0.1, 0.1, 10./3600.)
shape.attr = ([], {})
data = np.ones([40,40])
(xlo,xhi,ylo,yhi), d = utils.pyregion_subset(shape, data, mywcs)
assert d.sum() == 314 # approximately pi
np.testing.assert_almost_equal(xlo, data.shape[0]/2-mywcs.wcs.crpix[0]-1)
|
np.testing.assert_almost_equal(xhi, data.shape[0]-mywcs.wcs.crpix[0]-1)
|
numpy.testing.assert_almost_equal
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Distributed under terms of the MIT license.
# Copyright 2021 <NAME>.
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, precision_recall_curve, accuracy_score, confusion_matrix, f1_score
from numpy.linalg import norm
from sklearn.svm import LinearSVC
from GrahamScan import GrahamScan
from platt import *
from hsvm import *
from collections import Counter
from sklearn.preprocessing import label_binarize
from scipy.special import softmax
"""
This file contains all functions for hyperbolic perceptrons and SVM. A random data generator is included (Poincare_Uniform_Data) for synthetic experiments. Two methods (ConvexHull and QuickHull) are included to learn the reference point for tangent space.
For perceptron algorithms, we implement
1. Our hyperbolic perceptron: HP
2. Euclidean perceptron: EP
3. Hyperbolic perceptron from Weber et al. 2020: WeberHP
For SVM algorithms, we implement
1. Our hyperbolic SVM: tangent_hsvm
2. SVM from Cho et al. 2019: cho_hsvm
3. Euclidean SVM: euclidean_svm, based on sklearn.svm.LinearSVC
"""
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray',
'tab:olive']
def Parrallel_transport(v,p):
"""
Parallel transport of v from T_0 to T_p.
"""
return (1-np.linalg.norm(p)**2)*v
def Mobius_add(x,y,c=1.):
"""
Mobius addition x \oplus y.
c is the negative curvature, which defaults to 1.
"""
DeNom = 1+2*c*np.dot(x,y)+c**2*np.dot(x,x)*np.dot(y,y)
Nom = (1+2*c*np.dot(x,y)+c*np.dot(y,y))*x + (1-c*np.dot(x,x))*y
return Nom/DeNom
def Exp_map(v,p,c=1.):
"""
Exp map. v is the tangent vector in T_p and c is the negative curvature.
"""
lbda = 2/(1-c*np.dot(p,p))
temp = np.tanh(np.sqrt(c)*lbda*np.sqrt(np.dot(v,v))/2)*v/np.sqrt(c)/np.sqrt(np.dot(v,v))
return Mobius_add(p,temp,c)
def Log_map(x,p,c=1.):
"""
Log map. x is the vector in hyperbolic space, p is the reference point and c is the negative curvature.
"""
lbda = 2/(1-c*np.dot(p,p))
temp = Mobius_add(-p,x,c)
return 2/np.sqrt(c)/lbda*np.arctanh(np.sqrt(c)*np.sqrt(np.dot(temp,temp)))*temp/np.sqrt(np.dot(temp,temp))
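# minimal sanity sketch (assumption, not in the original file): Log_map should
# invert Exp_map for a tangent vector at a point inside the Poincare ball, e.g.
#   p = np.array([0.1, 0.2]); v = np.array([0.05, -0.03])
#   np.allclose(Log_map(Exp_map(v, p), p), v)   # expected to be True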
def poincare_dist(x, y):
"""
Poincare distance of two hyperbolic points x,y.
"""
return np.arccosh(1 + 2*(norm(x-y)**2)/(1-norm(x)**2)/(1-
|
norm(y)
|
numpy.linalg.norm
|
import random
import cv2
import numpy as np
import os
from copy import deepcopy
import matplotlib.pyplot as plt
from collections import Counter
def get_histogram(array):
arr = np.array(array)
vec = arr.flatten()
count = Counter(vec)
total_pixels = np.sum(list(count.values()))
n_k = []
for i in np.arange(256): # RGB
if not count[i]:
n_k.append(0)
else:
n_k.append(count[i])
n_k = n_k / total_pixels
p_rk = np.array(n_k)
dictgram = {}
for i, val in enumerate(p_rk):
dictgram[i] = val
return p_rk, dictgram
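# usage sketch (assumption): p_rk, _ = get_histogram(img) gives, for each grey
# level k in 0..255, the fraction of pixels with that value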
def get_rhomb(M=200, N=200):
black_img = np.zeros((M, N)) # Create a black image with Rhombus
center = (round(M / 2.0), round(N / 2.0))
R = N // 2
start_y, end_y = -1 * (R // 2) + R, (R // 2) + R
top_sizer = 1
for i in range(start_y, R): # Create the shape, need to find shape edges
black_img[i][R - top_sizer: R + top_sizer] = 120
top_sizer += 1
bottom_sizer = int(top_sizer)
for j in range(R, end_y + 1):
black_img[j][R - bottom_sizer: R + bottom_sizer] = 120
bottom_sizer -= 1
return black_img
def third(M=300, N=300):
rhom = get_rhomb(M, N)
kernel_blur = np.ones((5, 5)) / 25 # BLUR IT
tmp = cv2.filter2D(rhom, -1, kernel_blur)
plt.title('Rhombus with blur')
plt.imshow(tmp, cmap='gray')
plt.show()
edges = tmp.copy()
cutter = np.array([-1, 0, 1]) * 0.5 # Derivative
edges = cv2.filter2D(edges, -1, cutter)
edges = np.abs(edges) # Abs for edges
cv2.imshow('Edges', edges)
plt.title('Rhombus Edges')
plt.imshow(edges, cmap='gray')
plt.show()
def second():
path = os.getcwd()
img = cv2.imread(path + '/bliss.png', 1) # With color
clouds = (180, 180, 180)
blue = (140, 140, 140)
clouds_sky_ground = [0] * 3
for i, row in enumerate(img):
for j, triplet in enumerate(row):
b, g, r = triplet
if b > clouds[0] and g > clouds[1] and r > clouds[2]:
clouds_sky_ground[0] += 1 # Found cloud
elif b > blue[0]:
clouds_sky_ground[1] += 1
else:
clouds_sky_ground[2] += 1
print(f'Clouds:\n\t{clouds_sky_ground[0]}\nSky:\n\t{clouds_sky_ground[1]}\nPlants:\n\t{clouds_sky_ground[2]}')
def cleaner():
command = str(input('Enter a command by the letters: '
'\n\ta. Brightness\n\tb. Contrast\n\tc. cv2.threshold_TOZERO\n\td. Gamma\n\n'))
number = float(input('Enter a number:'
'\n\ta. Brightness:\n\t\t-120 <= number <= 120\n\tb. Contrast:\n\t\t0.1 <= number <= 5\n\tc. '
'cv2.threshold_TOZERO:\n\t\t20 <= number <= 200\n\td. Gamma:\n\t\t0.1 <= number <= 5\n\n'))
if not command or not number:
print('Wrong syntax')
exit(1)
real_cmd = None
real_num = None
if command == 'a':
if -120 <= number <= 120:
real_num = number
real_cmd = '+'
elif command == 'b':
if 0.1 <= number <= 5:
real_num = number
real_cmd = '*'
elif command == 'c':
if 20 <= number <= 200:
real_num = number
real_cmd = 'thresh'
elif command == 'd':
if 0.1 <= number <= 5:
real_num = number
real_cmd = '*'
else:
print('Wrong syntax')
exit(1)
if not real_cmd or not real_num:
print('Wrong syntax')
exit(1)
cmd = real_cmd
num = real_num
path = os.getcwd()
img = cv2.imread(path + '/bliss.png', cv2.IMREAD_GRAYSCALE)
org = deepcopy(img)
if cmd == '+':
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i][j] += num
elif cmd == '*':
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i][j] *= num
elif cmd == 'g':
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i][j] = ((img[i][j] / 255.0) ** num) * 255
elif cmd == 'thresh':
dummy, img = cv2.threshold(img, num, 255, cv2.THRESH_TOZERO)
else:
print('Wrong syntax')
exit(1)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i][j] = round(img[i][j])
img = img.astype(int)
cv2.imwrite(f'pic{str(cmd) + str(num)}.png', img)
plt.title('Before cleaning')
plt.imshow(img, cmap='gray')
plt.show()
cv2.imshow('Origin', org)
bad_histo, dicter1 = get_histogram(img)
entropy1 = 0
for rk in bad_histo:
if rk != 0:
entropy1 += (rk * np.log(rk))
cdf_before = np.cumsum(bad_histo)
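# classic histogram equalization: mask out the empty bins, stretch the CDF to
# [0, 255], round it, and use it as a lookup table (img_equal = cdf_after[img])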
cdf_1 = np.ma.masked_equal(cdf_before, 0)
cdf_2 = (cdf_1 - cdf_1.min()) * 255 / (cdf_1.max() - cdf_1.min())
cdf_after = np.ma.filled(cdf_2, 0)
for i in range(cdf_after.shape[0]):
cdf_after[i] = round(cdf_after[i])
cdf_after = cdf_after.astype(int)
img_equal = cdf_after[img]
good_histo, dicter2 = get_histogram(img_equal)
entropy2 = 0
for rk in good_histo:
if rk != 0:
entropy2 += (rk * np.log(rk))
plt.title('After cleaning')
plt.imshow(img_equal, cmap='gray')
plt.show()
dict1 = dict(zip(np.arange(0, 256), cdf_before * 256))
dict2 = dict(zip(
|
np.arange(0, 256)
|
numpy.arange
|
import argparse
import numpy as np
import os
import tabulate
import torch
import torch.nn.functional as F
import torch.nn as nn
from AttackPGD import AttackPGD
import data
import models
import curves
import utils
from tqdm import tqdm
import torchvision
import torchvision.transforms as transforms
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='VGG16Para-robust-robust_robust', metavar='DIR',
help='training directory (default: /tmp/eval)')
parser.add_argument('--num_points', type=int, default=31, metavar='N',
help='number of points on the curve (default: 61)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true', default=True,
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default='Data', metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default='VGG16', metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt', type=str, default='Para128-512/checkpoint-180.pt', metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
# Normalization messes with l-inf bounds.
])
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./Data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./Data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
architecture = getattr(models, args.model)
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
architecture_kwargs=architecture.kwargs,
)
model.cuda()
checkpoint = torch.load(args.ckpt)
model.load_state_dict(checkpoint['model_state'])
criterion = F.cross_entropy
regularizer = curves.l2_regularizer(args.wd)
T = args.num_points
ts = np.linspace(0.0, 1.0, T)
tr_loss = np.zeros(T)
tr_nll = np.zeros(T)
tr_acc = np.zeros(T)
te_loss = np.zeros(T)
te_exa_loss = np.zeros(T)
te_nll = np.zeros(T)
te_acc = np.zeros(T)
te_exa_acc = np.zeros(T)
tr_err =
|
np.zeros(T)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
from functools import WRAPPER_ASSIGNMENTS
from pyqtgraph import ImageView, InfiniteLine, mkPen, ScatterPlotItem, ImageItem, PlotItem
from qtpy.QtGui import QTransform, QPolygonF
from qtpy.QtWidgets import QLabel, QErrorMessage, QSizePolicy, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox
from qtpy.QtCore import Qt, Signal, Slot, QSize, QPointF, QRectF, QObjectCleanupHandler
import numpy as np
from databroker.core import BlueskyRun
# from pyFAI.geometry import Geometry
from xicam.core import msg
from xicam.core.data import MetaXArray
from xicam.core.data.bluesky_utils import fields_from_stream, streams_from_run, is_image_field
from xicam.gui.widgets.elidedlabel import ElidedLabel
from xicam.gui.widgets.ROI import BetterPolyLineROI
import enum
from typing import Callable
from functools import partial
from xicam.plugins import manager as pluginmanager
import inspect
# NOTE: PyQt widget mixins have pitfalls; note #2 here: http://trevorius.com/scrapbook/python/pyqt-multiple-inheritance/
# NOTE: PyFAI geometry position vector is: x = up, y = right, z = beam
def q_from_angles(phi, alpha, wavelength):
r = 2 * np.pi / wavelength
qx = r * np.sin(phi) * np.cos(alpha)
qy = r * np.cos(phi) * np.sin(alpha)
qz = r * (np.cos(phi) * np.cos(alpha) - 1)
return np.array([qx, qy, qz])
def alpha(x, y, z):
return np.arctan2(y, z)
def phi(x, y, z):
return np.arctan2(x, z)
class DisplayMode(enum.Enum):
raw = enum.auto()
cake = enum.auto()
remesh = enum.auto()
class BetterLayout(ImageView):
# Replaces awkward gridlayout with more structured v/hboxlayouts, and removes useless buttons
def __init__(self, *args, **kwargs):
super(BetterLayout, self).__init__(*args, **kwargs)
self.ui.outer_layout = QHBoxLayout()
self.ui.left_layout = QVBoxLayout()
self.ui.right_layout = QVBoxLayout()
self.ui.outer_layout.addLayout(self.ui.left_layout)
self.ui.outer_layout.addLayout(self.ui.right_layout)
for layout in [self.ui.outer_layout, self.ui.left_layout, self.ui.right_layout]:
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.ui.left_layout.addWidget(self.ui.graphicsView)
self.ui.right_layout.addWidget(self.ui.histogram)
# self.ui.right_layout.addWidget(self.ui.roiBtn)
# self.ui.right_layout.addWidget(self.ui.menuBtn)
QObjectCleanupHandler().add(self.ui.layoutWidget.layout())
self.ui.roiBtn.setParent(None)
self.ui.menuBtn.setParent(None)
self.ui.layoutWidget.setLayout(self.ui.outer_layout)
class PixelSpace(ImageView):
def __init__(self, *args, **kwargs):
# Add axes
self.axesItem = PlotItem()
self.axesItem.axes["left"]["item"].setZValue(10)
self.axesItem.axes["top"]["item"].setZValue(10)
if "view" not in kwargs:
kwargs["view"] = self.axesItem
self._transform = QTransform()
self._raw_image = None
super(PixelSpace, self).__init__(*args, **kwargs)
self.imageItem.sigImageChanged.connect(self.updateAxes)
def transform(self, img=None):
# Build Quads
shape = img.shape
a = [(0, shape[-2] - 1), (shape[-1] - 1, shape[-2] - 1), (shape[-1] - 1, 0), (0, 0)]
b = [(0, 1), (shape[-1] - 1, 1), (shape[-1] - 1, shape[-2]), (0, shape[-2])]
quad1 = QPolygonF()
quad2 = QPolygonF()
for p, q in zip(a, b):
quad1.append(QPointF(*p))
quad2.append(QPointF(*q))
transform = QTransform()
QTransform.quadToQuad(quad1, quad2, transform)
for item in self.view.items:
if isinstance(item, ImageItem):
item.setTransform(transform)
self._transform = transform
return img, transform
def setImage(self, img, *args, **kwargs):
if img is None:
return
if getattr(self, "displaymode", DisplayMode.raw) == DisplayMode.raw:
self._raw_image = img
if not kwargs.get("transform", None):
img, transform = self.transform(img)
self.updateAxes()
super(PixelSpace, self).setImage(img, *args, transform=transform, **kwargs)
else:
super(PixelSpace, self).setImage(img, *args, **kwargs)
def setTransform(self):
self.setImage(self._raw_image) # this should loop back around to the respective transforms
def updateAxes(self):
self.axesItem.setLabel("bottom", "x (px)") # , units='s')
self.axesItem.setLabel("left", "z (px)")
class QSpace(PixelSpace):
def __init__(self, *args, geometry=None, **kwargs):
self.displaymode = DisplayMode.raw
self._geometry = None # type: AzimuthalIntegrator
super(QSpace, self).__init__(*args, **kwargs)
self.setGeometry(geometry)
def setGeometry(self, geometry):
if callable(geometry):
geometry = geometry()
self._geometry = geometry
self.setTransform()
class EwaldCorrected(QSpace):
def setDisplayMode(self, mode):
self.displaymode = mode
if hasattr(self, "drawCenter"):
self.drawCenter()
self.setTransform()
def transform(self, img=None):
if not self._geometry or not self.displaymode == DisplayMode.remesh:
return super(EwaldCorrected, self).transform(img) # Do pixel space transform when not calibrated
from camsaxs import remesh_bbox
img, q_x, q_z = remesh_bbox.remesh(np.squeeze(img), self._geometry, reflection=False, alphai=None)
# Build Quads
shape = img.shape
a = shape[-2] - 1, 0 # bottom-left
b = shape[-2] - 1, shape[-1] - 1 # bottom-right
c = 0, shape[-1] - 1 # top-right
d = 0, 0 # top-left
quad1 = QPolygonF()
quad2 = QPolygonF()
for p, q in zip([a, b, c, d], [a, b, c, d]): # the zip does the flip :P
quad1.append(QPointF(*p[::-1]))
quad2.append(QPointF(q_x[q], q_z[q]))
transform = QTransform()
QTransform.quadToQuad(quad1, quad2, transform)
for item in self.view.items:
if isinstance(item, ImageItem):
item.setTransform(transform)
self._transform = transform
return img, self._transform
def setImage(self, img, *args, **kwargs):
if img is None:
return
self._raw_image = img
if self._geometry:
transform_img, transform = self.transform(img)
super(EwaldCorrected, self).setImage(transform_img, *args, transform=transform, **kwargs)
else:
super(EwaldCorrected, self).setImage(img, *args, **kwargs)
def updateAxes(self):
if self.displaymode == DisplayMode.remesh:
self.axesItem.setLabel("bottom", "q<sub>x</sub> (Å⁻¹)") # , units='s')
self.axesItem.setLabel("left", "q<sub>z</sub> (Å⁻¹)")
else:
super(EwaldCorrected, self).updateAxes()
class CenterMarker(QSpace):
def __init__(self, *args, **kwargs):
self.centerplot = ScatterPlotItem(brush="r")
self.centerplot.setZValue(100)
super(CenterMarker, self).__init__(*args, **kwargs)
self.addItem(self.centerplot)
self.drawCenter()
def drawCenter(self):
try:
fit2d = self._geometry.getFit2D()
except (TypeError, AttributeError):
pass
else:
if self.imageItem.image is not None:
if self.displaymode == DisplayMode.raw:
x = fit2d["centerX"]
y = self._raw_image.shape[-2] - fit2d["centerY"]
self.centerplot.setData(x=[x], y=[y])
elif self.displaymode == DisplayMode.remesh:
self.centerplot.setData(x=[0], y=[0])
def setGeometry(self, geometry):
super(CenterMarker, self).setGeometry(geometry)
self.drawCenter()
class Crosshair(ImageView):
def __init__(self, *args, **kwargs):
super(Crosshair, self).__init__(*args, **kwargs)
linepen = mkPen("#FFA500")
self._vline = InfiniteLine((0, 0), angle=90, movable=False, pen=linepen)
self._hline = InfiniteLine((0, 0), angle=0, movable=False, pen=linepen)
self._vline.setVisible(False)
self._hline.setVisible(False)
self.addItem(self._vline)
self.addItem(self._hline)
self.scene.sigMouseMoved.connect(self.moveCrosshair)
def moveCrosshair(self, pos):
if self.view.getViewBox().sceneBoundingRect().contains(pos):
mousePoint = self.view.getViewBox().mapSceneToView(pos)
x, y = mousePoint.x(), mousePoint.y()
if self.imageItem.mapRectToView(self.imageItem.boundingRect()).contains(mousePoint): # within bounds
self._vline.setPos(x)
self._hline.setPos(y)
self._hline.setVisible(True)
self._vline.setVisible(True)
else:
self._hline.setVisible(False)
self._vline.setVisible(False)
class PixelCoordinates(PixelSpace, BetterLayout):
def __init__(self, *args, **kwargs):
super(PixelCoordinates, self).__init__(*args, **kwargs)
self._coordslabel = QLabel(
"<div style='font-size:12pt;background-color:#111111; " "text-overflow: ellipsis; width:100%;'> </div>"
)
# def sizeHint():
# sizehint = QSize(self.ui.graphicsView.width()-10, self._coordslabel.height())
# return sizehint
# self._coordslabel.sizeHint = sizeHint
self._coordslabel.setSizePolicy(
QSizePolicy.Ignored, QSizePolicy.Ignored
) # TODO: set sizehint to take from parent, not text
self.ui.left_layout.addWidget(self._coordslabel, alignment=Qt.AlignHCenter)
self.scene.sigMouseMoved.connect(self.displayCoordinates)
def displayCoordinates(self, pos):
if self.view.sceneBoundingRect().contains(pos):
mousePoint = self.view.getViewBox().mapSceneToView(pos)
pos = QPointF(mousePoint.x(), mousePoint.y())
if self.imageItem.mapRectToView(self.imageItem.boundingRect()).contains(mousePoint): # within bounds
# angstrom=QChar(0x00B5)
pxpos = self.imageItem.mapFromView(pos)
self.formatCoordinates(pxpos, pos)
else:
self._coordslabel.setText("<div style='font-size:12pt;background-color:#111111;'> </div>")
def formatCoordinates(self, pxpos, pos):
"""
when the mouse is moved in the viewer, recalculate coordinates
"""
try:
I = self.imageItem.image[int(pxpos.y()), int(pxpos.x())]
except IndexError:
I = 0
self._coordslabel.setText(
f"<div style='font-size: 12pt;background-color:#111111; color:#FFFFFF;"
f"text-overflow: ellipsis; width:100%;'>"
f"x={pxpos.x():0.1f}, "
f"<span style=''>y={pxpos.y():0.1f}</span>, "
f"<span style=''>I={I:0.0f}</span></div>"
)
class QCoordinates(QSpace, PixelCoordinates):
def formatCoordinates(self, pxpos, pos):
"""
when the mouse is moved in the viewer, recalculate coordinates
"""
try:
I = self.imageItem.image[int(pxpos.y()), int(pxpos.x())]
except IndexError:
I = 0
self._coordslabel.setText(
f"<div style='font-size: 12pt;background-color:#111111; color:#FFFFFF; "
f"text-overflow: ellipsis; width:100%;'>"
f"x={pxpos.x():0.1f}, "
f"<span style=''>y={self.imageItem.image.shape[-2] - pxpos.y():0.1f}</span>, "
f"<span style=''>I={I:0.0f}</span>, "
f"q={np.sqrt(pos.x() ** 2 + pos.y() ** 2):0.3f} \u212B\u207B\u00B9, "
f"q<sub>z</sub>={pos.y():0.3f} \u212B\u207B\u00B9, "
f"q<sub>\u2225</sub>={pos.x():0.3f} \u212B\u207B\u00B9, "
f"d={2 * np.pi / np.sqrt(pos.x() ** 2 + pos.y() ** 2) * 10:0.3f} nm, "
f"\u03B8={np.rad2deg(np.arctan2(pos.y(), pos.x())):.2f}°</div>"
)
class PolygonROI(ImageView):
def __init__(self, *args, **kwargs):
"""
Image view extended with an adjustable polygon region-of-interest (ROI).
When first displayed, the polygon ROI's corners will be set to the image item's corners.
Parameters
----------
args, optional
Positional arguments for the ImageView.
kwargs, optional
Keyword arguments for the ImageView.
"""
super(PolygonROI, self).__init__(*args, **kwargs)
rect = self.imageItem.boundingRect() # type: QRectF
positions = [
(rect.bottomLeft().x(), rect.bottomLeft().y()),
(rect.bottomRight().x(), rect.bottomRight().y()),
(rect.topRight().x(), rect.topRight().y()),
(rect.topLeft().x(), rect.topLeft().y()),
]
self._roiItem = BetterPolyLineROI(positions=positions, closed=True, scaleSnap=True, translateSnap=True)
self.addItem(self._roiItem)
def __repr__(self):
return type(self).__name__ + repr(self._roiItem)
def poly_mask(self):
"""
Gets the mask array for a ROI polygon on the image.
The mask array's shape will match the image's shape.
Any pixel inside both the ROI polygon and the image will be set to 1 in the mask array;
all other values in the mask will be set to 0.
Returns
-------
ndarray:
Mask array of the ROI polygon within image space (mask shape matches image shape).
"""
result, mapped = self._roiItem.getArrayRegion(
np.ones_like(self.imageItem.image), self.imageItem, returnMappedCoords=True
)
# TODO -- move this code to own function and test
# Reverse the result array to make indexing calculations easier, then revert back
result = result[::-1, ::-1]
mapped = mapped[::-1, ::-1]
# Pad result mask rect into bounding rect of mask and image
floorRow = np.floor(mapped[0]).astype(int)
floorCol = np.floor(mapped[1]).astype(int)
# Return empty mask if ROI bounding box does not intersect image bounding box
resultRect = QRectF(QPointF(np.min(floorRow), np.min(floorCol)), QPointF(np.max(floorRow), np.max(floorCol)))
if not self._intersectsImage(resultRect):
# TODO -- is zeros(shape) the right return value for a non-intersecting polygon?
return np.zeros(self.imageItem.image.shape)
# Find the bounds of the ROI polygon
minX = np.min(floorRow)
maxX = np.max(floorRow)
minY = np.min(floorCol)
maxY = np.max(floorCol)
width = self.imageItem.width()
height = self.imageItem.height()
# Pad the ROI polygon into the image shape
# Don't need padding if a polygon boundary is outside of the image shape
padXBefore = minX
if minX < 0:
padXBefore = 0
padXAfter = height - maxX
if padXAfter < 0:
padXAfter = 0
padYBefore = minY
if minY < 0:
padYBefore = 0
padYAfter = width - maxY
if padYAfter < 0:
padYAfter = 0
boundingBox = np.pad(result, ((padYBefore, padYAfter), (padXBefore, padXAfter)), "constant")
# For trimming, any negative minimums need to be shifted into the image shape
offsetX = 0
offsetY = 0
if minX < 0:
offsetX = abs(minX)
if minY < 0:
offsetY = abs(minY)
trimmed = boundingBox[abs(offsetY) : abs(offsetY) + height, abs(offsetX) : abs(offsetX) + width]
# Reorient the trimmed mask array
trimmed = trimmed[::-1, ::-1]
# # TODO remove plotting code below
# from matplotlib import pyplot as plt
# plt.figure('bounding_box, origin="lower"')
# plt.imshow(boundingBox, origin='lower')
# plt.show()
#
#
# plt.figure(f'trimmed, origin="lower", [{abs(offsetY)}:{abs(offsetY)+height}, {abs(offsetX)}:{abs(offsetX)+width}]')
# plt.imshow(trimmed, origin='lower')
# plt.show()
# # TODO remove the plotting code above
return trimmed
def _intersectsImage(self, rectangle: QRectF):
"""
Checks if a rectangle intersects the image's bounding rectangle.
Parameters
----------
rectangle
Rectangle to test intersection with the image item's bounding rectangle.
Returns
-------
bool
True if the rectangle and the image bounding rectangle intersect; otherwise False.
"""
# TODO -- test
return self.imageItem.boundingRect().intersects(rectangle)
import collections
from pyqtgraph import functions as fn
from pyqtgraph import debug
from pyqtgraph import Point
class ComposableItemImageView(ImageView):
"""
Used to compose together different image view mixins that may use different ItemImage subclasses.
See LogScaleIntensity, LogScaleImageItem, ImageViewHistogramOverflowFIx, ImageItemHistorgramOverflowFix.
Note that any imageItem named argument passed into the ImageView mixins above will discard the item and instead
create a composition of imageItem_bases with their respective ImageItem class.
"""
imageItem_bases = tuple()
class LogScaleImageItem(ImageItem):
def __init__(self, *args, **kwargs):
super(LogScaleImageItem, self).__init__(*args, **kwargs)
self.logScale = True
def render(self):
# Convert data to QImage for display.
profile = debug.Profiler()
if self.image is None or self.image.size == 0:
return
if isinstance(self.lut, collections.Callable):
lut = self.lut(self.image)
else:
lut = self.lut
if self.logScale:
image = self.image + 1
with np.errstate(invalid="ignore"):
image = image.astype(np.float)
np.log(image, where=image >= 0, out=image) # map to 0-255
else:
image = self.image
if self.autoDownsample:
# reduce dimensions of image based on screen resolution
o = self.mapToDevice(QPointF(0, 0))
x = self.mapToDevice(QPointF(1, 0))
y = self.mapToDevice(QPointF(0, 1))
w = Point(x - o).length()
h = Point(y - o).length()
if w == 0 or h == 0:
self.qimage = None
return
xds = max(1, int(1.0 / w))
yds = max(1, int(1.0 / h))
axes = [1, 0] if self.axisOrder == "row-major" else [0, 1]
image = fn.downsample(image, xds, axis=axes[0])
image = fn.downsample(image, yds, axis=axes[1])
self._lastDownsample = (xds, yds)
else:
pass
# if the image data is a small int, then we can combine levels + lut
# into a single lut for better performance
levels = self.levels
if levels is not None and levels.ndim == 1 and image.dtype in (np.ubyte, np.uint16):
if self._effectiveLut is None:
eflsize = 2 ** (image.itemsize * 8)
ind =
|
np.arange(eflsize)
|
numpy.arange
|
# Copyright 2021 RangiLyu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
import torch
from pycocotools.coco import COCO
from .base import BaseDataset
import random
class CocoDataset(BaseDataset):
def get_data_info(self, ann_path):
"""
Load basic information of dataset such as image path, label and so on.
:param ann_path: coco json file path
:return: image info:
[{'license': 2,
'file_name': '000000000139.jpg',
'coco_url': 'http://images.cocodataset.org/val2017/000000000139.jpg',
'height': 426,
'width': 640,
'date_captured': '2013-11-21 01:34:01',
'flickr_url':
'http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg',
'id': 139},
...
]
"""
self.coco_api = COCO(ann_path)
self.cat_ids = sorted(self.coco_api.getCatIds())
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cats = self.coco_api.loadCats(self.cat_ids)
self.img_ids = sorted(self.coco_api.imgs.keys())
img_info = self.coco_api.loadImgs(self.img_ids)
# add mosaic data augmentation
if (isinstance(self.input_size, int)):
self.mosaic_border = [-self.input_size // 2, -self.input_size // 2]
else:
self.mosaic_border = [-self.input_size[1] // 2, -self.input_size[0] // 2] # note: must follow [H, W] order, otherwise random_perspective will raise an error.
self.indices = range(len(self.img_ids))
return img_info
def get_per_img_info(self, idx):
img_info = self.data_info[idx]
file_name = img_info["file_name"]
height = img_info["height"]
width = img_info["width"]
id = img_info["id"]
if not isinstance(id, int):
raise TypeError("Image id must be int.")
info = {"file_name": file_name, "height": height, "width": width, "id": id}
return info
def get_img_annotation(self, idx):
"""
load per image annotation
:param idx: index in dataloader
:return: annotation dict
"""
img_id = self.img_ids[idx]
ann_ids = self.coco_api.getAnnIds([img_id])
anns = self.coco_api.loadAnns(ann_ids)
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
if self.use_instance_mask:
gt_masks = []
if self.use_keypoint:
gt_keypoints = []
for ann in anns:
if ann.get("ignore", False):
continue
x1, y1, w, h = ann["bbox"]
if ann["area"] <= 0 or w < 1 or h < 1:
continue
if ann["category_id"] not in self.cat_ids:
continue
# convert to x1 y1 x2 y2
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get("iscrowd", False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann["category_id"]])
if self.use_instance_mask:
gt_masks.append(self.coco_api.annToMask(ann))
if self.use_keypoint:
gt_keypoints.append(ann["keypoints"])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels =
|
np.array([], dtype=np.int64)
|
numpy.array
|
"""Multi-layer Perceptron
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.optimize import fmin_l_bfgs_b
import warnings
from sklearn.utils import check_X_y,check_array
from .base import logistic, softmax, binary_KL_divergence
from .base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.base import TransformerMixin
from sklearn.externals import six
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import gen_batches, check_random_state
from sklearn.utils import shuffle
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
def _pack(layers_coef_, layers_intercept_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in layers_coef_ + layers_intercept_])
class BaseMultilayerPerceptron(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, algorithm,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, beta, sparsity_param, random_state, tol, verbose,
warm_start):
self.activation = activation
self.algorithm = algorithm
self.alpha = alpha
self.beta = beta
self.sparsity_param = sparsity_param
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.layers_coef_ = None
self.layers_intercept_ = None
self.cost_ = None
self.n_iter_ = None
self.learning_rate_ = None
self.classes_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.layers_coef_[i] = np.reshape(packed_parameters[start:end],
shape)
start, end = self._intercept_indptr[i]
self.layers_intercept_[i] = packed_parameters[start:end]
def _forward_pass(self, activations, with_output_activation=True):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations: list, length = n_layers - 1
The ith index of the list holds the values of the ith layer.
with_output_activation : bool, default True
If True, the output passes through the output activation
function, which is either the softmax function or the
logistic function
"""
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.layers_coef_[i])
activations[i + 1] += self.layers_intercept_[i]
# For the hidden layers
if i + 1 != self.n_layers_ - 1:
hidden_activation = ACTIVATIONS[self.activation]
activations[i + 1] = hidden_activation(activations[i + 1])
# For the last layer
if with_output_activation:
output_activation = ACTIVATIONS[self.out_activation_]
activations[i + 1] = output_activation(activations[i + 1])
return activations
def _compute_cost_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the cost gradient for the layer."""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer]) / n_samples
coef_grads[layer] += (self.alpha * self.layers_coef_[layer])
intercept_grads[layer] = np.mean(deltas[layer], 0)
return coef_grads, intercept_grads
def _cost_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP cost function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Parameters
----------
packed_parameters : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith index of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith index of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
coef_grad : list, length = n_layers - 1
The ith index contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith index contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
cost : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
cost, coef_grads, intercept_grads = self._backprop(X, y, activations,
deltas, coef_grads,
intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return cost, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
intercept_grads):
"""Compute the MLP cost function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith index of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith index of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
coef_grad : list, length = n_layers - 1
The ith index contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith index contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
cost : float
"""
n_samples = X.shape[0]
# Step (1/3): Forward propagate
activations = self._forward_pass(activations)
# Step (2/3): Get cost
cost = LOSS_FUNCTIONS[self.loss](y, activations[-1])
# Add L2 regularization term to cost
values = np.sum(np.array([np.sum(s ** 2) for s in self.layers_coef_]))
cost += (0.5 * self.alpha) * values
# Add Sparsity Term (This is for autoencoders)
if self.sparsity_param > 0:
for i in range(self.n_layers_ - 2, 0, -1):
sparsity_param_hat = np.sum(activations[i], 0) / n_samples
cost += self.beta * np.sum(binary_KL_divergence(self.sparsity_param,
sparsity_param_hat))
# Step (3/3): Backward propagate
last = self.n_layers_ - 2
diff = y - activations[-1]
deltas[last] = -diff
# Compute gradient for the last layer
coef_grads, intercept_grads = self._compute_cost_grad(last, n_samples,
activations,
deltas,
coef_grads,
intercept_grads)
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i],
self.layers_coef_[i].T)
# For sparse autoencoders
if self.sparsity_param > 0:
sparsity_param_hat = np.sum(activations[i], 0) / n_samples
deltas[i - 1] += self.beta *((1 - self.sparsity_param) /
(1 - sparsity_param_hat)
- self.sparsity_param /
sparsity_param_hat)
derivative = DERIVATIVES[self.activation]
deltas[i - 1] *= derivative(activations[i])
coef_grads, \
intercept_grads = self._compute_cost_grad(i - 1,
n_samples,
activations,
deltas,
coef_grads,
intercept_grads)
#print 'cost2', coef_grads
return cost, coef_grads, intercept_grads
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError("hidden_layer_sizes must be > 0, got %s." %
hidden_layer_sizes)
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
self.learning_rate)
# raise ValueError if not registered
if self.activation not in ACTIVATIONS:
raise ValueError("The activation %s is not supported. Supported "
"activations are %s." % (self.activation,
ACTIVATIONS))
if self.learning_rate not in ["constant", "invscaling"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
if self.algorithm not in ["sgd", "l-bfgs"]:
raise ValueError("The algorithm %s is not supported. " %
self.algorithm)
#X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
# multi_output=True)
# This outputs a warning when a 1d array is expected
#if y.ndim == 2 and y.shape[1] == 1:
# y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
# Classification
if isinstance(self, ClassifierMixin):
self.label_binarizer_.fit(y)
if self.classes_ is None or not incremental:
self.classes_ = self.label_binarizer_.classes_
else:
classes = self.label_binarizer_.classes_
if not np.all(
|
np.in1d(classes, self.classes_)
|
numpy.in1d
|
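The sparsity penalty added in _backprop above relies on binary_KL_divergence imported from the snippet's local base module. A small sketch of that term, assuming the conventional Bernoulli KL divergence used for sparse-autoencoder penalties (the helper below is an illustrative stand-in, not the module's actual implementation):
import numpy as np
def binary_kl_divergence(rho, rho_hat):
    """Elementwise KL(rho || rho_hat) between Bernoulli distributions."""
    rho_hat = np.clip(rho_hat, 1e-10, 1 - 1e-10)  # guard against log(0)
    return (rho * np.log(rho / rho_hat)
            + (1 - rho) * np.log((1 - rho) / (1 - rho_hat)))
# A mean activation close to the target sparsity gives a near-zero penalty.
print(binary_kl_divergence(0.05, np.array([0.05, 0.2, 0.5])))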
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import theano
import theano.tensor as tt
from theano.tests import unittest_tools as utt
from .keplerian import KeplerianOrbit
def test_sky_coords():
from batman import _rsky
t = np.linspace(-100, 100, 1000)
t0, period, a, e, omega, incl = (x.flatten() for x in np.meshgrid(
np.linspace(-5.0, 5.0, 2),
np.exp(np.linspace(np.log(5.0), np.log(50.0), 3)),
np.linspace(50.0, 100.0, 2),
np.linspace(0.0, 0.9, 5),
np.linspace(-np.pi, np.pi, 3),
np.arccos(np.linspace(0, 1, 5)[:-1]),
))
r_batman = np.empty((len(t), len(t0)))
for i in range(len(t0)):
r_batman[:, i] = _rsky._rsky(t, t0[i], period[i], a[i],
incl[i], e[i], omega[i], 1, 1)
m = r_batman < 100.0
assert m.sum() > 0
orbit = KeplerianOrbit(
period=period, a=a, t0=t0, ecc=e, omega=omega, incl=incl)
func = theano.function([], orbit.get_relative_position(t))
x, y, z = func()
r = np.sqrt(x**2 + y**2)
# Make sure that the in-transit impact parameter matches batman
utt.assert_allclose(r_batman[m], r[m], atol=2e-5)
# In-transit should correspond to positive z in our parameterization
assert np.all(z[m] > 0)
# Therefore, when batman doesn't see a transit we shouldn't be transiting
no_transit = z[~m] < 0
no_transit |= r[~m] > 2
assert np.all(no_transit)
def test_center_of_mass():
t = np.linspace(0, 100, 1000)
m_planet = np.array([0.5, 0.1])
m_star = 1.45
orbit = KeplerianOrbit(
m_star=m_star,
r_star=1.0,
t0=np.array([0.5, 17.4]),
period=np.array([100.0, 37.3]),
ecc=np.array([0.1, 0.8]),
omega=np.array([0.5, 1.3]),
Omega=np.array([0.0, 1.0]),
incl=np.array([0.25*np.pi, 0.3*np.pi]),
m_planet=m_planet,
)
planet_coords = theano.function([], orbit.get_planet_position(t))()
star_coords = theano.function([], orbit.get_star_position(t))()
com = np.sum((m_planet[None, :] * np.array(planet_coords) +
m_star * np.array(star_coords)) /
(m_star + m_planet)[None, :], axis=0)
assert np.allclose(com, 0.0)
def test_velocity():
t_tensor = tt.dvector()
t = np.linspace(0, 100, 1000)
m_planet = 0.1
m_star = 1.3
orbit = KeplerianOrbit(
m_star=m_star,
r_star=1.0,
t0=0.5,
period=100.0,
ecc=0.1,
omega=0.5,
Omega=1.0,
incl=0.25*np.pi,
m_planet=m_planet,
)
star_pos = orbit.get_star_position(t_tensor)
star_vel = theano.function([], orbit.get_star_velocity(t))()
star_vel_expect = np.empty_like(star_vel)
for i in range(3):
g = theano.grad(tt.sum(star_pos[i]), t_tensor)
star_vel_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(star_vel, star_vel_expect)
planet_pos = orbit.get_planet_position(t_tensor)
planet_vel = theano.function([], orbit.get_planet_velocity(t))()
planet_vel_expect = np.empty_like(planet_vel)
for i in range(3):
g = theano.grad(tt.sum(planet_pos[i]), t_tensor)
planet_vel_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(planet_vel, planet_vel_expect)
pos = orbit.get_relative_position(t_tensor)
vel = np.array(theano.function([], orbit.get_relative_velocity(t))())
vel_expect = np.empty_like(vel)
for i in range(3):
g = theano.grad(tt.sum(pos[i]), t_tensor)
vel_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(vel, vel_expect)
def test_acceleration():
t_tensor = tt.dvector()
t = np.linspace(0, 100, 1000)
m_planet = 0.1
m_star = 1.3
orbit = KeplerianOrbit(
m_star=m_star,
r_star=1.0,
t0=0.5,
period=100.0,
ecc=0.1,
omega=0.5,
incl=0.25*np.pi,
m_planet=m_planet,
)
star_vel = orbit.get_star_velocity(t_tensor)
star_acc = theano.function([], orbit.get_star_acceleration(t))()
star_acc_expect = np.empty_like(star_acc)
for i in range(3):
g = theano.grad(tt.sum(star_vel[i]), t_tensor)
star_acc_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(star_acc, star_acc_expect)
planet_vel = orbit.get_planet_velocity(t_tensor)
planet_acc = theano.function([], orbit.get_planet_acceleration(t))()
planet_acc_expect = np.empty_like(planet_acc)
for i in range(3):
g = theano.grad(tt.sum(planet_vel[i]), t_tensor)
planet_acc_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(planet_acc, planet_acc_expect)
vel = orbit.get_relative_velocity(t_tensor)
acc = theano.function([], orbit.get_relative_acceleration(t))()
acc_expect = np.empty_like(acc)
for i in range(3):
g = theano.grad(tt.sum(vel[i]), t_tensor)
acc_expect[i] = theano.function([t_tensor], g)(t)
utt.assert_allclose(acc, acc_expect)
def test_in_transit():
t =
|
np.linspace(-20, 20, 1000)
|
numpy.linspace
|
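test_velocity and test_acceleration above cross-check the analytic derivatives returned by the orbit against autodiff gradients of the position. The same kind of consistency check can be sketched with a central finite difference using only numpy; the circular-orbit toy below is illustrative and not part of the exoplanet API:
import numpy as np
def position(t):
    """Toy circular orbit with period 2*pi."""
    return np.cos(t), np.sin(t)
def velocity(t):
    """Analytic time derivative of the toy orbit."""
    return -np.sin(t), np.cos(t)
t = np.linspace(0.0, 10.0, 200)
h = 1e-5
for analytic, (plus, minus) in zip(velocity(t), zip(position(t + h), position(t - h))):
    numeric = (plus - minus) / (2 * h)  # central difference
    assert np.allclose(analytic, numeric, atol=1e-6)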
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import numpy as np
import onnx
from uuid import uuid4
from typing import Union
from onnx import numpy_helper, helper
from onnx import onnx_pb as onnx_proto
from ._opt_const_folding import const_folding_optimizer, reserve_node_for_embedded_graph, OnnxGraphContext
class LinkedNode(object):
reserved_names_in_graph = frozenset()
def __init__(self, node=None, in_n=None, out_n=None, tensors_n=None, target_opset=None):
self.origin = node # type: onnx_proto.NodeProto
if in_n is None and node is not None:
in_n = node.input
if out_n is None and node is not None:
out_n = node.output
self.input = {} if in_n is None else {i_: i_ for i_ in in_n}
self.output = {} if out_n is None else {o_: o_ for o_ in out_n}
self.tensors = [] if tensors_n is None else tensors_n
self.initializers = []
self.precedence = []
self.successor = []
self.attributes = {}
self.unique_name = self.origin.name if self.origin and self.origin.name else str(uuid4().hex)
self.target_opset = target_opset
def __repr__(self):
return "name: {}, node: <{}>".format(self.unique_name, str(self.origin) if self.origin else 'None')
@property
def op_type(self):
return None if self.origin is None else self.origin.op_type
@property
def name(self):
return self.unique_name
@property
def is_identity(self):
return False if self.origin is None else self.origin.op_type == 'Identity'
@property
def is_transpose(self):
return False if self.origin is None else self.origin.op_type == 'Transpose'
@property
def in_single_path(self):
"""
Test if a node is not linking to any fan in or out node.
"""
return (len(self.successor) == 1 and not self.successor[0].in_or_out and
len(self.precedence) == 1)
@property
def in_single_path_to_output(self):
return len(self.successor) == 1 and self.successor[0].in_or_out and \
len(self.precedence) == 1 and not self.precedence[0].in_or_out
@property
def element_wise(self):
return False if self.origin is None else \
self.origin.op_type in ['Relu', 'LeakyRelu', 'PRelu', 'Tanh'] + \
['Abs', 'Acos', 'Acosh', 'Log', 'Affine', 'Elu'] + \
['Sigmoid', 'ScaledTanh', 'HardSigmoid', 'Softsign', 'Softplus', 'Identity', 'Neg', 'Clip']
@property
def broadcast(self):
return False if self.origin is None else \
self.origin.op_type in ['Add', 'And', 'Div', 'Equal', 'Max', 'Mean', 'Min', 'Mul', 'Sub', 'Sum']
@property
def in_single_path_and_inner(self):
"""
Test if a node is not linking to any fan in or out node.
"""
return (len(self.successor) == 1 and self.successor[0] is not None and not self.successor[0].in_or_out and
len(self.precedence) == 1 and self.precedence[0] is not None and not self.precedence[0].in_or_out)
@property
def in_simo_and_inner(self):
"""
Test if a node is simo: single input and multiple output
"""
return (len(self.successor) > 1 and self.successor[0] is not None and not self.successor[0].in_or_out and
len(self.precedence) == 1 and self.precedence[0] is not None and not self.precedence[0].in_or_out)
@property
def in_miso_and_inner(self):
"""
Test if a node is miso: multiple input and single output
"""
return (len(self.successor) == 1 and self.successor[0] is not None and not self.successor[0].in_or_out and
len(self.precedence) > 1 and self.get_precedence_by_idx(
0) is not None and not self.get_precedence_by_idx(0).in_or_out)
@property
def in_mi_and_inner(self):
"""
Test if a node is mi: multiple input
"""
if len(self.precedence) < 1:
return False
for pre_ in self.precedence:
if len(pre_.successor) > 1:
return False
return (len(self.successor) >= 1 and
len(self.precedence) > 1 and self.get_precedence_by_idx(0) is not None and not self.successor[
0].in_or_out)
@property
def is_eligible_concat_and_inner(self):
"""
Test if a node is eligible_concat_and_inner: multiple input
"""
if self.origin.op_type != 'Concat':
return (False, None)
perm = None
for pre_ in self.precedence:
if len(pre_.successor) > 1:
return (False, None)
if not hasattr(pre_.origin, 'op_type') or pre_.origin.op_type != 'Transpose':
return (False, None)
cur_perm = Solution.get_perm(pre_.origin)
if perm and cur_perm != perm:
return (False, None)
perm = cur_perm
for suc_ in self.successor:
if suc_.in_or_out:
return (False, None)
axis = next(helper.get_attribute_value(attr) for attr in self.origin.attribute if attr.name == 'axis')
if len(perm) <= axis:
if perm == [] and axis == 0:
return (True, -1)
else:
return (False, None)
return (True, perm[axis])
@property
def is_transpose_switchable(self):
return self.element_wise or self.broadcast
@property
def is_transpose_switchable_single_path(self):
return self.in_single_path_and_inner and self.is_transpose_switchable
@property
def is_transpose_switchable_simo(self):
return self.in_simo_and_inner and self.is_transpose_switchable
@property
def is_transpose_switchable_miso(self):
return self.in_miso_and_inner and self.is_transpose_switchable
@property
def is_transpose_switchable_mi(self):
return self.in_mi_and_inner and self.is_transpose_switchable
@property
def in_or_out(self):
return self.origin is None
@property
def single_input(self):
assert self.origin is not None and len(self.input) == 1
return next(value for (key, value) in self.input.items())
@property
def single_origin_input(self):
assert self.origin is not None and len(self.input) == 1
return self.origin.input[0]
@property
def single_output(self):
assert self.origin is not None and len(self.output) == 1
return next(value for (key, value) in self.output.items())
@property
def single_origin_output(self):
assert self.origin is not None and len(self.output) == 1
return self.origin.output[0]
@property
def is_reserved(self):
if self.origin is None:
return False
for node_output_ in self.origin.output:
if node_output_ in LinkedNode.reserved_names_in_graph:
return True
return False
def in_redirect(self, old_name, name):
if old_name in self.input:
self.input[old_name] = name
else:
key = next(k for k, v in self.input.items() if v == old_name)
self.input[key] = name
def out_redirect(self, old_name, name):
if old_name in self.output:
self.output[old_name] = name
else:
key = next(k for k, v in self.output.items() if v == old_name)
self.output[key] = name
def get_input_by_idx(self, idx=0):
if self.origin is None:
assert idx == 0
return list(self.input.values())[0]
onode_input_name = self.origin.input[idx]
return self.input[onode_input_name]
def get_output_by_idx(self, idx=0):
if self.origin is None:
assert idx == 0
return list(self.output.values())[0]
onode_output_name = self.origin.output[idx]
return self.output[onode_output_name]
def get_precedence_by_idx(self, idx=0):
input_tensor_name = self.get_input_by_idx(idx)
for pred in self.precedence:
if input_tensor_name in pred.output.values():
return pred
return None
def get_precedence_tensor_by_idx(self, idx=0):
input_tensor_name = self.get_input_by_idx(idx)
for initializer_ in self.initializers:
if input_tensor_name == initializer_.name:
return initializer_
for pred in self.precedence:
if input_tensor_name in pred.output.values():
return pred.tensors[0]
return None
def get_attribute(self, attr_name, default_value=None):
if attr_name in self.attributes:
return self.attributes[attr_name]
found = [attr for attr in self.origin.attribute if attr.name == attr_name]
if found:
return helper.get_attribute_value(found[0])
return default_value
def generate(self):
updated = False
if self.attributes:
updated = True
elif len([k for k, v in self.input.items() if k != v]) > 0:
updated = True
elif len([k for k, v in self.output.items() if k != v]) > 0:
updated = True
if not updated:
return [self.origin]
else:
onode = onnx_proto.NodeProto()
onode.name = self.origin.name
onode.op_type = self.origin.op_type
onode.input.extend([self.input.get(i_, i_) for i_ in self.origin.input])
for input_ in self.initializers:
if input_.name not in onode.input:
onode.input.append(input_.name)
onode.output.extend([self.output.get(o_, o_) for o_ in self.origin.output])
onode.doc_string = self.origin.doc_string
onode.domain = self.origin.domain
onode.attribute.extend(
attr for attr in self.origin.attribute if attr.name not in self.attributes)
onode.attribute.extend(
helper.make_attribute(attr, self.attributes[attr]) for attr in self.attributes)
return [onode]
def add_precedence(self, pre, tname):
self.precedence.append(pre)
pre.successor.append(self)
assert tname in self.input.values() and tname in pre.output.values()
@staticmethod
def build_from_onnx(onnx_nodes, nchw_inputs, inputs, outputs, initializers=None, target_opset=None):
view = []
var_map = {}
for o_ in onnx_nodes:
ln = LinkedNode(o_, target_opset=target_opset)
view.append(ln)
for var_ in o_.output:
assert var_map.get(var_) is None
var_map[var_] = ln
additional_nodes = []
count_nchw = 0
initializer_map = None
if initializers is not None:
initializer_map = {k.name: k for k in initializers}
for n_ in view:
for var_ in n_.origin.input:
target = var_map.get(var_)
if target is None:
assert var_ == '' or var_ in inputs
if initializer_map is not None and var_ in initializer_map:
target = LinkedNode(out_n=[var_],
tensors_n=[initializer_map[var_]],
target_opset=target_opset) # create an empty node as input
else:
target = LinkedNode(out_n=[var_], target_opset=target_opset)
new_output = var_ + '_nhwc'
if var_ in nchw_inputs:
nnode = LinkedNode(
helper.make_node(
'Transpose',
[var_],
[new_output],
name='Transpose_nchw_' + str(count_nchw),
perm=[0, 2, 3, 1]),
target_opset=target_opset)
count_nchw = count_nchw + 1
var_map[new_output] = nnode
nnode.add_precedence(target, var_)
n_.in_redirect(var_, new_output)
target = nnode
var_ = new_output
additional_nodes.append(nnode)
n_.add_precedence(target, var_)
for n_ in view: # add a dummy output node.
for var_ in n_.origin.output:
if var_ in outputs:
LinkedNode(in_n=[var_], target_opset=target_opset).add_precedence(n_, var_)
return view + additional_nodes
@staticmethod
def debug_print(node_list):
for n_ in node_list:
input_list = []
output_list = []
for pred in n_.precedence:
if pred.origin is not None and pred.origin.name is not None:
input_list.append(pred.origin.name)
else:
input_list.append("None")
for succ in n_.successor:
if succ.origin is not None and succ.origin.name is not None:
output_list.append(succ.origin.name)
else:
output_list.append("None")
input_list_str = ""
if input_list is not None and input_list:
input_list_str = ", ".join(input_list)
output_list_str = ""
if output_list is not None and output_list:
output_list_str = ", ".join(output_list)
print(
"Node origin name: " + n_.origin.name +
", Input id: " + input_list_str + ", Output id: " + output_list_str)
class Solution(object):
"""
Solution is the base class for solutions, and it has a basic function is to
delete the node range of (begin, begin_n, end_p, end), where 'begin' and 'end' are excluded.
"""
def __init__(self, begin, begin_n, end_p, end):
self.begin = begin
self.begin_n = begin_n
self.end_p = end_p
self.end = end
@staticmethod
def get_perm(onode):
onode = onode.origin if isinstance(onode, LinkedNode) else onode
try:
return next(
helper.get_attribute_value(attr) for attr in onode.attribute if attr.name == 'perm')
except StopIteration:
return []
@staticmethod
def is_useless_transpose(perm):
return perm == list(range(len(perm)))
@staticmethod
def delete_node_nto1(node_list, begin, node, end): # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
"""
delete the node which has n-input and 1-output
"""
if begin is None:
assert node is not None
begin = node.precedence
elif not isinstance(begin, list):
begin = [begin]
target_var_name = None
if end.in_or_out:
# if the end is an output node, keep its name so the model's output name does not change.
for nb_ in begin:
nb_.out_redirect(node.single_input, node.single_output)
else:
target_var_name = node.get_input_by_idx(0)
for nb_ in begin:
# the output info is never updated, except for the final output.
assert target_var_name in nb_.output.values()
end.in_redirect(node.single_output, target_var_name)
for nb_ in begin:
nb_.successor = [end if v_ == node else v_ for v_ in nb_.successor]
end.precedence = [v_ for v_ in end.precedence if v_ != node] + [node.get_precedence_by_idx(0)]
node_list.remove(node)
return node_list
@staticmethod
def delete_node_1ton(node_list, begin, node, end): # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
"""
delete the node which has 1-input and n-output
"""
if end is None:
end = node.successor
elif not isinstance(end, list):
end = [end]
if any(e_.in_or_out for e_ in end):
# if the end is an output node, keep its name so the model's output name does not change.
begin.out_redirect(node.single_input, node.single_output)
else:
for ne_ in end:
target_var_name = node.single_input
# the output info is never updated, except for the final output.
assert target_var_name in begin.output.values()
ne_.in_redirect(node.single_output, target_var_name)
begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
for ne_ in end:
ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence]
node_list.remove(node)
return node_list
@staticmethod
def add_siso_node(node_list, begin, end, begin_output_name, node):
# type: ([], LinkedNode, LinkedNode, str, LinkedNode)->[]
node.in_redirect(node.get_input_by_idx(0), begin_output_name)
end.in_redirect(begin_output_name, node.single_output)
begin.successor[begin.successor.index(end)] = node
end.precedence[end.precedence.index(begin)] = node
node.precedence.append(begin)
node.successor.append(end)
node_list.append(node)
return node_list
def apply(self, node_list):
node = self.begin_n # type: LinkedNode
if node.is_reserved:
return None, False
if len(node.successor) > 1:
node_list = self.delete_node_1ton(node_list, self.begin, node, self.end)
else:
node = self.begin_n
while node != self.end:
assert len(node.successor) == 1
end = node.successor[0]
if node.is_reserved:
return None, False
node = self.end if self.end is None else end
node = self.begin_n
while node != self.end:
end = node.successor[0]
node_list = self.delete_node_nto1(node_list, self.begin, node, end)
node = self.end if self.end is None else end
return node_list, True
# Match two perms where the merge is identity, this is order sensitive.
def match_perm(perm0, perm1):
if len(perm0) != len(perm1):
return False
if perm0 == [] and perm1 == []:
return True
perm_f = [perm0[idx] for idx in perm1]
return Solution.is_useless_transpose(perm_f)
class MergeSolution(Solution):
def apply(self, node_list):
if self.begin_n.is_reserved or self.end_p.is_reserved:
return None, False
perm0 = self.get_perm(self.begin_n.origin)
perm1 = self.get_perm(self.end_p.origin)
assert len(perm0) == len(perm1)
perm_f = [perm0[idx] for idx in perm1]
if self.is_useless_transpose(perm_f):
node = self.begin # type: LinkedNode
while node != self.end and len(node.successor) >= 1:
node = node.successor[0]
node_list = self.delete_node_1ton(node_list, self.begin, self.begin_n, self.begin_n.successor[0])
node_list = self.delete_node_1ton(node_list, self.end_p.get_precedence_by_idx(0), self.end_p, self.end)
else:
node_list = self.delete_node_1ton(node_list, self.end_p.get_precedence_by_idx(0), self.end_p, self.end)
self.begin_n.origin = helper.make_node('Transpose', self.begin_n.origin.input, self.begin_n.origin.output,
self.begin_n.origin.name, perm=perm_f)
return node_list, True
class MoveForwardSolution(Solution):
def apply(self, node_list):
self.begin_n.successor[0].in_redirect(self.begin_n.single_output, self.begin.get_output_by_idx(0))
self.begin_n.in_redirect(self.begin.get_output_by_idx(0), self.end_p.single_output)
self.end.in_redirect(self.end_p.single_output, self.begin_n.single_output)
self.begin_n.successor[0].precedence[0] = self.begin
self.begin.successor[0] = self.begin_n.successor[0]
self.begin_n.precedence[0] = self.end_p
self.end_p.successor[0] = self.begin_n
pre_len = len(self.end.precedence)
for i_ in range(pre_len):
if self.end.precedence[i_].origin and self.end.precedence[i_].origin.name == self.end_p.origin.name:
self.end.precedence[i_] = self.begin_n
break
self.begin_n.successor[0] = self.end
return node_list, True
class FanOutSolution(Solution):
number = 0
def apply(self, node_list):
if self.begin_n.is_reserved:
return None, False
cur_perm = Solution.get_perm(self.begin_n.origin)
# make a copy of self.end_p.successor
successor_list = list(self.end_p.successor)
for suc in successor_list:
if cur_perm == []:
nnode = LinkedNode(
helper.make_node(
'Transpose',
['fan_out_adjustment_in' + str(FanOutSolution.number)],
['fan_out_adjustment_out' + str(FanOutSolution.number)],
name='TransposeFanOut' + str(FanOutSolution.number)))
else:
nnode = LinkedNode(
helper.make_node(
'Transpose',
['fan_out_adjustment_in' + str(FanOutSolution.number)],
['fan_out_adjustment_out' + str(FanOutSolution.number)],
perm=cur_perm,
name='TransposeFanOut' + str(FanOutSolution.number)))
FanOutSolution.number = FanOutSolution.number + 1
node_list = Solution.add_siso_node(node_list, self.end_p, suc, list(self.end_p.output.values())[0], nnode)
node_list = Solution.delete_node_1ton(node_list, self.begin, self.begin_n, self.end)
return node_list, True
class TransposeFanOutSolution(Solution):
def apply(self, node_list):
if self.begin_n.is_reserved:
return None, False
successor_list = list(self.begin_n.successor)
for suc_ in successor_list:
node_list = Solution.delete_node_1ton(node_list, self.begin_n, suc_, suc_.successor[0])
node_list = Solution.delete_node_1ton(node_list, self.begin, self.begin_n, self.begin_n.successor)
return node_list, True
class FanInSolution(Solution):
number = 0
def __init__(self, begin, begin_n, end_p, end, perm):
Solution.__init__(self, begin, begin_n, end_p, end)
self.perm = perm
def apply(self, node_list):
# make a copy of self.begin.precedence
precedence_list = list(self.begin.precedence)
for branch in precedence_list:
if branch.is_reserved:
return None, False
# make a copy of self.end_p.successor
successor_list = list(self.begin.successor)
output_name = ''
for suc in successor_list:
if suc.origin is None:
output_name = list(self.begin.output.values())[0]
fan_in_node_output_name = 'fan_in_adjustment_out' + str(FanInSolution.number)
self.begin.out_redirect(output_name, fan_in_node_output_name)
FanInSolution.number = FanInSolution.number + 1
for suc_2 in successor_list:
suc_2.in_redirect(output_name, fan_in_node_output_name)
for suc in successor_list:
if suc.origin is None:
transpose_output_name = [output_name]
else:
transpose_output_name = ['fan_in_adjustment_out' + str(FanInSolution.number)]
if self.perm == []:
nnode = LinkedNode(
helper.make_node(
'Transpose',
['fan_in_adjustment_in' + str(FanInSolution.number)],
transpose_output_name,
name='TransposeFanIn_succ_' + str(FanInSolution.number)))
else:
nnode = LinkedNode(
helper.make_node(
'Transpose',
['fan_in_adjustment_in' + str(FanInSolution.number)],
transpose_output_name,
perm=self.perm,
name='TransposeFanIn_succ_' + str(FanInSolution.number)))
FanInSolution.number = FanInSolution.number + 1
node_list = Solution.add_siso_node(node_list, self.begin, suc, list(self.begin.output.values())[0], nnode)
for branch in precedence_list:
node_list = Solution.delete_node_1ton(node_list, branch.get_precedence_by_idx(0), branch, self.begin)
return node_list, True
def _get_pad_from_Pad(node):
if len(node.origin.input) == 1:
pads = node.get_attribute('pads')
else:
pad_tensor = node.get_precedence_by_idx(1)
if pad_tensor is None:
pads = numpy_helper.to_array(node.initializers[0]).tolist()
else:
pads = numpy_helper.to_array(node.get_precedence_by_idx(1).tensors[0]).tolist()
return pads
def _get_axes_from_Squeeze_Unsqueeze(node):
axes = node.get_attribute('axes')
if axes is None:
if len(node.origin.input) == 2:
axes_tensor = node.get_precedence_by_idx(1)
if axes_tensor is None:
axes = numpy_helper.to_array(node.initializers[0]).tolist()
else:
axes = numpy_helper.to_array(node.get_precedence_by_idx(1).tensors[0]).tolist()
return axes
class MergePadConvSolution(Solution):
def __init__(self, begin, begin_n, end_p, end):
Solution.__init__(self, begin, begin_n, end_p, end)
def apply(self, node_list):
if self.begin_n.is_reserved:
return None, False
auto_pad_value = self.end_p.get_attribute('mode', 'constant')
if auto_pad_value == b'SAME_UPPER' or auto_pad_value == b'SAME_LOWER':
return None, False
pads = _get_pad_from_Pad(self.begin_n)
half_len_pads = len(pads) // 2
pads_new_list = pads[2:half_len_pads]
pads_new_list.extend(pads[half_len_pads + 2:])
pads_new = np.asarray(pads_new_list, dtype=np.int64)
self.end_p.attributes['auto_pad'] = 'NOTSET'
pads = self.end_p.get_attribute('pads')
if pads:
conv_pads = np.asarray(pads, dtype=np.int64)
pads_new_list = list(pads_new + conv_pads)
self.end_p.attributes['pads'] = pads_new_list
node_list = Solution.delete_node_nto1(node_list, self.begin, self.begin_n, self.end_p)
return node_list, True
class MergePadTransposeConvSolution(Solution):
def __init__(self, begin, begin_n, end_p, end):
Solution.__init__(self, begin, begin_n, end_p, end)
def apply(self, node_list):
if self.begin_n.is_reserved:
return None, False
auto_pad_value = self.end_p.get_attribute('mode', 'constant')
if auto_pad_value == b'SAME_UPPER' or auto_pad_value == b'SAME_LOWER':
return None, False
pads = _get_pad_from_Pad(self.begin_n)
perm = Solution.get_perm(self.end_p.origin)
half_len_pads = len(pads) // 2
pads_1 = pads[0:half_len_pads]
pads_2 = pads[half_len_pads:]
pads_1_transpose = [pads_1[idx] for idx in perm]
pads_2_transpose = [pads_2[idx] for idx in perm]
pads = pads_1_transpose + pads_2_transpose
pads_new_list = pads[2:half_len_pads]
pads_new_list.extend(pads[half_len_pads + 2:])
pads_new = np.asarray(pads_new_list, dtype=np.int64)
self.end.attributes['auto_pad'] = 'NOTSET'
pads = self.end.get_attribute('pads')
if pads:
conv_pads = np.asarray(pads, dtype=np.int64)
pads_new_list = list(pads_new + conv_pads)
self.end.attributes['pads'] = pads_new_list
node_list = Solution.delete_node_nto1(node_list, self.begin, self.begin_n, self.end_p)
return node_list, True
class NextToOutputSolution(Solution):
def apply(self, node_list):
if self.begin_n.is_reserved:
return None, False
for idx_, succ_ in enumerate(self.begin.successor):
if succ_ == self.begin_n:
self.begin.successor[idx_] = self.begin_n.successor[0]
else:
succ_.in_redirect(self.begin.single_output, self.begin_n.single_output)
find_begin_output = False
for k, v in self.begin.output.items():
if v == self.begin_n.single_input:
self.begin.output[k] = self.begin_n.single_output
find_begin_output = True
break
if not find_begin_output:
raise Exception(
"begin output is not found for NextToOutputSolution for tensor " + self.begin_n.single_output)
node_list.remove(self.begin_n)
return node_list, True
class ConvBatchNormSolution(Solution):
def __init__(self, begin, begin_n, end_p, end):
Solution.__init__(self, begin, begin_n, end_p, end)
def apply(self, node_list):
if self.end_p.is_reserved:
return None, False
conv_ori_weight = numpy_helper.to_array(self.begin_n.get_precedence_by_idx(1).tensors[0])
conv_ori_bias = 0
if len(self.begin_n.precedence) > 2:
conv_ori_bias = numpy_helper.to_array(self.begin_n.get_precedence_by_idx(2).tensors[0])
scale = numpy_helper.to_array(self.end_p.get_precedence_by_idx(1).tensors[0])
B = numpy_helper.to_array(self.end_p.get_precedence_by_idx(2).tensors[0])
mean = numpy_helper.to_array(self.end_p.get_precedence_by_idx(3).tensors[0])
var = numpy_helper.to_array(self.end_p.get_precedence_by_idx(4).tensors[0])
epsilon = self.end_p.get_attribute('epsilon', 1.0e-5)
adjusted_scale = scale /
|
np.sqrt(var + epsilon)
|
numpy.sqrt
|
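ConvBatchNormSolution above starts folding a BatchNormalization node into the preceding Conv by rescaling with scale / sqrt(var + epsilon). A numpy-only sketch of the full folding, assuming conv weights in (out_channels, in_channels, kH, kW) layout and the standard batch-norm formula (names are illustrative, not the optimizer's API):
import numpy as np
def fold_batchnorm_into_conv(weight, bias, scale, B, mean, var, epsilon=1e-5):
    """Return (weight', bias') so that conv(x, weight') + bias' equals
    batchnorm(conv(x, weight) + bias) with the given statistics."""
    adjusted_scale = scale / np.sqrt(var + epsilon)  # one factor per output channel
    new_weight = weight * adjusted_scale[:, None, None, None]
    new_bias = (bias - mean) * adjusted_scale + B
    return new_weight, new_bias
w = np.random.randn(8, 3, 3, 3)
b = np.zeros(8)
scale, B, mean, var = np.ones(8), np.zeros(8), np.random.randn(8), np.ones(8)
w_folded, b_folded = fold_batchnorm_into_conv(w, b, scale, B, mean, var)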
"""RAVE: using predicitve inforamtion bottleneck framework to learn RCs
to enhance the sampling of MD simulation. Code maintained by Yihang.
Read and cite the following when using this method:
https://aip.scitation.org/doi/abs/10.1063/1.5025487
https://www.nature.com/articles/s41467-019-11405-4
https://arxiv.org/abs/2002.06099
"""
import numpy as np
import COLVAR2npy
import Analyze_prave
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Lambda
from keras.initializers import RandomUniform, Constant
from keras.optimizers import RMSprop
from keras.constraints import unit_norm
from keras import regularizers
from keras.callbacks import Callback
from keras.losses import mean_squared_error
########################
### Global Functions ###
def data_prep(system_name, number_trajs, predictive_step):
""" Read the input trajectory files.
Prepare x, x_t trajectory and corresponding reweighting factors
Parameters
----------
system_name : string
Name of the sytem.
number_trajs : int
Number of trajectories.
predictive_step : int
Predictive time delay.
Returns
-------
X : np.array
present trajectory.
Y : np.array
future trajectory.
W1 : np.array
reweighting factors in the objective function before P(X_t | \chi )
W2 : np.array
reweighting factors in the objective function before P(X | \chi )
"""
for j in range(number_trajs):
traj_file_name = 'input/x_'+system_name+'_%i.npy'%j #present trajectory of shape n*d, where n is the number of MD steps and d is the number of order parameters
w_file_name = 'input/w_'+system_name+'_%i.npy'%j #weights corresponding to the trajectory in x, calculated as exp(beta*V)
if predictive_step==0:
x = np.load(traj_file_name)
y = x[:,:]
w1 = np.load(w_file_name)
w2 = np.zeros( np.shape(w1) )
else:
x = np.load(traj_file_name)
y = x[predictive_step: , :]
x = x[:-predictive_step, :]
w = np.load(w_file_name)
w_x = w[:-predictive_step]
w_y = w[predictive_step:]
w1 = ( w_x * w_y )**0.5
w2 = w_x**0.5*( w_x**0.5- w_y**0.5)
try:
X = np.append(X, x, axis = 0)
Y = np.append(Y, y, axis = 0)
W1 = np.append(W1, w1, axis = 0)
W2 = np.append(W2, w2, axis = 0)
except:
X = x
Y = y
W1 = w1
W2 = w2
normaliztion_factor = np.sum(W1)/len(W1)
W1 /= normaliztion_factor
W2 /= normaliztion_factor
print('length of data:%i'%np.shape(X)[0] )
print('number of order parameters:%i'%np.shape(X)[1] )
print('min reweighting factor:%f'%np.min(W1))
print('max reweighting factor:%f'%np.max(W1))
return X, Y, W1, W2
def random_pick(x, x_dt, w1, w2, training_len):
""" ramdomly pick (x, x_dt) pair from data set
Parameters
----------
x : np.array
present trajectory.
x_dt : np.array
future trajectory.
w1 : np.array
reweighting factores in objective function before P(X_t | \chi )
w2 : np.array
reweighting factores in objective function before P(X | \chi )
training_len: int
length of the return data set
Returns
-------
x1 : np.array
randomly selected data points from the present trajectory.
x2 : np.array
future trajectory corresponding to the selected data points in x1.
w1 : np.array
corresponding reweighting factors in the objective function before P(X_t | \chi )
w2 : np.array
corresponding reweighting factors in the objective function before P(X | \chi )
"""
indices = np.arange( np.shape(x)[0])
np.random.shuffle(indices)
indices = indices[:training_len]
x = x[indices, :]
x_dt = x_dt[indices, :]
w1 = w1[indices]
w2 = w2[indices]
print('%i data points are used in this training'%len(indices))
return x, x_dt, w1, w2
def scaling(x):
""" make order parametes with mean 0 and variance 1
return new order parameter and scaling factors
Parameters
----------
x : np.array
order parameters
Returns
----------
x : np.array
order parameters after rescaling
std_x : np.array
rescaling factors of each OP
"""
x = x-np.mean(x, axis =0)
std_x = np.std(x, axis =0)
return x/std_x, std_x
def sampling(args):
"""Sample the latent variable
from a Normal distribution."""
s_mean= args
epsilon = K.random_normal(shape=(batch_size,rc_dim), mean=0.0, stddev=s_vari )
s_noise = s_mean + epsilon
return s_noise
def dynamic_correction_loss(x, w1, w2):
"""loss function with dynamic correction"""
def custom_loss(y_true, y_pred ):
ce1 = mean_squared_error(y_true, y_pred )
ce2 = mean_squared_error(x, y_pred)
return (w1[:,0]*ce1+w2[:,0]*ce2)
return custom_loss
class WeightsHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.losses_vali = []
self.weights0 = []
def on_epoch_end(self, epoch, logs={}):
self.losses.append(logs.get('loss'))
self.losses_vali.append(logs.get('val_loss'))
self.weights0.append( prave.layers[1].get_weights())
#########################
if __name__ == '__main__':
### Global Variables ###
#system info
system_name = '6e1u_1'
n_trajs = 4 #number of trajectories
save_path = 'output/' #path to the directory that saves output files
T = 300 #Temperature in unit of Kelvin
bias = True #When False, reweighting factors are set to 1.
#When True, reweighting factors are calculated and saved
###predictive time delay ###
time_delay= list(range(0, 100, 10)) #predictive time delay
#network variables
training_size = 10240000 # if training_size = n, only n data points will be randomly picked from the whole data set and used for training
batch_size = 2048 #the total number of training data points n should be a multiple of batch_size
op_dim = 3 #dimensionality of order parameters
rc_dim = 2 #dimensionality of reaction coordinates
int_dim = 128 #number of cells in each layer
s_vari = 0.005
learning_rate = 0.0002
decay = 0.0
trials = range(4)
epochs = 20 #Number of epochs to train the model
random_uniform = RandomUniform(minval=-0.05, maxval=0.05)
set_constant = Constant(value = 0.5**0.5)
if_whiten = True
#convert COLVAR file to npy file
for traj_index in range(n_trajs):
COLVAR2npy.COLVAR2npy( system_name+'_%i'%traj_index, T, op_dim, 'input/', bias )
### set predictive time delay ###
if not bias:
system_name = 'unbiased_' + system_name
########################
for dt in time_delay:
########################
### load the dataset ###
(x, y, w1, w2) = data_prep( system_name, n_trajs, dt )
if if_whiten:
x, scaling_factors = scaling(x)
y -= np.mean( y, axis =0)
y /= scaling_factors
else:
scaling_factors = np.ones( op_dim )
############################
### run different trials ###
for trial in trials:
Result = []
############################################
### Variational Autoencoder architecture ###
input_Data = Input(batch_shape=(batch_size, op_dim))
input_w1 = Input(shape=(1,))
input_w2 = Input(shape=(1,))
linear_encoder = Dense( rc_dim, activation=None, use_bias=None, kernel_regularizer=regularizers.l1(0.0), kernel_initializer='random_uniform', kernel_constraint = unit_norm(axis=0))(input_Data)
s = Lambda(sampling)(linear_encoder)
hidden_a = Dense(int_dim, activation='elu', kernel_initializer='random_uniform')(s)
hidden_b = Dense(int_dim, activation='elu', kernel_initializer='random_uniform')(hidden_a)
y_reconstruction = Dense( op_dim, activation=None, kernel_initializer='random_uniform')(hidden_b)
#########################################
### Randomly pick samples from dataset ###
#data for training
train_x, train_y, train_w1, train_w2 = random_pick(x, y, w1, w2,training_size)
#data for validation
vali_x, vali_y, vali_w1, vali_w2 = random_pick(x , y, w1, w2, training_size)
#############################################
### Prepare the PRAVE and train the PRVAE ###
prave = Model([input_Data, input_w1 , input_w2] ,y_reconstruction)
rmsprop = RMSprop(lr=learning_rate, decay = decay)
prave.compile(optimizer=rmsprop,loss=dynamic_correction_loss(input_Data, input_w1, input_w2))
history = WeightsHistory()
History = prave.fit( [train_x,train_w1,train_w2], train_y,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=([vali_x,vali_w1,vali_w2], vali_y),
callbacks = [history ] )
####################
### Save results ###
Loss = np.array( history.losses )
Val_Loss = np.array( history.losses_vali )
Weights0=np.array( history.weights0 )[:,0,:,:]
#w_norm = np.linalg.norm(Weights0, axis=1)
for op_index in range( op_dim ):
Weights0[:,op_index,:]/=scaling_factors[op_index] #rescale back to the RC weights of the non-whitened OPs
for rc_index in range( rc_dim ):
Weights0[:, :, rc_index]= np.transpose( np.transpose( Weights0[:, :, rc_index] )/np.linalg.norm(Weights0[:, :, rc_index], axis=1)) #normalize the rc weights
Loss = np.expand_dims(Loss, axis=-1)
Val_Loss = np.expand_dims(Val_Loss, axis=-1)
result_loss = np.concatenate((Loss, Val_Loss) , axis =-1)
result_weights = Weights0
K.clear_session()
print('!!!!')
print(np.shape(result_weights))
network_info = '_int_dim'+str(int_dim)+'_lr'+str(learning_rate)+'_decay'+str(decay)+'_batch_size'+str(batch_size)
save_info = system_name+'_dt'+str(dt)+'_trail'+str(trial)+'_svar'+str(s_vari)+'_train_size'+str(training_size)+network_info
np.save(save_path+'Loss_'+save_info, result_loss)
|
np.save(save_path+'Weights_'+save_info, result_weights)
|
numpy.save
|
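The training script above whitens each order parameter to zero mean and unit variance and later divides the learned RC weights by the same scaling factors before normalizing them. A minimal numpy sketch of that round trip (variable names are illustrative):
import numpy as np
x = np.random.randn(1000, 3) * np.array([2.0, 0.5, 10.0]) + 5.0
# Whiten: subtract the per-column mean, divide by the per-column standard deviation.
mean = x.mean(axis=0)
std = x.std(axis=0)
x_white = (x - mean) / std
# Linear weights learned on x_white map back to the original scale by dividing by std.
weights_white = np.array([0.3, 0.5, 0.2])
weights_original = weights_white / std
weights_original /= np.linalg.norm(weights_original)  # unit-normalize, as the script does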
#!/usr/bin/env python
"""
alpso - Python Version of the Augmented Lagrangian Particle Swarm Optimizer
alpso is a global optimizer which solves problems of the form:
min F(x)
subject to: Gi(x) = 0, i = 1(1)ME
Gj(x) <= 0, j = ME+1(1)M
xLB <= x <= xUB
To Do:
- Migrate Inner Loop Printing Option
- Add Other Inertia and Velocity Updates to Inner Loop
- Fix Neighbourhood best from Lagrangian value
"""
# =============================================================================
# Standard Python modules
# =============================================================================
import os
import random
import time
from math import floor
# =============================================================================
# External Python modules
# =============================================================================
import numpy as np
from ..pyOpt_error import pyOptSparseWarning
# =============================================================================
# Extension modules
# =============================================================================
# =============================================================================
# Misc Definitions
# =============================================================================
inf = 10.0e20 # define a value for infinity
# =============================================================================
eps = 1.0 # define a value for machine precision
while (eps / 2.0 + 1.0) > 1.0:
eps /= 2.0
eps *= 2.0
# eps = math.ldexp(1,-52)
# ==============================================================================
# alpso function
# ==============================================================================
# fmt: off
def alpso(dimensions, constraints, neqcons, xtype, x0, xmin, xmax, swarmsize, nhn,
nhm, maxOutIter, maxInnIter, minInnIter, stopCriteria, stopIters, etol,
itol, rtol, atol, dtol, prtOutIter, prtInnIter, r0, vinit, vmax, c1, c2, w1, w2,
ns, nf, vcrazy, fileout, filename, logfile, hstfile, rseed, scale, nhs, objfunc):
# fmt: on # noqa: E115
"""
Python Version of the Augmented Lagrangian Particle Swarm Optimizer
Documentation last updated: April. 29, 2008 - <NAME>
"""
#
if x0 != []:
if isinstance(x0, list):
x0 = np.array(x0)
elif not isinstance(x0, np.ndarray):
pyOptSparseWarning(
("Initial x must be either list or numpy.array, " "all initial positions randomly generated")
)
#
if hstfile is not None:
h_start = True
else:
h_start = False
if logfile is not None:
sto_hst = True
else:
sto_hst = False
# Set random number seed
rand = random.Random()
if rseed == {}:
rseed = time.time()
rand.seed(rseed)
#
if filename == "":
filename = "ALPSO.out"
ofname = ""
sfname = ""
fntmp = filename.split(".")
if len(fntmp) == 1:
ofname += fntmp[0] + "_print.out"
sfname += fntmp[0] + "_summary.out"
else:
if "/" not in fntmp[-1] and "\\" not in fntmp[-1]:
ofname += filename[: filename.rfind(".")] + "_print." + fntmp[-1]
sfname += filename[: filename.rfind(".")] + "_summary." + fntmp[-1]
else:
ofname += filename + "_print.out"
sfname += filename + "_summary.out"
header = ""
header += " " * 37 + "======================\n"
header += " " * 39 + " ALPSO 1.1 (Serial)\n"
header += " " * 37 + "======================\n\n"
header += "Parameters:\n"
header += "-" * 97 + "\n"
if maxInnIter != minInnIter:
diI = 1
else:
diI = 0
if x0 != []:
if len(x0.shape) == 1:
nxi = 1
else:
nxi = x0.shape[0]
else:
nxi = 0
# fmt: off
header += 'Swarmsize :%9d' % swarmsize + ' MaxOuterIters :%9d' % maxOutIter + ' Seed:%26.8f\n' % rseed
header += 'Cognitive Parameter :%9.3f' % c1 + ' MaxInnerIters :%9d' % maxInnIter + ' Scaling :%11d\n' % scale
header += 'Social Parameter :%9.3f' % c2 + ' MinInnerIters :%9d' % minInnIter + ' Stopping Criteria :%11d\n' % stopCriteria
header += 'Initial Weight :%9.3f' % w1 + ' DynInnerIters :%9d' % diI + ' Number of Failures :%11d\n' % ns
header += 'Final Weight :%9.3f' % w2 + ' StoppingIters :%9d' % stopIters + ' Number of Successes:%11d\n\n' % nf
header += 'Absolute Tolerance : %1.2e' % atol + ' Number Initial Pos:%9d' % nxi + ' Neighbourhood Model:%11s\n' % nhm
header += 'Relative Tolerance : %1.2e' % rtol + ' Initial Velocity :%9d' % vinit + ' Neighbourhood Size :%11d\n' % nhn
header += 'Inequality Tolerance: %1.2e' % itol + ' Maximum Velocity :%9d' % vmax + ' Selfless :%11d\n' % nhs
header += 'Equality Tolerance : %1.2e' % etol + ' Craziness Velocity: %1.2e' % vcrazy + ' Fileout :%11d\n' % fileout
header += 'Global Distance : %1.2e' % dtol + ' Initial Penalty :%9.2f' % r0 + ' File Name :%11s\n' % filename
header += '-' * 97 + '\n\n'
# fmt: on
if (fileout == 1) or (fileout == 3):
if os.path.isfile(ofname):
os.remove(ofname)
ofile = open(ofname, "w")
ofile.write(header)
if (fileout == 2) or (fileout == 3):
if os.path.isfile(sfname):
os.remove(sfname)
sfile = open(sfname, "w")
sfile.write(header)
#
dt = 1.0
vlimit = vmax
vmax =
|
np.ones(dimensions, float)
|
numpy.ones
|
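The module-level loop in the optimizer above estimates machine precision by halving a candidate until adding half of it to 1.0 no longer changes the result. A tiny standalone sketch of the same idea, checked against the value numpy reports (illustrative only; the optimizer keeps its own variant):
import numpy as np
eps = 1.0
while 1.0 + eps / 2.0 > 1.0:  # stop once 1.0 + eps/2 is indistinguishable from 1.0
    eps /= 2.0
print(eps)                  # 2.220446049250313e-16 on IEEE-754 doubles
print(np.finfo(float).eps)  # the same value, straight from numpy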
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the convolutional layers classes (only the 2D convolution has been implemented by now)."""
import numpy as np
from typing import Tuple, Union
from .layer import Layer
from dlfs.activation_functions import ActivationFunction
from dlfs.convolutioners import Convolutioner, get_convolution
class Conv2D(Layer):
"""A 2D convolution layer.
Conv2D is analogous to the Dense layer. They differ in that Conv2D takes spatial location into account.
It also introduces additional parameters such as stride, kernel size, and padding,
which are essential characteristics of a convolution.
Technical details (assuming data_format='channels_last'):
- The input is a 4D tensor (batch, height, width, number of input channels).
- The output is a 4D tensor (batch, height, width, n_filters). (see _get_output_shape for more details)
- Each kernel is a 3D tensor (kernel_height, kernel_width, number of input channels).
- The weights are a 4D tensor (kernel_height, kernel_width, number of input channels, n_filters).
- The bias is a 1D tensor (n_filters,).
- The layer performs a '3D' convolution on the input tensor for each kernel.
Args:
kernel_size (tuple): tuple of 2 integers, specifying the height and width of the 2D convolution window.
n_filters (int): Number of filters/kernels.
stride (tuple or int): specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for all spatial dimensions
padding (bool, tuple or int): If True, add padding to the input so that the output has the same shape as the
input (assuming stride = 1). If padding is a tuple of two integers, this defines the amount of padding
to add to the top, bottom, left and right of the input. If padding is an integer, this number of zeros
is added to the input on both sides.
activation (ActivationFunction): Activation function
use_bias (bool): Whether to use bias
convolution_type (str): convolution mode. Can be 'winograd', 'direct' or 'patch'.
name (str): Name of the layer
input_shape (tuple): shape of the input
weights_init (str): Initialization method for the weights
bias_init (str): Initialization method for the bias
Attributes:
n_filters (int): Number of filters
kernel_size (tuple): tuple of 2 integers, specifying the height and width of the 2D convolution window.
stride (tuple): specifying the strides of the convolution along the height and width.
padding (tuple): tuple of 2 integers, specifying the padding of the convolution along the height and width.
These values are computed from the stride and kernel size in order to ensure that the output has the same
shape as the input (if padding = True).
activation (ActivationFunction): Activation function
use_bias (bool): Whether to use bias.
convolution_type (str): convolution mode. Recommended to be 'winograd'.
name (str): Name of the layer
blocksize (Tuple[int, int]): the size of the block, only used with Winograd.
data_format (str): the data format of the input. Can be 'channels_last' or 'channels_first'.
Raises:
ValueError: If using padding and stride != 1.
"""
def __init__(self,
n_filters: int,
kernel_size: Union[Tuple[int, int], int],
stride: Union[Tuple[int, int], int] = (1, 1),
padding: Union[bool, tuple, int] = False,
activation: str = None,
use_bias: bool = True,
convolution_type: str = "simple",
name: str = "Conv2D",
input_shape: tuple = None,
weights_init: str = "glorot_uniform",
bias_init: str = "zeros",
blocksize: Tuple[int, int] = None,
data_format: str = "channels_last"):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if n_filters < 0:
raise ValueError("The number of filters must be greater than 0")
if len(kernel_size) != 2:
raise ValueError("The kernel size should be a tuple of two integers")
if kernel_size[0] <= 0 or kernel_size[1] <= 0:
raise ValueError("The kernel size should be greater than 0")
input_shape = None if input_shape is None else (None, *input_shape)
output_shape = None
if isinstance(padding, bool):
padding = (0, 0) if padding is False else (kernel_size[0] // 2, kernel_size[1] // 2)
else:
padding = (padding, padding) if isinstance(padding, int) else padding
# get the output shape if the input shape is known
if input_shape is not None:
output_height = (input_shape[1] - kernel_size[0] + 2 * padding[0]) // stride[0] + 1
output_width = (input_shape[2] - kernel_size[1] + 2 * padding[1]) // stride[1] + 1
output_shape = (None, output_height, output_width, n_filters)
super().__init__(input_shape=input_shape,
output_shape=output_shape,
activation=activation,
name=name)
self.kernel_size = kernel_size
self.n_filters = n_filters
self.stride = stride
self.padding = padding
self.convolution_type = convolution_type
self.blocksize = blocksize
self.use_bias = use_bias
self.weights_init = weights_init
self.bias_init = bias_init
self.forward_conv = None
self.backward_conv = None
self.update_conv = None
self.data_format = data_format
self._batch_count = 0 # needed to preprocess the data in the winograd algorithm
def initialize(self, input_shape: tuple, weights: np.ndarray = None, bias: np.ndarray = None):
"""Initializes the layer.
If weights and bias are not provided, they are initialized using the specified initialization method.
Args:
input_shape (tuple): input shape of the layer, it has the form (n_samples, height, width, n_channels)
weights (np.ndarray): weights of the layer (optional, recommended to be None).
bias (np.ndarray): bias of the layer (optional, recommended to be None).
Raises:
ValueError: if the input shape is not valid.
ValueError: if the weights and bias are not of the shape:
(input_shape[3], self.kernel_size[0], self.kernel_size[1], self.n_filters),
(self.n_filters,), respectively.
ValueError: if trying to set bias and `self.use_bias` is False.
"""
# check if the input shape is correct
if len(input_shape) != 4:
raise ValueError("The input shape should be a tuple of four integers: "
"(n_samples, height, width, n_channels)")
self.input_shape = input_shape
self.forward_conv = get_convolution(self.convolution_type,
input_shape[1:],
self.kernel_size,
self.padding,
self.stride,
self.blocksize)
self.output_shape = self._get_output_shape()
if self.data_format == "channels_first":
weights_shape = (self.n_filters, input_shape[3], *self.kernel_size)
else:
weights_shape = (*self.kernel_size, input_shape[3], self.n_filters)
# initialize weights
if weights is not None:
if weights.shape != weights_shape:
raise ValueError(f"The shape of the weights should be "
"(n_filters, kernel_height, kernel_width, n_channels_previous_layer). "
f"Got {weights.shape}, expected {weights_shape}")
self.weights = weights
elif self.weights_init == "xavier":
self.weights = np.random.normal(loc=0,
scale=np.sqrt(
1 / (input_shape[3] * self.kernel_size[0] * self.kernel_size[1])),
size=weights_shape)
elif self.weights_init == "zeros":
self.weights = np.zeros(weights_shape)
elif self.weights_init == "ones":
self.weights = np.ones(weights_shape)
elif self.weights_init == "uniform":
self.weights = np.random.uniform(low=-1, high=1, size=weights_shape)
elif self.weights_init == "normal":
self.weights = np.random.normal(loc=0, scale=1, size=weights_shape)
elif self.weights_init == "glorot_uniform":
self.weights = np.random.uniform(low=-np.sqrt(6 / (input_shape[3] + self.n_filters)),
high=np.sqrt(6 / (input_shape[3] + self.n_filters)),
size=weights_shape)
else:
raise ValueError("Unknown weights initialization")
if self.use_bias:
bias_shape = (self.n_filters,)
# initialize bias
if bias is not None:
if bias.shape != bias_shape:
raise ValueError(f"The shape of the bias should be "
"(n_channels_current_layer). "
f"Got {bias.shape}, expected {bias_shape}")
self.bias = bias
elif self.bias_init == "zeros":
self.bias = np.zeros(bias_shape)
elif self.bias_init == "ones":
self.bias = np.ones(bias_shape)
elif self.bias_init == "uniform":
self.bias = np.random.uniform(low=-1, high=1, size=bias_shape)
elif self.bias_init == "normal":
self.bias =
|
np.random.normal(loc=0, scale=1, size=bias_shape)
|
numpy.random.normal
|
import json
import numpy as np
def analysis(filename):
with open("output/{}.json".format(filename), 'r') as data_file:
data = json.load(data_file)
na = lf = correct = incorrect = no_path = no_answer = 0
p = []
r = []
n_list = 0
n_count = 0
n_ask = 0
p_count = []
p_list = []
p_ask = []
r_count = []
r_list = []
r_ask = []
incor = 0
sp = []
sr = []
cp = []
cr = []
svp = []
svr = []
mvp = []
mvr = []
for i in data:
p.append(i['precision'])
r.append(i['recall'])
if i['precision'] == i['recall'] == 0.0:
incor += 1
if i['answer'] == "correct":
correct += 1
elif i['answer'] == "-Not_Applicable":
na += 1
elif i['answer'] == "-Linker_failed":
lf += 1
elif i['answer'] == "-incorrect":
incorrect += 1
elif i['answer'] == "-without_path":
no_path += 1
elif i['answer'] == "-no_answer":
no_answer += 1
if 'ASK' in i['query']:
n_ask += 1
p_ask.append(i['precision'])
r_ask.append(i['recall'])
elif 'COUNT(' in i['query']:
n_count += 1
p_count.append(i['precision'])
r_count.append(i['recall'])
else:
n_list += 1
p_list.append(i['precision'])
r_list.append(i['recall'])
if 'single' in i['features']:
sp.append(i['precision'])
sr.append(i['recall'])
elif 'compound' in i['features']:
cp.append(i['precision'])
cr.append(i['recall'])
if 'singlevar' in i['features']:
svp.append(i['precision'])
svr.append(i['recall'])
elif 'multivar' in i['features']:
mvp.append(i['precision'])
mvr.append(i['recall'])
print("-- Basic Stats --")
print("- Total Questions: %d" % (correct + incorrect + no_path + no_answer + na + lf))
print("- Correct: %d" % correct)
print("- Incorrect: %d" % incorrect)
print("- No-Path: %d" % no_path)
print("- No-Answer: %d" % no_answer)
print("- Not_Applicable: %d" % na)
print("- Linker_failed: %d" % lf)
print('- Wrong Answer: %d' % incor)
print('None in precision: ', sum(i is None for i in p))
print('None in recall: ', sum(i is None for i in r))
p = np.array(p, dtype=np.float64)
r = np.array(r, dtype=np.float64)
mp = np.nanmean(p)
mr = np.nanmean(r)
print("- Precision: %.4f" % mp)
print("- Recall: %.4f" % mr)
print("- F1: %.4f" % ((2 * mp * mr) / (mp + mr)))
p_count = np.array(p_count, dtype=np.float64)
p_list = np.array(p_list, dtype=np.float64)
p_ask = np.array(p_ask, dtype=np.float64)
r_count = np.array(r_count, dtype=np.float64)
r_list = np.array(r_list, dtype=np.float64)
r_ask = np.array(r_ask, dtype=np.float64)
print('List: ', n_list)
a = np.nanmean(p_list)
b = np.nanmean(r_list)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
print('Count: ', n_count)
a = np.nanmean(p_count)
b = np.nanmean(r_count)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
print('Ask: ', n_ask)
a = np.nanmean(p_ask)
b = np.nanmean(r_ask)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
sp = np.array(sp, dtype=np.float64)
sr = np.array(sr, dtype=np.float64)
cp = np.array(cp, dtype=np.float64)
cr = np.array(cr, dtype=np.float64)
print('Single: ', len(sp), len(sr))
a = np.nanmean(sp)
b = np.nanmean(sr)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
print('Compound: ', len(cp), len(cr))
a = np.nanmean(cp)
b = np.nanmean(cr)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
svp = np.array(svp, dtype=np.float64)
svr = np.array(svr, dtype=np.float64)
mvp = np.array(mvp, dtype=np.float64)
mvr = np.array(mvr, dtype=np.float64)
print('Single Var: ', len(svp), len(svr))
a = np.nanmean(svp)
b = np.nanmean(svr)
print('precision: %.4f' % a)
print('recall: %.4f' % b)
print('f1-score: %.4f' % ((2 * a * b) / (a + b)))
print('Multiple Var: ', len(mvp), len(mvr))
a = np.nanmean(mvp)
b =
|
np.nanmean(mvr)
|
numpy.nanmean
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 19:02:25 2021
@author: Administrator
"""
import os
import pandas as pd
import numpy as np
# import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from matplotlib import pyplot as pl
from MyClass_python import base_function as bf
#--figure 1
base_folder = r'.\0429\figure1'
"""
Loss function + CNN architecture
"""
final_acc_list = []
final_fscore_list = []
final_kappa_list = []
nRepeat = 5
for idx in range(nRepeat):
repeat_name = 'repeat_' + str(idx)
# print(repeat_name)
acc_list = []
fscore_list = []
kappa_list = []
method_list = []
cnn_arch_list = os.listdir(base_folder)
for cnn_arch in cnn_arch_list:
print(cnn_arch)
cnn_arch_folder = os.path.join(base_folder, cnn_arch, repeat_name)
cnn_loss_list = os.listdir(cnn_arch_folder)
# cnn_loss_list = cnn_loss_list[0:3] + cnn_loss_list[4:]
# print(cnn_loss_list)
for cnn_loss in cnn_loss_list:
cnn_loss_path = os.path.join(cnn_arch_folder, cnn_loss)
tmp_list = os.listdir(cnn_loss_path)
for tmp_name in tmp_list:
final_path = os.path.join(cnn_loss_path, tmp_name)
# print(final_path)
acf_data = pd.read_csv(os.path.join(final_path, 'accuracy_fscore_kappa.csv'), header=None)
acf_value = acf_data.values
acc_list.append(acf_value[0])
fscore_list.append(acf_value[1])
kappa_list.append(acf_value[2])
method_list.append([cnn_arch, cnn_loss])
acc_array = np.vstack(acc_list)
fscore_array = np.vstack(fscore_list)
kappa_array = np.vstack(kappa_list)
final_acc_list.append(acc_array)
final_fscore_list.append(fscore_array)
final_kappa_list.append(kappa_array)
#############################################################
#--calculate Mean and Std
acc_mean = np.mean(np.hstack(final_acc_list), axis=1)
acc_std = np.std(np.hstack(final_acc_list), axis=1)
fscore_mean = np.mean(np.hstack(final_fscore_list), axis=1)
fscore_std = np.std(
|
np.hstack(final_fscore_list)
|
numpy.hstack
|
import numpy as np
import pandas as pd
import os
import glob
import shutil
from IPython.display import clear_output
import seaborn as sns
import matplotlib.pyplot as plt
def get_hit_metrics(job_dir, iter_max=10, task_col='pcba-aid624173', cluster_col='BT_0.4 ID'):
def _get_hits_helper(iter_df):
hits = iter_df[task_col].sum()
return hits
des_cols = ['iter_num',
'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits',
'exploitation_batch_size', 'exploration_batch_size', 'total_batch_size']
iter_results = []
iter_dfs = [pd.read_csv(job_dir + "/training_data/iter_0.csv")]
for iter_i in range(iter_max):
iter_dir = job_dir + "/iter_{}/".format(iter_i)
exploit_csv = iter_dir + 'exploitation.csv'
explore_csv = iter_dir + 'exploration.csv'
exploit_hits, exploit_unique_hits, exploit_batch_size = 0,0,0
explore_hits, explore_unique_hits, explore_batch_size = 0,0,0
curr_iter_dfs = []
if os.path.exists(exploit_csv):
exploit_df = pd.read_csv(exploit_csv)
exploit_hits = _get_hits_helper(exploit_df)
exploit_batch_size = exploit_df.shape[0]
curr_iter_dfs.append(exploit_df)
if os.path.exists(explore_csv):
explore_df = pd.read_csv(explore_csv)
explore_hits = _get_hits_helper(explore_df)
explore_batch_size = explore_df.shape[0]
curr_iter_dfs.append(explore_df)
total_hits = exploit_hits + explore_hits
total_batch_size = exploit_batch_size + explore_batch_size
curr_iter_df = pd.concat(curr_iter_dfs)
iter_hits = curr_iter_df[curr_iter_df[task_col] == 1]
# unique hits are those that belong to a cluster for which we have not found a hit in previous iters
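# e.g. if this iteration's hits fall in clusters {3, 3, 7} and cluster 3 already
# produced a hit in an earlier iteration, total_unique_hits is 1 (cluster 7 only)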
train_df = pd.concat(iter_dfs)
train_hits = train_df[train_df[task_col] == 1]
total_unique_hits = iter_hits[~iter_hits[cluster_col].isin(train_hits[cluster_col])]
total_unique_hits = total_unique_hits[cluster_col].unique().shape[0]
iter_results.append([iter_i,
exploit_hits, explore_hits, total_hits,
total_unique_hits,
exploit_batch_size, explore_batch_size, total_batch_size])
iter_dfs.extend(curr_iter_dfs)
job_df = pd.DataFrame(iter_results,
columns=des_cols)
total_iters = job_df['iter_num'].max()
iter_sums = [10, 20, 30, 40, 50]
sums_list = []
for i in iter_sums:
job_slice = job_df[job_df['iter_num'] < i]
sum_df = job_slice.sum().to_frame().T
sums_list.append(sum_df)
sums_df = pd.concat(sums_list)
final_df = pd.concat([job_df, sums_df])
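# the cumulative-sum rows are tagged 9010..9050 below so they can be told apart from
# per-iteration rows, e.g. iter_num == 9050 holds the totals over the first 50 iterations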
iter_sums = [9000+i for i in iter_sums]
final_df['iter_num'] = list(np.arange(iter_max)) + iter_sums
final_df['max_iter'] = total_iters
return final_df
def get_results(results_dir, iter_max=10, task_col='pcba-aid624173', cluster_col='BT_0.4 ID', run_count_threshold=5,
check_failure=True):
successful_jobs = []
failed_jobs = []
all_96 = []
all_384 = []
all_1536 = []
for i, rdir in enumerate(results_dir):
#clear_output()
#print('{}/{}'.format(i, len(results_dir)))
config_file = rdir+'config.csv'
# get job identifiers
rd_splits = rdir.split('\\')
hs_group = rd_splits[1]
hs_id = rd_splits[2]
task_col = rd_splits[3]
rf_id = rd_splits[4]
batch_size = rd_splits[5]
# check that the job completed successfully:
# - exactly iter_max*batch_size cpds were selected and that they have unique Index ID
batch_cpds = glob.glob(rdir+'iter_*/expl*.csv')
if len(batch_cpds) > 0:
cpd_df = pd.concat([pd.read_csv(x) for x in batch_cpds])
if cpd_df['Index ID'].unique().shape[0] < iter_max*int(batch_size.split('_')[-1]):
print('Failed to reach 50 iters {}_{}_{}'.format(hs_id, rf_id, task_col))
if cpd_df['Index ID'].unique().shape[0] != cpd_df.shape[0]:
print('Failed uniqueness condition {}_{}_{}'.format(hs_id, rf_id, task_col))
cpd_df.to_csv('./failed.csv')
assert False
if check_failure:
if cpd_df.shape[0] == iter_max*int(batch_size.split('_')[-1]):
successful_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
assert cpd_df['Index ID'].unique().shape[0] == iter_max*int(batch_size.split('_')[-1])
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
else:
if cpd_df['Index ID'].unique().shape[0] == cpd_df.shape[0]:
successful_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
hs_id = hs_id.replace('ClusterBasedWCSelector', 'CBWS')
hs_id = hs_id.replace('InstanceBasedWCSelector', 'InstanceBWS')
job_df = get_hit_metrics(rdir, len(glob.glob(rdir+'iter_*/')), task_col, cluster_col)
job_df['rf_id'] = rf_id
job_df['hs_id'] = hs_id
job_df['hs_group'] = hs_group
job_df['config_file'] = config_file
job_df['task_col'] = task_col
if int(batch_size.split('_')[-1]) == 96:
all_96.append(job_df)
elif int(batch_size.split('_')[-1]) == 384:
all_384.append(job_df)
else:
all_1536.append(job_df)
if len(all_96) > 0:
all_96 = pd.concat(all_96)
else:
all_96 = None
if len(all_384) > 0:
all_384 = pd.concat(all_384)
else:
all_384 = None
if len(all_1536) > 0:
all_1536 = pd.concat(all_1536)
else:
all_1536 = None
all_df = pd.concat([all_96, all_384, all_1536])
return all_96, all_384, all_1536, all_df, successful_jobs, failed_jobs
def helper_agg(col):
if col.name in ['rf_id', 'task_col']:
return '-'
elif col.name in ['hs_id', 'hs_group']:
return col.unique()[0]
else:
if '_std' in col.name:
return col.std()
else:
return col.mean()
def get_all_failures(results_df, iter_max):
rf_ids = results_df['rf_id'].unique().tolist()
task_cols = results_df['task_col'].unique().tolist()
hs_ids = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
summary_df = results_df[results_df['iter_num']==iter_max]
cbrandom = summary_df[summary_df['hs_id'] == 'ClusterBasedRandom']
fail_success_counts = np.zeros(shape=(len(hs_ids),4))
for task in task_cols:
for rf_id in rf_ids:
for i, hs_id in enumerate(hs_ids):
temp_random = cbrandom[(cbrandom['rf_id'] == rf_id) & (cbrandom['task_col'] == task)]
rhits, runiquehits = temp_random['total_hits'].iloc[0], temp_random['total_unique_hits'].iloc[0]
temp_df = summary_df[(summary_df['rf_id'] == rf_id) & (summary_df['task_col'] == task) & (summary_df['hs_id'] == hs_id)]
mhits, muniquehits = temp_df['total_hits'].iloc[0], temp_df['total_unique_hits'].iloc[0]
hit_limit, unique_hit_limit = temp_df['hit_limit'].iloc[0], temp_df['unique_hit_limit'].iloc[0]
if (mhits <= rhits) or (muniquehits <= runiquehits):
fail_success_counts[i,0] += 1
if (mhits / hit_limit) >= 0.1:
fail_success_counts[i,1] += 1
if (mhits / hit_limit) >= 0.25:
fail_success_counts[i,2] += 1
if (mhits / hit_limit) >= 0.5:
fail_success_counts[i,3] += 1
fail_success_counts = pd.DataFrame(data=fail_success_counts,
columns=['# failures', '# >= 0.1', '# >= 0.25', '# >= 0.5'])
fail_success_counts['hs_id'] = hs_ids
return fail_success_counts
def get_last_iter_summary(results_df, iter_max, group_cols = ['hs_id', 'rf_id'],
add_fail_success_counts=False):
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
sdf1 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf1 = sdf1.groupby(group_cols).agg(helper_agg).sort_values('total_hits', ascending=False)
sorted_hid_list = sdf1.index.tolist()
sdf2 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf2 = sdf2[[c for c in sdf2.columns if ('_hits' in c or 'hs_id' in c or 'rf_id' in c)]]
sdf2.columns = [c.replace('hits', 'std') for c in sdf2.columns]
sdf2 = sdf2.groupby(group_cols).agg(helper_agg).loc[sorted_hid_list]
sdf = pd.concat([sdf1, sdf2], axis=1)
if add_fail_success_counts:
fail_success_counts = get_all_failures(results_df, iter_max)
new_fs_cols = fail_success_counts.drop(['hs_id'], axis=1).columns.tolist()
for col in new_fs_cols:
sdf[col] = 0
sdf.loc[fail_success_counts['hs_id'].values, new_fs_cols] = fail_success_counts[new_fs_cols].values
return sdf
"""
for exp 3.1
"""
def get_stat_test_dict_exp3(results_df, iter_max, metric='total_hits'):
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
results_df = results_df[results_df['iter_num']==iter_max][des_cols]
tasks = results_df['task_col'].unique()
rf_ids = results_df['rf_id'].unique()
hs_ids = results_df['hs_id'].unique()
task_data_df_dict = {}
for task_col in tasks:
data_df = pd.DataFrame(data=np.zeros((len(rf_ids),len(hs_ids))),
columns=hs_ids, index=rf_ids)
task_df = results_df[results_df['task_col'] == task_col]
for hs_id in hs_ids:
for rf_id in rf_ids:
tmp_df = task_df[(task_df['hs_id'] == hs_id) & (task_df['rf_id'] == rf_id)]
# for (strategy, rf_id) that don't exist, we set it to mean of strategy runs that do exist
metric_val = tmp_df[metric].iloc[0]
data_df.loc[rf_id, hs_id] = metric_val
task_data_df_dict[task_col] = data_df
return task_data_df_dict
"""
Computes contrast estimation based on medians in 4 steps as described in:
Garcia et al. 2010 https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/sicidm/2010-Garcia-INS.pdf
see pages 6-8
exp 3.1
"""
def compute_custom_cem_exp3(results_df, iter_max, metric='total_hits'):
# get data in dataset (rows) vs strategy (columns) format
task_data_df_dict = get_stat_test_dict_exp3(results_df, iter_max, metric)
def custom_cem_helper(data_df):
# perform steps 1 and 2 of computing Zuv matrix
num_algorithms = data_df.columns.shape[0]
algorithm_names = data_df.columns.tolist()
Zuv_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
tmp_df = data_df[[u, v]].copy()
tmp_df = tmp_df.dropna(axis=0)
u_arr = tmp_df[u].values
v_arr = tmp_df[v].values
# get difference vector of strategies u and v
perf_diff = u_arr - v_arr
# get median differences
median_diff = np.median(perf_diff)
# save to Zuv matrix
Zuv_matrix.loc[u,v] = median_diff
Zuv_matrix.loc[v,u] = -median_diff
# step 3: compute the mean of the median differences
mean_medians_diff = Zuv_matrix.mean(axis=1)
# step 4 compute difference of strategy u and v
cem_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
u_val = mean_medians_diff.loc[u]
v_val = mean_medians_diff.loc[v]
# save to Zuv matrix
cem_matrix.loc[u,v] = u_val - v_val
cem_matrix.loc[v,u] = v_val - u_val
return cem_matrix
cem_task_dict = {}
for task_col in task_data_df_dict:
cem_data_df = task_data_df_dict[task_col]
cem_res = custom_cem_helper(cem_data_df)
cem_df = pd.DataFrame(cem_res, columns=cem_data_df.columns, index=cem_data_df.columns)
cem_task_dict[task_col] = cem_df
return task_data_df_dict, cem_task_dict
"""
cem for exp 3.1
"""
def compute_scmamp_cem_exp3(results_df, iter_max, metric='total_hits'):
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from stat_analysis import setup_scmamp_rpy2
setup_scmamp_rpy2()
scmamp = rpackages.importr('scmamp')
task_data_df_dict = get_stat_test_dict_exp3(results_df, iter_max, metric)
cem_task_dict = {}
for task_col in task_data_df_dict:
data_df = task_data_df_dict[task_col]
cem_res = scmamp.contrastEstimationMatrix(data_df)
cem_df = pd.DataFrame(cem_res, columns=data_df.columns, index=data_df.columns)
cem_task_dict[task_col] = cem_df
return task_data_df_dict, cem_task_dict
"""
Failure of a (strategy, task, rf_id) combo is defined by having total_hits or total_unique_hits
not exceed that of ClusterBasedRandom or InstanceBasedRandom.
"""
def get_task_failures_dict(results_df, iter_max):
rf_ids = results_df['rf_id'].unique().tolist()
task_cols = results_df['task_col'].unique().tolist()
hs_ids = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
summary_df = results_df[results_df['iter_num']==iter_max]
cbrandom = summary_df[summary_df['hs_id'] == 'ClusterBasedRandom']
ibrandom = summary_df[summary_df['hs_id'] == 'InstanceBasedRandom']
fail_success_counts_dict = {}
for task in task_cols:
fail_success_counts = np.zeros(shape=(len(hs_ids),4))
for rf_id in rf_ids:
for i, hs_id in enumerate(hs_ids):
temp_cbrandom = cbrandom[(cbrandom['rf_id'] == rf_id) & (cbrandom['task_col'] == task)]
temp_ibrandom = ibrandom[(ibrandom['rf_id'] == rf_id) & (ibrandom['task_col'] == task)]
rcbhits, rcbuniquehits = temp_cbrandom['total_hits'].iloc[0], temp_cbrandom['total_unique_hits'].iloc[0]
ribhits, ribuniquehits = temp_ibrandom['total_hits'].iloc[0], temp_ibrandom['total_unique_hits'].iloc[0]
temp_df = summary_df[(summary_df['rf_id'] == rf_id) & (summary_df['task_col'] == task) & (summary_df['hs_id'] == hs_id)]
mhits, muniquehits = temp_df['total_hits'].iloc[0], temp_df['total_unique_hits'].iloc[0]
hit_limit, unique_hit_limit = temp_df['hit_limit'].iloc[0], temp_df['unique_hit_limit'].iloc[0]
if (mhits <= rcbhits) or (muniquehits <= rcbuniquehits) or (mhits <= ribhits) or (muniquehits <= ribuniquehits):
fail_success_counts[i,0] += 1
if (mhits / hit_limit) >= 0.1:
fail_success_counts[i,1] += 1
if (mhits / hit_limit) >= 0.25:
fail_success_counts[i,2] += 1
if (mhits / hit_limit) >= 0.5:
fail_success_counts[i,3] += 1
fail_success_counts = pd.DataFrame(data=fail_success_counts,
columns=['# failures', '# >= 0.1', '# >= 0.25', '# >= 0.5'])
fail_success_counts['hs_id'] = hs_ids
fail_success_counts_dict[task] = fail_success_counts
return fail_success_counts_dict
"""
for exp 3.1
"""
def plot_cem_heatmap_exp3(cem_df, title, figsize=(16, 16), fail_success_counts=None):
from matplotlib.collections import QuadMesh
from matplotlib.text import Text
add_fail_success_counts = False
if fail_success_counts is not None:
add_fail_success_counts = True
heatmap_df = cem_df.copy()
heatmap_df[' '] = 0
heatmap_df['Total Wins'] = (cem_df > 0).sum(axis=1)
heatmap_df = heatmap_df.sort_values('Total Wins', ascending=False)
ordered_wins_hs_ids = heatmap_df['Total Wins'].index.tolist()
heatmap_df = heatmap_df[ordered_wins_hs_ids + [' ', 'Total Wins']]
facecolor_limit = 3
shrink_factor = 0.6
if add_fail_success_counts:
heatmap_df['# Failures (out of 10)'] = np.nan
heatmap_df['# >= 10%'] = np.nan
heatmap_df['# >= 25%'] = np.nan
heatmap_df['# >= 50%'] = np.nan
facecolor_limit=7
shrink_factor = 0.5
for hs_id in fail_success_counts['hs_id'].unique():
tmp_df = fail_success_counts[fail_success_counts['hs_id'] == hs_id]
failures_cnt = tmp_df['# failures'].iloc[0]
a, b, c = tmp_df['# >= 0.1'].iloc[0], tmp_df['# >= 0.25'].iloc[0], tmp_df['# >= 0.5'].iloc[0]
heatmap_df.loc[hs_id, '# Failures (out of 10)'] = failures_cnt
heatmap_df.loc[hs_id, '# >= 10%'] = a
heatmap_df.loc[hs_id, '# >= 25%'] = b
heatmap_df.loc[hs_id, '# >= 50%'] = c
labels = []
for i, row in heatmap_df.iterrows():
x = row['Total Wins']
addendum_labels = ['', '{}'.format(x)]
if add_fail_success_counts:
f, a, b, c = row['# Failures (out of 10)'], row['# >= 10%'], row['# >= 25%'], row['# >= 50%']
addendum_labels += ['{}'.format(f), '{}'.format(a), '{}'.format(b), '{}'.format(c)]
tmp = ['' for _ in range(heatmap_df.shape[0])] + addendum_labels
labels.append(tmp)
labels = np.array(labels)
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.heatmap(heatmap_df, annot=labels, linewidths=1, linecolor='grey',
fmt='', square=True, cbar_kws={"shrink": shrink_factor})
# find your QuadMesh object and get array of colors
quadmesh = ax.findobj(QuadMesh)[0]
facecolors = quadmesh.get_facecolors()
# make colors of the last column white
# set modified colors
quadmesh.set_facecolors = facecolors
for i in range(1, facecolor_limit):
facecolors[np.arange(heatmap_df.shape[1]-i, heatmap_df.shape[0]*heatmap_df.shape[1],
heatmap_df.shape[1])] = np.array([1,1,1,1])
# set color of all text to black
for i in ax.findobj(Text):
i.set_color('black')
plt.title(title)
plt.show()
cem_wins_df = heatmap_df['Total Wins']
return cem_wins_df
def plot_cem_heatmap_all_tasks_exp3(cem_task_dict, task_info, fail_success_counts_dict,
title, figsize=(16, 16), add_fail_success_counts=True,
tasks_per_row=10, shrink_factor=0.1, fontsize=35, metric='Total Hits',
save_fmt='./exp3/cem/', title_y=0.55, hspace=0.2, wspace=0.2):
from matplotlib.collections import QuadMesh
from matplotlib.text import Text
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
task_info = task_info.sort_values('active_ratio')
tasks = task_info['task_col'].tolist()
task_labels = ['{}\n{} cpds\n{} hits\n{}% hits'.format(row['task_col'].replace('pcba-', ''),
row['cpd_count'], row['hit_limit'],
row['active_ratio']) for i, row in task_info.iterrows()]
cem_wins_dict = {}
total_iters = int(np.ceil(len(tasks)/tasks_per_row))
latex_lines = []
for task_batch in range(total_iters):
tasks_subset = tasks[task_batch*tasks_per_row:(task_batch+1)*tasks_per_row]
curr_tasks_per_row = len(tasks_subset)
if task_batch != (total_iters-1):
fig, axes = plt.subplots(2, curr_tasks_per_row//2, figsize=figsize)
axes = axes.flatten()
else:
fig, axes = plt.subplots(1, 2, figsize=(50, 20))
axes = axes.flatten()[:2]
for axes_i, task_col in enumerate(tasks_subset):
task_hit_limit = task_info[task_info['task_col'] == task_col]['hit_limit'].iloc[0]
task_title='Task: {}. Hit limit: {}.'.format(task_col, task_hit_limit)
cem_df = cem_task_dict[task_col]
if add_fail_success_counts:
fail_success_counts = fail_success_counts_dict[task_col]
heatmap_df = cem_df.copy()
heatmap_df[' '] = 0
heatmap_df['Total Wins'] = (cem_df > 0).sum(axis=1)
heatmap_df = heatmap_df.loc[hs_ids_order]
heatmap_df = heatmap_df[hs_ids_order + [' ', 'Total Wins']]
#heatmap_df = heatmap_df.sort_values('Total Wins', ascending=False)
#ordered_wins_hs_ids = heatmap_df['Total Wins'].index.tolist()
#heatmap_df = heatmap_df[ordered_wins_hs_ids + [' ', 'Total Wins']]
facecolor_limit = 3
if add_fail_success_counts:
heatmap_df['# Failures (out of 10)'] = np.nan
heatmap_df['# >= 10%'] = np.nan
heatmap_df['# >= 25%'] = np.nan
heatmap_df['# >= 50%'] = np.nan
facecolor_limit=7
for hs_id in fail_success_counts['hs_id'].unique():
tmp_df = fail_success_counts[fail_success_counts['hs_id'] == hs_id]
failures_cnt = tmp_df['# failures'].iloc[0]
a, b, c = tmp_df['# >= 0.1'].iloc[0], tmp_df['# >= 0.25'].iloc[0], tmp_df['# >= 0.5'].iloc[0]
heatmap_df.loc[hs_id, '# Failures (out of 10)'] = failures_cnt
heatmap_df.loc[hs_id, '# >= 10%'] = a
heatmap_df.loc[hs_id, '# >= 25%'] = b
heatmap_df.loc[hs_id, '# >= 50%'] = c
labels = []
for i, row in heatmap_df.iterrows():
x = int(row['Total Wins'])
addendum_labels = ['', '{}'.format(x)]
if add_fail_success_counts:
f, a, b, c = row['# Failures (out of 10)'], row['# >= 10%'], row['# >= 25%'], row['# >= 50%']
if not np.isnan(f):
f = int(f)
if not np.isnan(a):
a = int(a)
if not np.isnan(b):
b = int(b)
if not np.isnan(c):
c = int(c)
addendum_labels += ['{}'.format(f), '{}'.format(a), '{}'.format(b), '{}'.format(c)]
tmp = ['' for _ in range(heatmap_df.shape[0])] + addendum_labels
labels.append(tmp)
labels = np.array(labels)
cmap = plt.get_cmap("RdYlGn")
sns.heatmap(heatmap_df, annot=labels, linewidths=1, linecolor='grey', cmap=cmap,
fmt='', square=True, cbar_kws={"shrink": shrink_factor}, ax=axes[axes_i])
# find your QuadMesh object and get array of colors
quadmesh = axes[axes_i].findobj(QuadMesh)[0]
facecolors = quadmesh.get_facecolors()
# make colors of the last column white
# set modified colors
quadmesh.set_facecolors = facecolors
for i in range(1, facecolor_limit):
facecolors[np.arange(heatmap_df.shape[1]-i, heatmap_df.shape[0]*heatmap_df.shape[1],
heatmap_df.shape[1])] = np.array([1,1,1,1])
locs = axes[axes_i].get_xticks()
locs = [i+0.35 for i in locs]
axes[axes_i].set_xticks(locs)
# set color of all text to black
for i in axes[axes_i].findobj(Text):
i.set_color('black')
axes[axes_i].set_title(task_title, y=1.06, fontsize=fontsize)
if axes_i%2 > 0:
axes[axes_i].set_yticks([])
if (axes_i//2 > 0) or (task_batch == (total_iters-1)):
axes[axes_i].set_xticklabels(axes[axes_i].get_xticklabels(), rotation=70, ha='right')
else:
if task_batch != (total_iters-1):
axes[axes_i].set_xticks([])
cem_wins_df = heatmap_df['Total Wins']
cem_wins_dict[task_col] = cem_wins_df
fig.tight_layout()
plt.suptitle(title, fontsize=fontsize, y=title_y)
fig.subplots_adjust(hspace=hspace, wspace=wspace)
if save_fmt is not None:
plt.savefig(save_fmt+'{}_{}.png'.format(metric.replace(' ', '_'), task_batch+1), bbox_inches='tight');
plt.show()
latex_lines.append('\\vspace*{\\fill}')
latex_lines.append('\\begin{figure}[H]\\ContinuedFloat')
latex_lines.append('\\centering')
latex_lines.append('\\includegraphics[width=\\textwidth]{project_al/experiments/exp3/cem/'+'{}_{}'.format(metric.replace(' ', '_'), task_batch+1)+'.png}')
cont_line = '\\emph{('+ '{} of {} cont.'.format(task_batch+1, total_iters) +')}}'
latex_lines.append('\\caption[]{Experiment 3.1 per-task contrast estimation based on medians (CEM) heatmaps for \\textbf{'+metric+'} after 50 iterations along with extra columns denoting counts for various conditions. '+cont_line)
latex_lines.append("\\end{figure}")
latex_lines.append("\\vspace*{\\fill}")
with open(save_fmt+"/latex_{}.txt".format(metric), 'w') as f:
for line in latex_lines:
f.write("{}\n".format(line))
return cem_wins_dict
def plot_boxplots_simple_exp3(results_df, iter_max, task_info,
figsize=(16, 12), metric='total_hits',
title='', xlabel='', ylabel='', save_fmt=None,
fontsize=35, labelpad=20, tasks_per_plot=10, legendfontsize=25):
hue_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
results_df = results_df[results_df['iter_num']==iter_max]
task_info = task_info.sort_values('active_ratio')
tasks = task_info['task_col'].tolist()
task_labels = ['{}\n{} cpds\n{} hits\n{}% hits'.format(row['task_col'].replace('pcba-', ''),
row['cpd_count'], row['hit_limit'],
row['active_ratio']) for i, row in task_info.iterrows()]
latex_lines = []
total_iters = int(np.ceil(len(tasks)/tasks_per_plot))
for task_batch in range(total_iters):
tasks_subset = tasks[task_batch*tasks_per_plot:(task_batch+1)*tasks_per_plot]
xtick_labels = task_labels[task_batch*tasks_per_plot:(task_batch+1)*tasks_per_plot]
trimmed_results_df = results_df[results_df['task_col'].isin(tasks_subset)]
fig, ax = plt.subplots(figsize=figsize)
sns.boxplot(x="task_col", y=metric, hue="hs_id", data=trimmed_results_df,
order=tasks_subset, hue_order=hue_order)
locs, _ = plt.xticks()
locs = [i-0.4 for i in locs]
plt.xticks(locs, xtick_labels, ha='left')
plt.xlabel(xlabel, fontsize=fontsize, labelpad=labelpad)
plt.ylabel(ylabel, fontsize=fontsize, labelpad=labelpad)
plt.title(title + ' (plot {} of {})'.format(task_batch+1, total_iters), fontsize=fontsize, y=1.05)
[plt.axvline(x+0.5, color='r', linestyle='--') for x in range(tasks_per_plot-1)] # from:https://stackoverflow.com/a/60375919
ax.legend(title='Hyperparameter ID:', title_fontsize=legendfontsize, fontsize=legendfontsize)
if save_fmt is not None:
plt.savefig(save_fmt+'boxplots_{}_{}.png'.format(metric, task_batch+1), bbox_inches='tight');
plt.show()
latex_lines.append('\\vspace*{\\fill}')
latex_lines.append('\\begin{figure}[H]\\ContinuedFloat')
latex_lines.append('\\centering')
latex_lines.append('\\includegraphics[width=\\textwidth]{project_al/experiments/exp3/boxplots/boxplots_'+metric+'_'+str(task_batch+1)+'.png}')
latex_lines.append('\\caption[]{Experiment 3.1 per-task \\textbf{Total Hits} boxplots after 50 iterations (102 tasks). ')
latex_lines.append("The x-tick labels for each task include number of compounds, number of hits, and hit \\%. \\emph{(cont.)} }")
latex_lines.append("\\end{figure}")
latex_lines.append("\\vspace*{\\fill}")
latex_lines.append("\\newpage")
with open(save_fmt+"/latex_{}.txt".format(metric), 'w') as f:
for line in latex_lines:
f.write("{}\n".format(line))
def get_win_summary_df(task_info, cem_all_iters_metric_dict):
tasks = task_info.sort_values('active_ratio')['task_col'].unique()
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
iter_max_dict = {9010: 10, 9020: 20, 9030: 30, 9040: 40, 9050: 50}
metric_dict = {'total_hits': 'Total Hits', 'total_unique_hits': 'Total Unique Hits'}
data_1 = []
for metric in metric_dict:
for iter_max in iter_max_dict:
cem_task_dict, fail_success_counts_dict = cem_all_iters_metric_dict['{}_{}'.format(metric, iter_max)]
for task_col in tasks:
cem_df = cem_task_dict[task_col]
fail_success_df = fail_success_counts_dict[task_col]
cem_wins_df = (cem_df > 0).sum(axis=1)
top_strategy = cem_wins_df[cem_wins_df == cem_wins_df.max()]
top_strategy = "|".join(top_strategy.index.tolist())
data_1.append([metric_dict[metric], iter_max_dict[iter_max], task_col, top_strategy])
metric_task_top_strats_df = pd.DataFrame(data=data_1, columns=['Metric', '# Iterations', 'Task', 'Best Strategy (Ties)'])
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
tmp_df = metric_task_top_strats_df
win_summary_df_list = []
for metric in ['Total Hits', 'Total Unique Hits']:
data = np.zeros(shape=(len(hs_ids_order),5))
columns = [[],[]]
best_per_iter = []
for i, iter_max in enumerate(iter_max_dict):
df = tmp_df[(tmp_df['Metric'] == metric) & (tmp_df['# Iterations'] == iter_max_dict[iter_max])]
assert df.shape[0] == 102
hs_id_counts = []
for hs_id in hs_ids_order:
hs_id_counts.append(df[df['Best Strategy (Ties)'].str.contains(hs_id)].shape[0])
hs_id_counts = np.array(hs_id_counts)
columns[0].append(metric)
columns[1].append(iter_max_dict[iter_max])
data[:,i] = hs_id_counts
best_per_iter.append("|".join([hs_ids_order[i] for i in np.where(hs_id_counts == np.max(hs_id_counts))[0]]))
data = list(data) + [best_per_iter]
data_df = pd.DataFrame(data=data, columns=columns, index=hs_ids_order+['Best'])
win_summary_df_list.append(data_df)
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
fs_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s10_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s25_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s50_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
for iter_max in iter_max_dict:
_, fail_success_counts_dict = cem_all_iters_metric_dict['total_hits_{}'.format(iter_max)]
fs_counts = np.zeros(shape=(4, len(tasks), len(hs_ids_order)))
for i, task_col in enumerate(tasks):
task_df = fail_success_counts_dict[task_col]
task_df.index = task_df['hs_id'].tolist()
task_df = task_df.drop('hs_id', axis=1)
for j, hs_id in enumerate(hs_ids_order):
f, a, b, c = task_df.loc[hs_id]
fs_counts[0,i,j] = f
fs_counts[1,i,j] = a
fs_counts[2,i,j] = b
fs_counts[3,i,j] = c
fs_df[iter_max_dict[iter_max]] = fs_counts[0,:].sum(axis=0)
s10_df[iter_max_dict[iter_max]] = fs_counts[1,:].sum(axis=0)
s25_df[iter_max_dict[iter_max]] = fs_counts[2,:].sum(axis=0)
s50_df[iter_max_dict[iter_max]] = fs_counts[3,:].sum(axis=0)
fs_df = fs_df.astype(int);s10_df = s10_df.astype(int);s25_df = s25_df.astype(int);s50_df = s50_df.astype(int);
fs_df.columns = [['# Failures for all 1020 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s10_df.columns = [['# $\ge$ 10\% hits for all 1020 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s25_df.columns = [['# $\ge$ 25\% hits for all 1020 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s50_df.columns = [['# $\ge$ 50\% hits for all 1020 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
[win_summary_df_list.append(df) for df in [fs_df, s10_df, s25_df, s50_df]]
for iter_max in [9050]:
_, fail_success_counts_dict = cem_all_iters_metric_dict['total_hits_{}'.format(iter_max)]
fs_counts = np.zeros(shape=(2, len(tasks), len(hs_ids_order)))
for i, task_col in enumerate(tasks):
task_df = fail_success_counts_dict[task_col]
task_df.index = task_df['hs_id'].tolist()
task_df = task_df.drop('hs_id', axis=1)
for j, hs_id in enumerate(hs_ids_order):
f, a, b, c = task_df.loc[hs_id]
fs_counts[0,i,j] = f
fs_counts[1,i,j] = c
fs_task_df = pd.DataFrame(data=fs_counts[0,:], columns=hs_ids_order, index=tasks)
fs_task_df['Task Total'] = fs_task_df.sum(axis=1)
fs_task_df = fs_task_df[fs_task_df['Task Total'] > 0].astype(int)
fs_task_df['Task Hit %'] = [task_info[task_info['task_col'] == x]['active_ratio'].iloc[0] for x in fs_task_df.index.tolist()]
fs_task_df = fs_task_df.sort_values('Task Hit %')
fs_task_df = fs_task_df[['Task Hit %'] + hs_ids_order + ['Task Total']]
fs_task_df = pd.concat([fs_task_df, fs_task_df.sum(axis=0).to_frame().T])
fs_task_df.name = '# iterations: 50'
fs_task_df.index = fs_task_df.index.tolist()[:-1] + ['Strategy Total']
s50_task_df = pd.DataFrame(data=fs_counts[1,:], columns=hs_ids_order, index=tasks)
s50_task_df['Task Total'] = s50_task_df.sum(axis=1)
s50_task_df = s50_task_df[s50_task_df['Task Total'] > 0].astype(int)
s50_task_df['Task Hit %'] = [task_info[task_info['task_col'] == x]['active_ratio'].iloc[0] for x in s50_task_df.index.tolist()]
s50_task_df = s50_task_df.sort_values('Task Hit %')
s50_task_df = s50_task_df[['Task Hit %'] + hs_ids_order + ['Task Total']]
s50_task_df = pd.concat([s50_task_df, s50_task_df.sum(axis=0).to_frame().T])
s50_task_df.name = '# iterations: 50'
s50_task_df.index = s50_task_df.index.tolist()[:-1] + ['Strategy Total']
return win_summary_df_list, fs_task_df, s50_task_df
def get_exp3_2_failures(results_df, task_info):
tasks = task_info.sort_values('active_ratio')['task_col'].unique()
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
iter_max_dict = {9010: 10, 9020: 20, 9030: 30, 9040: 40, 9050: 50}
fs_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s10_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s25_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
s50_df = pd.DataFrame(data=np.zeros(shape=(len(hs_ids_order),5)),
columns=[10, 20, 30, 40, 50], index=hs_ids_order)
for iter_max in iter_max_dict:
fail_success_counts_dict = get_task_failures_dict(results_df, iter_max)
fs_counts = np.zeros(shape=(4, len(tasks), len(hs_ids_order)))
for i, task_col in enumerate(tasks):
task_df = fail_success_counts_dict[task_col]
task_df.index = task_df['hs_id'].tolist()
task_df = task_df.drop('hs_id', axis=1)
for j, hs_id in enumerate(hs_ids_order):
f, a, b, c = task_df.loc[hs_id]
fs_counts[0,i,j] = f
fs_counts[1,i,j] = a
fs_counts[2,i,j] = b
fs_counts[3,i,j] = c
fs_df[iter_max_dict[iter_max]] = fs_counts[0,:].sum(axis=0)
s10_df[iter_max_dict[iter_max]] = fs_counts[1,:].sum(axis=0)
s25_df[iter_max_dict[iter_max]] = fs_counts[2,:].sum(axis=0)
s50_df[iter_max_dict[iter_max]] = fs_counts[3,:].sum(axis=0)
fs_df = fs_df.astype(int);s10_df = s10_df.astype(int);s25_df = s25_df.astype(int);s50_df = s50_df.astype(int);
fs_df.columns = [['# Failures for all 102 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s10_df.columns = [['# $\ge$ 10\% hits for all 102 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s25_df.columns = [['# $\ge$ 25\% hits for all 102 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
s50_df.columns = [['# $\ge$ 50\% hits for all 102 task-plate runs' for _ in range(5)], [10, 20, 30, 40, 50]]
fs_df_list = [fs_df, s10_df, s25_df, s50_df]
for iter_max in [9050]:
fail_success_counts_dict = get_task_failures_dict(results_df, iter_max)
fs_counts = np.zeros(shape=(2, len(tasks), len(hs_ids_order)))
for i, task_col in enumerate(tasks):
task_df = fail_success_counts_dict[task_col]
task_df.index = task_df['hs_id'].tolist()
task_df = task_df.drop('hs_id', axis=1)
for j, hs_id in enumerate(hs_ids_order):
f, a, b, c = task_df.loc[hs_id]
fs_counts[0,i,j] = f
fs_counts[1,i,j] = c
fs_task_df = pd.DataFrame(data=fs_counts[0,:], columns=hs_ids_order, index=tasks)
fs_task_df['Task Total'] = fs_task_df.sum(axis=1)
fs_task_df = fs_task_df[fs_task_df['Task Total'] > 0].astype(int)
fs_task_df['Task Hit %'] = [task_info[task_info['task_col'] == x]['active_ratio'].iloc[0] for x in fs_task_df.index.tolist()]
fs_task_df = fs_task_df.sort_values('Task Hit %')
fs_task_df = fs_task_df[['Task Hit %'] + hs_ids_order + ['Task Total']]
fs_task_df = pd.concat([fs_task_df, fs_task_df.sum(axis=0).to_frame().T])
fs_task_df.name = '# iterations: 50'
fs_task_df.index = fs_task_df.index.tolist()[:-1] + ['Strategy Total']
s50_task_df = pd.DataFrame(data=fs_counts[1,:], columns=hs_ids_order, index=tasks)
s50_task_df['Task Total'] = s50_task_df.sum(axis=1)
s50_task_df = s50_task_df[s50_task_df['Task Total'] > 0].astype(int)
s50_task_df['Task Hit %'] = [task_info[task_info['task_col'] == x]['active_ratio'].iloc[0] for x in s50_task_df.index.tolist()]
s50_task_df = s50_task_df.sort_values('Task Hit %')
s50_task_df = s50_task_df[['Task Hit %'] + hs_ids_order + ['Task Total']]
s50_task_df = pd.concat([s50_task_df, s50_task_df.sum(axis=0).to_frame().T])
s50_task_df.name = '# iterations: 50'
s50_task_df.index = s50_task_df.index.tolist()[:-1] + ['Strategy Total']
return fs_df_list, fs_task_df, s50_task_df
"""
for exp 3.2
"""
def get_stat_test_dict_exp3_2(results_df, iter_max, metric='total_hits'):
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
results_df = results_df[results_df['iter_num']==iter_max][des_cols]
tasks = results_df['task_col'].unique()
rf_ids = results_df['rf_id'].unique()
hs_ids = results_df['hs_id'].unique()
data_df = pd.DataFrame(data=np.zeros((len(tasks),len(hs_ids))),
columns=hs_ids, index=tasks)
for hs_id in hs_ids:
for task_col in tasks:
tmp_df = results_df[(results_df['hs_id'] == hs_id) & (results_df['task_col'] == task_col)]
assert tmp_df.shape[0] == 1
metric_val = tmp_df[metric].iloc[0]
data_df.loc[task_col, hs_id] = metric_val
return data_df
"""
Computes contrast estimation based on medians in 4 steps as described in:
Garcia et al. 2010 https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/sicidm/2010-Garcia-INS.pdf
see pages 6-8
exp 3.2
"""
def compute_custom_cem_exp3_2(results_df, iter_max, metric='total_hits'):
# get data in dataset (rows) vs strategy (columns) format
cem_data_df = get_stat_test_dict_exp3_2(results_df, iter_max, metric)
# perform steps 1 and 2 of computing Zuv matrix
num_algorithms = cem_data_df.columns.shape[0]
algorithm_names = cem_data_df.columns.tolist()
Zuv_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
tmp_df = cem_data_df[[u, v]].copy()
tmp_df = tmp_df.dropna(axis=0)
u_arr = tmp_df[u].values
v_arr = tmp_df[v].values
# get difference vector of strategies u and v
perf_diff = u_arr - v_arr
# get median differences
median_diff = np.median(perf_diff)
# save to Zuv matrix
Zuv_matrix.loc[u,v] = median_diff
Zuv_matrix.loc[v,u] = -median_diff
# step 3: compute the mean of the median differences
mean_medians_diff = Zuv_matrix.mean(axis=1)
# step 4 compute difference of strategy u and v
cem_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
u_val = mean_medians_diff.loc[u]
v_val = mean_medians_diff.loc[v]
# save to Zuv matrix
cem_matrix.loc[u,v] = u_val - v_val
cem_matrix.loc[v,u] = v_val - u_val
return cem_matrix
"""
for exp 3.2
"""
def compute_scmamp_cem_exp3_2(results_df, iter_max, metric='total_hits'):
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from stat_analysis import setup_scmamp_rpy2
setup_scmamp_rpy2()
scmamp = rpackages.importr('scmamp')
cem_data_df = get_stat_test_dict_exp3_2(results_df, iter_max, metric)
cem_res = scmamp.contrastEstimationMatrix(cem_data_df)
cem_df = pd.DataFrame(cem_res, columns=cem_data_df.columns, index=cem_data_df.columns)
return cem_df
"""
for exp 3.2
"""
def plot_cem_heatmap_exp3_2(results_df, task_info, metric,
title, figsize=(16, 16), add_fail_success_counts=True,
shrink_factor=0.1, fontsize=45, title_y=1.05,
hspace=0.2, wspace=0.2):
from matplotlib.collections import QuadMesh
from matplotlib.text import Text
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
iter_max_dict = {9010: 10, 9020: 20, 9030: 30, 9040: 40, 9050: 50}
task_info = task_info.sort_values('active_ratio')
tasks = task_info['task_col'].tolist()
if add_fail_success_counts:
fs_df_list, _, _ = get_exp3_2_failures(results_df, task_info)
fs_df = fs_df_list[0]
cem_wins_dict = {}
#fig, axes = plt.subplots(2, len(iter_max_dict), figsize=figsize)
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(2, 3, wspace=wspace, hspace=hspace)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[0,1])
ax3 = fig.add_subplot(gs[0,2])
axes = [[ax1, ax2, ax3]]
ax1 = fig.add_subplot(gs[1,0])
ax2 = fig.add_subplot(gs[1,1])
axes.append([ax1, ax2, None])
axes = np.array(axes).flatten()
for axes_i, iter_max in enumerate(iter_max_dict):
curr_title='# iterations: {}'.format(iter_max_dict[iter_max])
cem_df = compute_custom_cem_exp3_2(results_df, iter_max, metric)
heatmap_df = cem_df.copy()
heatmap_df[' '] = 0
heatmap_df['Total Wins'] = (cem_df > 0).sum(axis=1)
heatmap_df = heatmap_df.loc[hs_ids_order]
heatmap_df = heatmap_df[hs_ids_order + [' ', 'Total Wins']]
facecolor_limit = 3
if add_fail_success_counts:
heatmap_df['# Failures (out of 102)'] = np.nan
heatmap_df['# >= 10%'] = np.nan
heatmap_df['# >= 25%'] = np.nan
heatmap_df['# >= 50%'] = np.nan
facecolor_limit=7
for hs_id in hs_ids_order[:-2]:
failures_cnt = fs_df_list[0][(fs_df_list[0].columns[0][0],iter_max_dict[iter_max])].loc[hs_id]
a = fs_df_list[1][(fs_df_list[1].columns[0][0],iter_max_dict[iter_max])].loc[hs_id]
b = fs_df_list[2][(fs_df_list[2].columns[0][0],iter_max_dict[iter_max])].loc[hs_id]
c = fs_df_list[3][(fs_df_list[3].columns[0][0],iter_max_dict[iter_max])].loc[hs_id]
heatmap_df.loc[hs_id, '# Failures (out of 102)'] = failures_cnt
heatmap_df.loc[hs_id, '# >= 10%'] = a
heatmap_df.loc[hs_id, '# >= 25%'] = b
heatmap_df.loc[hs_id, '# >= 50%'] = c
labels = []
for i, row in heatmap_df.iterrows():
x = int(row['Total Wins'])
addendum_labels = ['', '{}'.format(x)]
if add_fail_success_counts:
f, a, b, c = row['# Failures (out of 102)'], row['# >= 10%'], row['# >= 25%'], row['# >= 50%']
if not np.isnan(f):
f = int(f)
if not
|
np.isnan(a)
|
numpy.isnan
|
from __future__ import division
import numpy as np
import pickle
import tensorflow as tf
import random
import math
import sys
import os
with open('param.pickle','rb') as l_record:
para = pickle.load(l_record)
atom_type = para['atom_type']
resi_atom_type = para['resi_atom_type']
def iRMSD_cal(x, protein, ind):
rpath="/home/cyppsp/project_bayesian/zdock/2c/"+protein+"_r_u.pdb.ms"
fr = open('re.pdb', "w")
with open(rpath, "r") as f:
receptor_pdb = f.readlines()
i=0
for re in range(len(receptor_pdb)):
if(receptor_pdb[re][0:4]=='ATOM' and (receptor_pdb[re][17:20] in resi_atom_type)):
fr.write("%s%7.3f %7.3f %7.3f%s" %(receptor_pdb[re][0:31], x[i][0], x[i][1], x[i][2], receptor_pdb[re][54:]))
i+=1
fr.close()
rpath="/home/cyppsp/project_bayesian/zdock/2c/"+protein+"_l_u_"+str(ind)+".pdb"
fr = open('le.pdb', "w")
with open(rpath, "r") as f:
receptor_pdb = f.readlines()
i=0
for re in range(len(receptor_pdb)):
if(receptor_pdb[re][0:4]=='ATOM' and (receptor_pdb[re][17:20] in resi_atom_type)):
fr.write("%s%7.3f %7.3f %7.3f%s" %(receptor_pdb[re][0:31], x[i][0], x[i][1], x[i][2], receptor_pdb[re][54:]))
i+=1
fr.close()
os.system("cp re.pdb /home/cyppsp/cNMA/Example/Example1/Input/"+protein+'_r_u.pdb')
os.system("cp le.pdb /home/cyppsp/cNMA/Example/Example1/Input/"+protein+'_l_u.pdb')
os.system("cp /home/cyppsp/project_bayesian/zdock/benchmark/"+protein+"_r_b.pdb /home/cyppsp/cNMA/Example/Example1/Input/")
os.system("cp /home/cyppsp/project_bayesian/zdock/benchmark/"+protein+"_l_b.pdb /home/cyppsp/cNMA/Example/Example1/Input/")
os.system("/home/cyppsp/cNMA/Example/Example1/run_irmsd.sh "+protein)
t=
|
np.loadtxt("/home/cyppsp/cNMA/Example/Example1/Output/iRMSD.txt")
|
numpy.loadtxt
|
from update_functions import Update_Functions, Enum
import numpy as np
class NewUpdate(Enum):
LINE = "Line"
MODULUS="Modulus"
QUADRATIC="Quadratic"
CUBIC="Cubic"
MULTIROOT="MultiRoot"#Not used anymore.
INTERPOLATED="InterpolatedCubic"
class BF_Update_Functions(Update_Functions):
def __init__(self):
super().__init__()
self.add_function(NewUpdate.LINE,self.neighbours_line_update)
self.add_function(NewUpdate.MODULUS,self.neighbours_modulus_update)
self.add_function(NewUpdate.QUADRATIC,self.neighbours_quadratic_update)
self.add_function(NewUpdate.CUBIC,self.neighbours_cubic_update)
self.add_function(NewUpdate.MULTIROOT,self.neighbours_multiroot_update)
self.add_function(NewUpdate.INTERPOLATED,self.neighbours_super_update)
def neighbours_modulus_update(self,beliefs,inf_graph, rat_graph,**kwargs):
"""Applies the inverse-modulus update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias,
the confirmation-backfire factor and the beliefs of all the agents' neighbors.
"""
rat_graph=rat_graph.copy()
rat_graph=0.5*rat_graph+0.5
neighbours = [np.count_nonzero(inf_graph[:, i]) for i, _ in enumerate(beliefs)]
diff = np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis]
diff = np.transpose(diff) - diff
sigs=np.where(diff>=0, 1, -1)
infs=sigs*inf_graph*(-np.abs(np.abs(diff)-rat_graph)+rat_graph)
preAns=np.add.reduce(infs) / neighbours
preAns+=beliefs
return np.clip(preAns,0,1)
def neighbours_line_update(self,beliefs,inf_graph, rat_graph,**kwargs):
"""Applies the rotated-line update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias,
the confirmation-backfire factor and the beliefs of all the agents' neighbors.
"""
rat_graph=rat_graph.copy()
neighbours = [np.count_nonzero(inf_graph[:, i]) for i, _ in enumerate(beliefs)]
diff = np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis]
diff = np.transpose(diff) - diff
infs = inf_graph * rat_graph * diff
preAns=np.add.reduce(infs) / neighbours
preAns+=beliefs
return np.clip(preAns,0,1)
def neighbours_quadratic_update(self,beliefs,inf_graph, rat_graph,**kwargs):
"""Applies the inverse-quadratic update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias,
the confirmation-backfire factor and the beliefs of all the agents' neighbors.
"""
rat_graph=rat_graph.copy().T
rat_graph+=1
neighbours = [np.count_nonzero(inf_graph[:, i]) for i, _ in enumerate(beliefs)]
diff = np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis]
diff =
|
np.transpose(diff)
|
numpy.transpose
|
from solver.solutionCost import SolutionCost
from solver.classesAndResources import ClassesAndResources
import numpy as np
class SolutionInstance:
def __init__(self, classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup):
self.classesAndResources: ClassesAndResources = classesAndResources
self.meetByPeriodByDayByLocalBySubjectByGroup = meetByPeriodByDayByLocalBySubjectByGroup
self.meetByPeriodByDayBySpecialistByGroup = np.sum(self.meetByPeriodByDayByLocalBySubjectByGroup, axis=2) != 0
def equals(self, otherSolutionInstance):
return np.array_equal(self.meetByPeriodByDayByLocalBySubjectByGroup, otherSolutionInstance.meetByPeriodByDayByLocalBySubjectByGroup)
def assignGroupToSpecialistDayAndPeriod(self, specialist, day, period, local, oldGroupId, newGroupId):
meetByPeriodByDayByLocalBySubjectByGroup = np.copy(self.meetByPeriodByDayByLocalBySubjectByGroup)
meetByPeriodByDayByLocalBySubjectByGroup[[oldGroupId, newGroupId], specialist, local, day, period] = \
meetByPeriodByDayByLocalBySubjectByGroup[[newGroupId, oldGroupId], specialist, local, day, period]
return SolutionInstance(self.classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup)
def assignLocalToSpecialistDayAndPeriod(self, specialist, day, period, group, oldLocalId, newLocalId):
meetByPeriodByDayByLocalBySubjectByGroup = np.copy(self.meetByPeriodByDayByLocalBySubjectByGroup)
meetByPeriodByDayByLocalBySubjectByGroup[group, specialist, [oldLocalId, newLocalId], day, period] = \
meetByPeriodByDayByLocalBySubjectByGroup[group, specialist, [newLocalId, oldLocalId], day, period]
return SolutionInstance(self.classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup)
def removeSpecialistDayPeriodMeeting(self, specialist, day, period):
meetByPeriodByDayByLocalBySubjectByGroup = np.copy(self.meetByPeriodByDayByLocalBySubjectByGroup)
meetByPeriodByDayByLocalBySubjectByGroup[:, specialist, :, day, period] = False
return SolutionInstance(self.classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup)
def addSpecialistDayPeriodMeetingWithGroupAtLocal(self, specialist, day, period, group, local):
meetByPeriodByDayByLocalBySubjectByGroup = np.copy(self.meetByPeriodByDayByLocalBySubjectByGroup)
meetByPeriodByDayByLocalBySubjectByGroup[group, specialist, local, day, period] = True
return SolutionInstance(self.classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup)
def getTotalCost(self) -> SolutionCost:
hardConstraintViolationCount = self.getHardConstraintCost()
customHardCost = self.classesAndResources.getDepthCost(self, 0)
meetArgs = np.where(self.meetByPeriodByDayByLocalBySubjectByGroup)
premiseConstraintViolationCount = self.getPremiseConstraintCost(meetArgs)
customPremiseCost = self.classesAndResources.getDepthCost(self, 1)
(softiesConstraintViolationCount, softiesDetails) = self.getSoftConstraintCost(meetArgs)
customSoftCost = self.classesAndResources.getDepthCost(self, 2)
return SolutionCost(np.asarray([hardConstraintViolationCount + customHardCost,
premiseConstraintViolationCount + customPremiseCost,
softiesConstraintViolationCount + customSoftCost]),
softiesDetails)
def getHardConstraintCost(self):
hardConstraintViolationCount = 0
hardConstraintViolationCount += self.groupNeedsHardConstraintViolationCost()
hardConstraintViolationCount += self.singleSpecialistByGroupPeriodViolationCost()
hardConstraintViolationCount += self.singleGroupByFreeSpecialistPeriodViolationCost()
hardConstraintViolationCount += self.singleLocalOccupancyViolationCost()
# Locals only see groups and specialists they are supposed to
# Expected not to happen by neighbour generation
return hardConstraintViolationCount
def groupNeedsHardConstraintViolationCost(self):
# Every group need has been fulfilled
return np.sum(np.abs(
|
np.sum(self.meetByPeriodByDayBySpecialistByGroup, axis=(2, 3))
|
numpy.sum
|
import numpy as np
from igp2 import AgentState
from igp2.opendrive.map import Map
from igp2.trajectory import VelocityTrajectory
from igp2.util import Circle
from grit.core.goal_generator import GoalGenerator
def goal_in_list(goals, goal_type, goal_center):
return sum([g.goal_type == goal_type and np.allclose(g.goal.center, goal_center, atol=1) for g in goals]) == 1
def test_heckstrasse_north_west():
xodr = "../scenarios/maps/heckstrasse.xodr"
scenario_map = Map.parse_from_opendrive(xodr)
heading = np.deg2rad(-45)
speed = 5
time = 0
position = np.array((28.9, -21.9))
velocity = speed * np.array((np.cos(heading), np.sin(heading)))
acceleration = np.array((0, 0))
state = AgentState(time, position, velocity, acceleration, heading)
trajectory = VelocityTrajectory.from_agent_state(state)
goal_generator = GoalGenerator()
goals = goal_generator.generate(scenario_map, trajectory)
assert len(goals) == 2
assert goal_in_list(goals, 'straight-on', (61.9, -47.3))
assert goal_in_list(goals, 'turn-left', (60.5, -18.7))
def test_heckstrasse_south_east():
xodr = "../scenarios/maps/heckstrasse.xodr"
scenario_map = Map.parse_from_opendrive(xodr)
heading = np.deg2rad(135)
speed = 5
time = 0
position = np.array((68.7, -42.9))
velocity = speed * np.array((np.cos(heading), np.sin(heading)))
acceleration = np.array((0, 0))
state = AgentState(time, position, velocity, acceleration, heading)
trajectory = VelocityTrajectory.from_agent_state(state)
goal_generator = GoalGenerator()
goals = goal_generator.generate(scenario_map, trajectory)
assert len(goals) == 2
assert sum([g.goal_type == 'straight-on' and np.allclose(g.goal.center, (35.5, -17.5), atol=1) for g in goals]) == 1
assert sum([g.goal_type == 'turn-right' and np.allclose(g.goal.center, (60.5, -18.7), atol=1) for g in goals]) == 1
def test_heckstrasse_north_east():
xodr = "../scenarios/maps/heckstrasse.xodr"
scenario_map = Map.parse_from_opendrive(xodr)
heading = np.deg2rad(-135)
speed = 5
time = 0
position = np.array((60.4, -15.1))
velocity = speed * np.array((np.cos(heading), np.sin(heading)))
acceleration = np.array((0, 0))
state = AgentState(time, position, velocity, acceleration, heading)
trajectory = VelocityTrajectory.from_agent_state(state)
goal_generator = GoalGenerator()
goals = goal_generator.generate(scenario_map, trajectory)
assert len(goals) == 2
assert goal_in_list(goals, 'turn-left', (61.9, -47.3))
assert goal_in_list(goals, 'turn-right', (35.1, -17.4))
def test_bendplatz_south_west():
xodr = "../scenarios/maps/bendplatz.xodr"
scenario_map = Map.parse_from_opendrive(xodr)
heading = np.deg2rad(45)
speed = 5
time = 0
position = np.array((48.5, -43.8))
velocity = speed * np.array((
|
np.cos(heading)
|
numpy.cos
|
import numpy as np
from chainer import as_variable
from chainercb.util import RidgeRegression
from chainer.testing import assert_allclose
def test_predict():
r = RidgeRegression(6)
x = np.array([[1.0, 2.0, 3.0, -3.0, -2.0, -1.0],
[2.0, 3.0, 1.0, -1.0, -3.0, -2.0],
[-1.0, -2.0, -1.0, 1.0, 3.0, 1.0]])
y = np.array([1.0, 1.0, -1.0])
x = as_variable(x)
y = as_variable(y)
# No observations
assert_allclose(r.predict(x).data,
np.array([0.0, 0.0, 0.0]))
# Update once
r.update(x, y)
assert_allclose(r.predict(x).data,
np.array([0.99656349, 1.03779954, -0.90377945]))
# Update 100 times
for _ in range(100):
r.update(x, y)
assert_allclose(r.predict(x).data,
|
np.array([1.00018692, 1.00122452, -0.99799728])
|
numpy.array
|
import numpy as np
from scipy.fft import fftn,ifftn,fftshift
import h5py
from pathlib import Path
fileprefix_snapshot = 'snapdir_%03d/snapshot_%03d'
fileprefix_subhalo = 'groups_%03d/fof_subhalo_tab_%03d'
fileprefix_subhalo_desc = 'groups_%03d/subhalo_desc_%03d'
fileprefix_subhalo_prog = 'groups_%03d/subhalo_prog_%03d'
def gadget_to_particles(fileprefix, opts={'pos':True,'vel':True,'ID':False,'mass':True}):
'''
Read particles from GADGET HDF5 snapshot.
Parameters:
fileprefix: input file prefix (e.g., snapshot_000, not snapshot_000.0.hdf5)
opts: which fields to read and return
Returns:
pos: position array, shape (3,NP), comoving
vel: velocity array, shape (3,NP), peculiar
ID: ID array, shape (NP,)
mass: mass array, shape (NP,)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
pinst = 0
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
MassTable = header['MassTable']
ScaleFactor = 1./(1+header['Redshift'])
NP = header['NumPart_ThisFile']
NPtot = header['NumPart_Total']
numfiles = header['NumFilesPerSnapshot']
if fileinst == 0:
# allocate full-sized memory blocks in advance, for efficiency
if opts.get('pos'): pos = np.zeros((3,np.sum(NPtot)),dtype=np.float32)
if opts.get('vel'): vel = np.zeros((3,np.sum(NPtot)),dtype=np.float32)
if opts.get('mass'): mass = np.zeros(np.sum(NPtot),dtype=np.float32)
if opts.get('ID'): ID = np.zeros(np.sum(NPtot),dtype=np.uint32)
for typ in range(len(NPtot)):
NPtyp = int(NP[typ])
if NPtyp == 0:
continue
if opts.get('pos'): pos[:,pinst:pinst+NPtyp] = np.array(f['PartType%d/Coordinates'%typ]).T
if opts.get('vel'): vel[:,pinst:pinst+NPtyp] = np.array(f['PartType%d/Velocities'%typ]).T * np.sqrt(ScaleFactor)
if opts.get('mass'):
if MassTable[typ] == 0.:
mass[pinst:pinst+NPtyp] = np.array(f['PartType%d/Masses'%typ])
else:
mass[pinst:pinst+NPtyp] = np.full(NPtyp,MassTable[typ])
if opts.get('ID'): ID[pinst:pinst+NPtyp] = np.array(f['PartType%d/ParticleIDs'%typ])
pinst += NPtyp
fileinst += 1
ret = []
if opts.get('pos'): ret += [pos]
if opts.get('vel'): ret += [vel]
if opts.get('mass'): ret += [mass]
if opts.get('ID'): ret += [ID]
ret += [header]
return tuple(ret)
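# Illustrative usage sketch (not part of the reader API above); assumes a snapshot written
# with the fileprefix_snapshot pattern exists, e.g. snapdir_005/snapshot_005.
def example_total_mass(snapshot_prefix):
    '''
    Read positions and masses only and return the total particle mass; the return order of
    gadget_to_particles follows the True entries of opts (here pos, mass, header).
    '''
    pos, mass, header = gadget_to_particles(snapshot_prefix,
                                            opts={'pos': True, 'vel': False,
                                                  'ID': False, 'mass': True})
    return mass.sum()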
def fof_to_halos(fileprefix,opts={'pos':True,'vel':True,'mass':True}):
'''
Read halos from GADGET HDF5 FOF file.
Parameters:
fileprefix: input file prefix (e.g., fof_tab_000, not fof_tab_000.0.hdf5)
opts: which fields to read and return
Returns:
pos: position array, shape (3,NH), comoving
vel: velocity array, shape (3,NH), peculiar
mass: mass array, shape (NH,)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
if opts.get('pos'): pos = []
if opts.get('vel'): vel = []
if opts.get('mass'): mass = []
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
if header['Ngroups_Total'] == 0:
if opts.get('pos'): pos = [[]]
if opts.get('vel'): vel = [[]]
if opts.get('mass'): mass = [[]]
break
if header['Ngroups_ThisFile'] > 0:
if opts.get('pos'): pos += [np.array(f['Group/GroupPos']).T]
if opts.get('vel'): vel += [np.array(f['Group/GroupVel']).T * np.sqrt(ScaleFactor)]
if opts.get('mass'): mass += [np.array(f['Group/GroupMass'])]
fileinst += 1
ret = []
if opts.get('pos'): ret += [np.concatenate(pos,axis=1)]
if opts.get('vel'): ret += [np.concatenate(vel,axis=1)]
if opts.get('mass'): ret += [np.concatenate(mass)]
ret += [header]
return tuple(ret)
def cic_bin(x,BoxSize,GridSize,weights=1,density=True):
'''
Bin particles into a density field using cloud-in-cell method
Parameters:
x: 3D positions, shape (3,NP) where NP is the number of particles
BoxSize: size of periodic region
GridSize: resolution of output density field, per dimension
weights: weight (e.g. mass) to assign to each particle, either a number or
an array of length NP
density: If False, output the total mass within each cell. If True, output
mass/volume.
Returns:
field of shape (GridSize,GridSize,GridSize)
bin edges
'''
NP = x.shape[1]
N = GridSize
dx = BoxSize / GridSize
bins = dx * np.arange(N+1)
# idea:
# i and i1 are indices of the two adjacent cells (in each dimension)
# f is the fraction from i to i1 where particle lies
# shapes are (3,NP)
f = x / dx
f[f < 0.5] += N
f[f >= N+0.5] -= N
i = (f-0.5).astype(np.int32)
f -= i + 0.5
i1 = i+1
i[i<0] += N
i[i>=N] -= N
i1[i1<0] += N
i1[i1>=N] -= N
# now appropriately add each particle into the 8 adjacent cells
hist = np.zeros((N,N,N))
np.add.at(hist,(i[0],i[1],i[2]),(1-f[0])*(1-f[1])*(1-f[2])*weights)
np.add.at(hist,(i1[0],i[1],i[2]),f[0]*(1-f[1])*(1-f[2])*weights)
np.add.at(hist,(i[0],i1[1],i[2]),(1-f[0])*f[1]*(1-f[2])*weights)
np.add.at(hist,(i[0],i[1],i1[2]),(1-f[0])*(1-f[1])*f[2]*weights)
np.add.at(hist,(i1[0],i1[1],i[2]),f[0]*f[1]*(1-f[2])*weights)
np.add.at(hist,(i[0],i1[1],i1[2]),(1-f[0])*f[1]*f[2]*weights)
np.add.at(hist,(i1[0],i[1],i1[2]),f[0]*(1-f[1])*f[2]*weights)
np.add.at(hist,(i1[0],i1[1],i1[2]),f[0]*f[1]*f[2]*weights)
if density:
hist /= dx**3
return hist,bins
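# Illustrative sketch of the cloud-in-cell deposit described above; the snapshot path is an
# assumption and BoxSize is read from the standard header attribute.
def example_density_field(snapshot_prefix, GridSize=128):
    '''
    Deposit particle masses onto a GridSize^3 mesh with cic_bin and return the mass-density
    field together with the cell edges.
    '''
    pos, mass, header = gadget_to_particles(snapshot_prefix,
                                            opts={'pos': True, 'vel': False,
                                                  'ID': False, 'mass': True})
    return cic_bin(pos, header['BoxSize'], GridSize, weights=mass, density=True)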
def power_spectrum(delta,BoxSize,bins=None):
'''
Find spherically averaged power spectrum of density field
Parameters:
delta: input density field
BoxSize: width of periodic box
bins: desired k bin edges
Returns:
k: array of wavenumbers
P(k): array comprising the power spectrum as a function of k
'''
GridSize = delta.shape[0]
dk = 2*np.pi/BoxSize
# radial bins for k
if bins is None:
# corner of cube is at distance np.sqrt(3)/2*length from center
bins = np.arange(1,int((GridSize+1) * np.sqrt(3)/2)) * dk
# get wavenumbers associated with k-space grid
k = ((np.indices(delta.shape)+GridSize//2)%GridSize-GridSize//2) * dk
k_mag = np.sqrt(np.sum(k**2,axis=0))
# Fourier transform and get power spectrum
pk = np.abs(fftn(delta,overwrite_x=True))**2*BoxSize**3/GridSize**6
hist_pk,_ = np.histogram(k_mag,bins=bins,weights=pk)
hist_ct,_ = np.histogram(k_mag,bins=bins)
hist_k,_ = np.histogram(k_mag,bins=bins,weights=k_mag)
return hist_k/hist_ct, hist_pk/hist_ct
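# Illustrative sketch chaining cic_bin and power_spectrum; converting to the overdensity
# delta = rho/rho_mean - 1 is a common convention and an assumption here, not a requirement
# of power_spectrum itself.
def example_power_spectrum(snapshot_prefix, GridSize=128):
    '''
    Measure a spherically averaged matter power spectrum from a snapshot.
    '''
    pos, mass, header = gadget_to_particles(snapshot_prefix,
                                            opts={'pos': True, 'vel': False,
                                                  'ID': False, 'mass': True})
    rho, _ = cic_bin(pos, header['BoxSize'], GridSize, weights=mass, density=True)
    delta = rho / rho.mean() - 1.
    return power_spectrum(delta, header['BoxSize'])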
def density_profile(pos,mass,bins=None,BoxSize=None):
'''
Spherically averaged density profile centered at position (0,0,0)
Parameters:
pos: 3D positions relative to center, shape (3,NP) where NP is the number of particles
mass: masses of particles, shape (NP)
bins: radial bin edges
BoxSize: size of periodic region (None if not periodic)
Returns:
radius, density
'''
NP = pos.shape[1]
# shift periodic box
if BoxSize is not None:
pos[pos >= 0.5*BoxSize] -= BoxSize
pos[pos < -0.5*BoxSize] += BoxSize
# radii
r = np.sqrt(np.sum(pos**2,axis=0))
# radial bins
if bins is None:
rmin = np.sort(r)[100]/10
rmax = np.max(r)
bins = np.geomspace(rmin,rmax,50)
bin_volume = 4./3 * np.pi * (bins[1:]**3 - bins[:-1]**3)
hist_mass,_ = np.histogram(r,bins=bins,weights=mass)
return 0.5*(bins[1:]+bins[:-1]), hist_mass / bin_volume
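# Illustrative sketch combining fof_to_halos and density_profile; the FOF tab path is an
# assumption and the most massive group is used as the center.
def example_halo_profile(snapshot_prefix, fof_prefix):
    '''
    Spherically averaged density profile around the most massive FOF group; particle
    positions are re-centered on the group position, as density_profile expects.
    '''
    pos, mass, header = gadget_to_particles(snapshot_prefix,
                                            opts={'pos': True, 'vel': False,
                                                  'ID': False, 'mass': True})
    hpos, hmass, hheader = fof_to_halos(fof_prefix,
                                        opts={'pos': True, 'vel': False, 'mass': True})
    center = hpos[:, np.argmax(hmass)]
    return density_profile(pos - center[:, None], mass, BoxSize=header['BoxSize'])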
def subhalo_tracing_data(snapshot_number,subhalo_number):
'''
Get a subhalo's mass, position, velocity, progenitor, descendant,
and other tracking information.
Parameters:
snapshot_number
subhalo_number
Returns:
prog: best-scoring progenitor
desc: best-scoring descendant
pos: subhalo (comoving) position, shape (3,)
vel: subhalo (peculiar) velocity, shape (3,)
mass: subhalo mass
ID: dict with group ID, subhalo ID, and most bound particle ID
header: a dict with header info, use list(header) to see the fields
'''
prefix_sub = fileprefix_subhalo%(snapshot_number,snapshot_number)
prefix_desc = fileprefix_subhalo_desc%(snapshot_number,snapshot_number)
prefix_prog = fileprefix_subhalo_prog%(snapshot_number,snapshot_number)
filepath = [
Path(prefix_sub + '.hdf5'),
Path(prefix_sub + '.0.hdf5'),
]
if filepath[0].is_file():
filebase_sub = prefix_sub + '.hdf5'
filebase_desc = prefix_desc + '.hdf5'
filebase_prog = prefix_prog + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase_sub = prefix_sub + '.%d.hdf5'
filebase_desc = prefix_desc + '.%d.hdf5'
filebase_prog = prefix_prog + '.%d.hdf5'
numfiles = 2
prog, desc, pos, vel, mass, ID, header = -1, -1, np.zeros(3), np.zeros(3), 0., {}, {}
fileinst = 0
hinst = 0
ginst = 0
while fileinst < numfiles:
if numfiles == 1:
filename_sub = filebase_sub
filename_desc = filebase_desc
filename_prog = filebase_prog
else:
filename_sub = filebase_sub%fileinst
filename_desc = filebase_desc%fileinst
filename_prog = filebase_prog%fileinst
with h5py.File(filename_sub, 'r') as f:
print('reading %s'%filename_sub)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
if hinst + header['Nsubhalos_ThisFile'] > subhalo_number:
index = subhalo_number - hinst
pos = np.array(f['Subhalo/SubhaloPos'])[index]
vel = np.array(f['Subhalo/SubhaloVel'])[index] * np.sqrt(ScaleFactor)
ID = {'group':np.array(f['Subhalo/SubhaloGroupNr'])[index],
'subhalo':subhalo_number,
'particle':np.array(f['Subhalo/SubhaloIDMostbound'])[index]}
if ID['group']>=ginst:
mass = {'group':np.array(f['Group/GroupMass'])[ID['group']-ginst],
'subhalo':np.array(f['Subhalo/SubhaloMass'])[index]}
else:
ginst2 = ginst
fileinst2 = fileinst
while ID['group'] < ginst2:
fileinst2 -= 1
filename_sub2 = filebase_sub%fileinst2
with h5py.File(filename_sub2, 'r') as f2:
print('reading %s'%filename_sub2)
header2 = dict(f2['Header'].attrs)
ginst2 -= int(header2['Ngroups_ThisFile'])
if ID['group'] >= ginst2:
mass = {'group':np.array(f2['Group/GroupMass'])[ID['group']-ginst2],
'subhalo':np.array(f['Subhalo/SubhaloMass'])[index]}
try:
with h5py.File(filename_desc, 'r') as fd:
print('reading %s'%filename_desc)
if np.array(fd['Subhalo/SubhaloNr'])[index] != subhalo_number:
raise Exception('halo number mismatch, %d != %d'%(np.array(fd['Subhalo/SubhaloNr'])[index],subhalo_number))
desc = np.array(fd['Subhalo/DescSubhaloNr'])[index]
except Exception as e:
print(str(e))
desc = -1
try:
with h5py.File(filename_prog, 'r') as fp:
print('reading %s'%filename_prog)
if np.array(fp['Subhalo/SubhaloNr'])[index] != subhalo_number:
raise Exception('halo number mismatch, %d != %d'%(np.array(fp['Subhalo/SubhaloNr'])[index],subhalo_number))
prog = np.array(fp['Subhalo/ProgSubhaloNr'])[index]
except Exception as e:
print(str(e))
prog = -1
break
hinst += int(header['Nsubhalos_ThisFile'])
ginst += int(header['Ngroups_ThisFile'])
fileinst += 1
else:
print('Warning: halo %d not found'%subhalo_number)
return prog, desc, pos, vel, mass, ID, header
def trace_subhalo(snapshot_number,subhalo_number):
'''
Trace a subhalo's position, mass, and other tracking information across snapshots.
Parameters:
snapshot_number
subhalo_number
Returns:
num: snapshot number
time: scale factor array, shape (NT,)
pos: position array, shape (NT,3), comoving
vel: velocity array, shape (NT,3), peculiar
mass: mass array, shape (NT,)
group: host group, shape (NT,)
ID: list of dicts with group ID, subhalo ID, and most bound particle ID; shape (NT,)
'''
prog, desc, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number,subhalo_number)
print('halo: %d in snapshot %d'%(subhalo_number,snapshot_number))
pos = [pos_]
vel = [vel_]
mass = [mass_]
ID = [ID_]
time = [header_['Time']]
num = [snapshot_number]
shift = 0
while prog >= 0:
shift += 1
print('progenitor: %d in snapshot %d'%(prog,snapshot_number-shift))
prog, _, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number-shift,prog)
pos += [pos_]
vel += [vel_]
mass += [mass_]
ID += [ID_]
time += [header_['Time']]
num += [snapshot_number-shift]
pos = pos[::-1]
vel = vel[::-1]
mass = mass[::-1]
ID = ID[::-1]
time = time[::-1]
num = num[::-1]
shift = 0
while desc >= 0:
shift += 1
print('descendant: %d in snapshot %d'%(desc,snapshot_number+shift))
_, desc, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number+shift,desc)
pos += [pos_]
vel += [vel_]
mass += [mass_]
ID += [ID_]
time += [header_['Time']]
num += [snapshot_number+shift]
return np.array(num), np.array(time), np.array(pos), np.array(vel), mass, ID
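# Illustrative sketch of the tracking utilities above; mass entries are dicts except for
# snapshots where the subhalo could not be matched, hence the isinstance guard.
def example_mass_history(snapshot_number, subhalo_number):
    '''
    Print the subhalo mass along the progenitor/descendant chain found by trace_subhalo.
    '''
    num, time, pos, vel, mass, ID = trace_subhalo(snapshot_number, subhalo_number)
    for n, a, m in zip(num, time, mass):
        msub = m['subhalo'] if isinstance(m, dict) else m
        print('snapshot %d (a=%.3f): subhalo mass %g' % (n, a, msub))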
def subhalo_group_data(fileprefix,opts={'mass':True,'len':False,'pos':False},parttype=None):
'''
Read halos from GADGET HDF5 FOF+subhalo file and return data relevant to group membership.
Parameters:
fileprefix: input file prefix (e.g., fof_subhalo_tab_000, not fof_subhalo_tab_000.0.hdf5)
opts: which fields to read and return
parttype: if not None, consider only particles of the given type for certain outputs
Returns:
group: host group number
rank: rank of subhalo within host group
parentrank: rank of parent subhalo within host group
mass: subhalo mass
groupmass: mass of host group
length: subhalo particle count
grouplength: host group particle count
pos: subhalo position (NH,3)
grouppos: host group position (NH,3)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
group = []
rank = []
parentrank = []
if opts.get('mass'):
mass = []
_groupmass = []
if opts.get('len'):
length = []
_grouplength = []
if opts.get('pos'):
pos = []
_grouppos = []
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
group += [np.array(f['Subhalo/SubhaloGroupNr'])]
rank += [np.array(f['Subhalo/SubhaloRankInGr'])]
parentrank += [np.array(f['Subhalo/SubhaloParentRank'])]
if parttype is None:
if opts.get('mass'):
mass += [np.array(f['Subhalo/SubhaloMass'])]
_groupmass += [np.array(f['Group/GroupMass'])]
if opts.get('len'):
length += [np.array(f['Subhalo/SubhaloLen'])]
_grouplength += [np.array(f['Group/GroupLen'])]
else:
if opts.get('mass'):
mass += [np.array(f['Subhalo/SubhaloMassType'][:,parttype])]
_groupmass += [np.array(f['Group/GroupMassType'][:,parttype])]
if opts.get('len'):
length += [np.array(f['Subhalo/SubhaloLenType'][:,parttype])]
_grouplength += [np.array(f['Group/GroupLenType'][:,parttype])]
if opts.get('pos'):
pos += [np.array(f['Subhalo/SubhaloPos'])]
_grouppos += [np.array(f['Group/GroupPos'])]
fileinst += 1
group = np.concatenate(group)
ret = [group, np.concatenate(rank), np.concatenate(parentrank)]
if opts.get('mass'):
groupmass = np.concatenate(_groupmass)[group]
ret += [np.concatenate(mass), groupmass]
if opts.get('len'):
grouplength = np.concatenate(_grouplength)[group]
ret += [np.concatenate(length), grouplength]
if opts.get('pos'):
grouppos = np.concatenate(_grouppos,axis=0)[group]
ret += [np.concatenate(pos), grouppos]
return tuple(ret + [header])
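# Illustrative sketch using the default opts of subhalo_group_data (mass only); centrals
# are taken to be the rank-0 subhalos of their host group.
def example_central_subhalos(fileprefix):
    '''
    Return indices of central subhalos and their mass fraction of the host group.
    '''
    group, rank, parentrank, mass, groupmass, header = subhalo_group_data(fileprefix)
    central = np.where(rank == 0)[0]
    return central, mass[central] / groupmass[central]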
def group_extent(fileprefix,group,size_definition='TopHat200'):
'''
Return position and extent of a group from a GADGET HDF5 FOF file (SUBFIND required).
Parameters:
fileprefix: input file prefix (e.g., fof_tab_000, not fof_tab_000.0.hdf5)
group: group number
size_definition: 'Crit200', 'Crit500', 'Mean200', or 'TopHat200' (default)
Returns:
pos: shape (3,), comoving
radius
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
hinst = 0
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
numfiles = header['NumFiles']
if hinst + header['Ngroups_ThisFile'] > group:
index = group - hinst
pos = np.array(f['Group/GroupPos'])[index]
radius =
|
np.array(f['Group/Group_R_'+size_definition])
|
numpy.array
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
from __future__ import division
import numpy as np
import os
import glog as log
import copy
import open3d as o3d
from open3d import JVisualizer
import pandas as pd
from evaluation.tools.mesh import Mesh
from evaluation.tools.mesh_evaluator import MeshEvaluator
# Rotation matrices:
# East North Up (ENU) frame to Unity's world frame of reference
# fmt: off
enu_R_unity = np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]])
# fmt: on
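# For example, enu_R_unity @ (x, y, z) = (x, z, y): Unity's y-up component becomes the
# ENU up (z) component.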
unity_R_enu =
|
np.transpose(enu_R_unity)
|
numpy.transpose
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None):
with self.test_session():
ans = array_ops.where(x)
self.assertEqual([None, x.ndim], ans.get_shape().as_list())
if expected_err_re is None:
tf_ans = ans.eval()
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def testWrongNumbers(self):
with self.test_session():
with self.assertRaises(ValueError):
array_ops.where([False, True], [1, 2], None)
with self.assertRaises(ValueError):
array_ops.where([False, True], None, [1, 2])
def testBasicMat(self):
x =
|
np.asarray([[True, False], [True, False]])
|
numpy.asarray
|
import copy
import numpy as np
import openmdao.api as om
import wisdem.commonse.utilities as util
import wisdem.commonse.utilization_constraints as util_con
from wisdem.commonse.environment import TowerSoil
from wisdem.commonse.cross_sections import CylindricalShellProperties
from wisdem.commonse.wind_wave_drag import CylinderEnvironment
from wisdem.commonse.vertical_cylinder import (
NFREQ,
RIGID,
CylinderMass,
CylinderFrame3DD,
CylinderDiscretization,
get_nfull,
)
NPTS_SOIL = 10
# -----------------
# Components
# -----------------
class DiscretizationYAML(om.ExplicitComponent):
"""
Process some of the tower YAML inputs.
Parameters
----------
tower_s : numpy array[n_height_tow]
1D array of the non-dimensional grid defined along the tower axis (0-tower base,
1-tower top)
tower_layer_materials : list of strings
1D array of the names of the materials of each layer modeled in the tower
structure.
tower_layer_thickness : numpy array[n_layers_tow, n_height_tow], [m]
2D array of the thickness of the layers of the tower structure. The first
dimension represents each layer, the second dimension represents each piecewise-
constant entry of the tower sections.
tower_height : float, [m]
Scalar of the tower height computed along the z axis.
tower_outer_diameter_in : numpy array[n_height_tow], [m]
cylinder diameter at corresponding locations
tower_outfitting_factor : float
Multiplier that accounts for secondary structure mass inside of cylinder
monopile_s : numpy array[n_height_mon]
1D array of the non-dimensional grid defined along the monopile axis (0-monopile base,
1-monopile top)
monopile_layer_materials : list of strings
1D array of the names of the materials of each layer modeled in the monopile
structure.
monopile_layer_thickness : numpy array[n_layers_mon, n_height_mon], [m]
2D array of the thickness of the layers of the monopile structure. The first
dimension represents each layer, the second dimension represents each piecewise-
constant entry of the monopile sections.
monopile_height : float, [m]
Scalar of the monopile height computed along the z axis.
monopile_outer_diameter_in : numpy array[n_height_mon], [m]
cylinder diameter at corresponding locations
monopile_outfitting_factor : float
Multiplier that accounts for secondary structure mass inside of cylinder
material_names : list of strings
1D array of names of materials.
E_mat : numpy array[n_mat, 3], [Pa]
2D array of the Youngs moduli of the materials. Each row represents a material,
the three columns represent E11, E22 and E33.
G_mat : numpy array[n_mat, 3], [Pa]
2D array of the shear moduli of the materials. Each row represents a material,
the three columns represent G12, G13 and G23.
sigma_y_mat : numpy array[n_mat], [Pa]
1D array of the yield strength of the materials, one entry per material.
rho_mat : numpy array[n_mat], [kg/m**3]
1D array of the density of the materials. For composites, this is the density of
the laminate.
unit_cost_mat : numpy array[n_mat], [USD/kg]
1D array of the unit costs of the materials.
Returns
-------
tower_section_height : numpy array[n_height-1], [m]
parameterized section heights along cylinder
tower_outer_diameter : numpy array[n_height], [m]
cylinder diameter at corresponding locations
tower_wall_thickness : numpy array[n_height-1], [m]
shell thickness at corresponding locations
outfitting_factor : numpy array[n_height-1]
Multiplier that accounts for secondary structure mass inside of cylinder
E : numpy array[n_height-1], [Pa]
Isotropic Youngs modulus of the materials along the tower sections.
G : numpy array[n_height-1], [Pa]
Isotropic shear modulus of the materials along the tower sections.
sigma_y : numpy array[n_height-1], [Pa]
Isotropic yield strength of the materials along the tower sections.
rho : numpy array[n_height-1], [kg/m**3]
Density of the materials along the tower sections.
unit_cost : numpy array[n_height-1], [USD/kg]
Unit costs of the materials along the tower sections.
outfitting_factor : numpy array[n_height-1]
Additional outfitting multiplier in each section
"""
def initialize(self):
self.options.declare("n_height_tower")
self.options.declare("n_height_monopile")
self.options.declare("n_layers_tower")
self.options.declare("n_layers_monopile")
self.options.declare("n_mat")
def setup(self):
n_height_tow = self.options["n_height_tower"]
n_height_mon = self.options["n_height_monopile"]
n_layers_tow = self.options["n_layers_tower"]
n_layers_mon = self.options["n_layers_monopile"]
n_mat = self.options["n_mat"]
if n_height_mon > 0:
n_height = n_height_tow + n_height_mon - 1 # Should have one overlapping point
else:
n_height = n_height_tow
# Inputs here are the outputs from the Tower component in load_IEA_yaml
# TODO: Use reference axis and curvature, s, instead of assuming everything is vertical on z
self.add_input("tower_s", val=np.zeros(n_height_tow))
self.add_discrete_input("tower_layer_materials", val=n_layers_tow * [""])
self.add_input("tower_layer_thickness", val=np.zeros((n_layers_tow, n_height_tow)), units="m")
self.add_input("tower_height", val=0.0, units="m")
self.add_input("tower_foundation_height", val=0.0, units="m")
self.add_input("tower_outer_diameter_in", np.zeros(n_height_tow), units="m")
self.add_input("tower_outfitting_factor", val=0.0)
self.add_input("monopile_s", val=np.zeros(n_height_mon))
self.add_discrete_input("monopile_layer_materials", val=n_layers_tow * [""])
self.add_input("monopile_layer_thickness", val=np.zeros((n_layers_mon, n_height_mon)), units="m")
self.add_input("monopile_foundation_height", val=0.0, units="m")
self.add_input("monopile_height", val=0.0, units="m")
self.add_input("monopile_outer_diameter_in", np.zeros(n_height_mon), units="m")
self.add_input("monopile_outfitting_factor", val=0.0)
self.add_discrete_input("material_names", val=n_mat * [""])
self.add_input("water_depth", val=0.0, units="m")
self.add_input("E_mat", val=np.zeros([n_mat, 3]), units="Pa")
self.add_input("G_mat", val=np.zeros([n_mat, 3]), units="Pa")
self.add_input("sigma_y_mat", val=np.zeros(n_mat), units="Pa")
self.add_input("rho_mat", val=np.zeros(n_mat), units="kg/m**3")
self.add_input("unit_cost_mat", val=np.zeros(n_mat), units="USD/kg")
self.add_output("tower_section_height", val=np.zeros(n_height - 1), units="m")
self.add_output("tower_outer_diameter", val=np.zeros(n_height), units="m")
self.add_output("tower_wall_thickness", val=np.zeros(n_height - 1), units="m")
self.add_output("transition_piece_height", 0.0, units="m")
self.add_output("suctionpile_depth", 0.0, units="m")
self.add_output("outfitting_factor", val=np.zeros(n_height - 1))
self.add_output("E", val=np.zeros(n_height - 1), units="Pa")
self.add_output("G", val=np.zeros(n_height - 1), units="Pa")
self.add_output("sigma_y", val=np.zeros(n_height - 1), units="Pa")
self.add_output("rho", val=np.zeros(n_height - 1), units="kg/m**3")
self.add_output("unit_cost", val=np.zeros(n_height - 1), units="USD/kg")
self.add_output("z_start", 0.0, units="m")
# self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack dimensions
n_height_tow = self.options["n_height_tower"]
n_height_mon = self.options["n_height_monopile"]
n_layers_tow = self.options["n_layers_tower"]
n_layers_mon = self.options["n_layers_monopile"]
n_height = n_height_tow if n_height_mon == 0 else n_height_tow + n_height_mon - 1
# Unpack values
h_mon = inputs["monopile_height"]
h_tow = inputs["tower_height"]
s_mon = inputs["monopile_s"]
s_tow = inputs["tower_s"]
lthick_mon = inputs["monopile_layer_thickness"]
lthick_tow = inputs["tower_layer_thickness"]
lmat_mon = copy.copy(discrete_inputs["monopile_layer_materials"])
lmat_tow = copy.copy(discrete_inputs["tower_layer_materials"])
fh_tow = inputs["tower_foundation_height"]
fh_mon = inputs["monopile_foundation_height"]
water_depth = inputs["water_depth"]
# Enforce constant tower thickness per section, assuming rolling of a flat steel plate
# Might not have to assume this with concrete, but can account for it in input values if necessary
lthick_tow = 0.5 * (lthick_tow[:, :-1] + lthick_tow[:, 1:])
if n_height_mon > 0:
lthick_mon = 0.5 * (lthick_mon[:, :-1] + lthick_mon[:, 1:])
outputs["transition_piece_height"] = fh_tow
if n_height_mon > 0:
if np.abs(fh_tow - fh_mon - h_mon) > 1.0:
print(
"WARNING: Monopile length is not consistent with transition piece height and monopile base height\n",
" Determining new base height value . . .",
)
outputs["z_start"] = fh_tow - h_mon
pile = h_mon - fh_tow - water_depth
outputs["suctionpile_depth"] = pile
# Ensure that we have only one segment for pile, a current limitation
if pile > 0:
s1 = pile / h_mon
icheck = np.where(s_mon > s1 + 1e-3)[0][0]
s_mon = np.r_[0.0, np.linspace(s1, s_mon[icheck], icheck).flatten(), s_mon[(icheck + 1) :].flatten()]
# Last monopile point and first tower point are the same
outputs["tower_section_height"] = np.r_[np.diff(h_mon * s_mon), np.diff(h_tow * s_tow)]
outputs["outfitting_factor"] = np.r_[
inputs["monopile_outfitting_factor"] * np.ones(n_height_mon - 1),
inputs["tower_outfitting_factor"] * np.ones(n_height_tow - 1),
]
outputs["tower_outer_diameter"] = np.r_[
inputs["monopile_outer_diameter_in"], inputs["tower_outer_diameter_in"][1:]
]
# Combine layers into one structure
layer_mat = []
twall = np.zeros((1, n_height - 1))
for k in range(n_layers_mon):
ilayer = np.zeros(n_height - 1)
ilayer[: (n_height_mon - 1)] = lthick_mon[k, :]
imat_mon = lmat_mon[k]
layer_mat.append(imat_mon)
if imat_mon in lmat_tow:
ktow = lmat_tow.index(imat_mon)
ilayer[(n_height_mon - 1) :] = lthick_tow[ktow, :]
# Remove from listing so we don't double count later
lmat_tow.pop(ktow)
lthick_tow = np.delete(lthick_tow, [ktow], axis=0)
twall = np.vstack((twall, ilayer))
# If there are any uncounted tower layers, add them in
n_layers_tow = len(lmat_tow)
for k in range(n_layers_tow):
ilayer = np.zeros(n_height - 1)
ilayer[(n_height_mon - 1) :] = lthick_tow[k, :]
twall = np.vstack((twall, ilayer))
imat = lmat_tow[k]
layer_mat.append(imat)
twall = np.delete(twall, [0], axis=0)
outputs["tower_wall_thickness"] = np.sum(twall, axis=0)
else:
outputs["tower_section_height"] = np.diff(h_tow * s_tow)
outputs["tower_wall_thickness"] = np.sum(lthick_tow, axis=0)
outputs["outfitting_factor"] = inputs["tower_outfitting_factor"] * np.ones(n_height - 1)
outputs["tower_outer_diameter"] = inputs["tower_outer_diameter_in"]
twall = lthick_tow
layer_mat = discrete_inputs["tower_layer_materials"]
outputs["z_start"] = fh_tow
outputs["suctionpile_depth"] = 0.0
# Check to make sure we have good values
if np.any(outputs["tower_section_height"] <= 0.0):
raise ValueError("Section height values must be greater than zero, " + str(outputs["tower_section_height"]))
if np.any(outputs["tower_wall_thickness"] <= 0.0):
raise ValueError("Wall thickness values must be greater than zero, " + str(outputs["tower_wall_thickness"]))
if np.any(outputs["tower_outer_diameter"] <= 0.0):
raise ValueError("Diameter values must be greater than zero, " + str(outputs["tower_outer_diameter"]))
# DETERMINE MATERIAL PROPERTIES IN EACH SECTION
# Convert to isotropic material
E = np.mean(inputs["E_mat"], axis=1)
G = np.mean(inputs["G_mat"], axis=1)
sigy = inputs["sigma_y_mat"]
rho = inputs["rho_mat"]
cost = inputs["unit_cost_mat"]
mat_names = discrete_inputs["material_names"]
# Initialize sectional data
E_param = np.zeros(twall.shape)
G_param = np.zeros(twall.shape)
sigy_param = np.zeros(twall.shape)
rho_param = np.zeros(n_height - 1)
cost_param = np.zeros(n_height - 1)
# Loop over materials and associate it with its thickness
for k in range(len(layer_mat)):
# Get the material name for this layer
iname = layer_mat[k]
# Get the index into the material list
imat = mat_names.index(iname)
imass = rho[imat] * twall[k, :]
# For density, take mass weighted layer
rho_param += imass
# For cost, take mass weighted layer
cost_param += imass * cost[imat]
# Store the value associated with this thickness
E_param[k, :] = E[imat]
G_param[k, :] = G[imat]
sigy_param[k, :] = sigy[imat]
# Mass weighted cost (should really weight by radius too)
cost_param /= rho_param
# Thickness weighted density (should really weight by radius too)
rho_param /= twall.sum(axis=0)
# Mixtures of material properties: https://en.wikipedia.org/wiki/Rule_of_mixtures
# Volume fraction
vol_frac = twall / twall.sum(axis=0)[np.newaxis, :]
# Average of upper and lower bounds
E_param = 0.5 * np.sum(vol_frac * E_param, axis=0) + 0.5 / np.sum(vol_frac / E_param, axis=0)
G_param = 0.5 * np.sum(vol_frac * G_param, axis=0) + 0.5 / np.sum(vol_frac / G_param, axis=0)
sigy_param = 0.5 * np.sum(vol_frac * sigy_param, axis=0) + 0.5 / np.sum(vol_frac / sigy_param, axis=0)
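# Worked example (illustrative): two layers of equal thickness with E = 200 GPa and
# E = 100 GPa give vol_frac = [0.5, 0.5], a Voigt (upper) bound of 150 GPa and a Reuss
# (lower) bound of about 133.3 GPa, so E_param = 0.5*150 + 0.5*133.3 ~ 141.7 GPa.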
# Store values
outputs["E"] = E_param
outputs["G"] = G_param
outputs["rho"] = rho_param
outputs["sigma_y"] = sigy_param
outputs["unit_cost"] = cost_param
class TowerDiscretization(om.ExplicitComponent):
"""
Compute the full arrays for some measures along the tower by interpolating.
Parameters
----------
hub_height : float, [m]
hub height of the turbine, used to check the tower top elevation
z_param : numpy array[n_height], [m]
parameterized locations along tower, linear lofting between
z_full : numpy array[nFull], [m]
parameterized locations along tower, linear lofting between
rho : numpy array[n_height-1], [kg/m**3]
Density of the materials along the tower sections.
unit_cost : numpy array[n_height-1], [USD/kg]
Unit costs of the materials along the tower sections.
outfitting_factor : numpy array[n_height-1]
Multiplier that accounts for secondary structure mass inside of cylinder
E : numpy array[n_height-1], [Pa]
Isotropic Youngs modulus of the materials along the tower sections.
G : numpy array[n_height-1], [Pa]
Isotropic shear modulus of the materials along the tower sections.
sigma_y : numpy array[n_height-1], [Pa]
Isotropic yield strength of the materials along the tower sections.
Az : numpy array[nFull-1], [m**2]
cross-sectional area
Jz : numpy array[nFull-1], [m**4]
polar moment of inertia
Ixx : numpy array[nFull-1], [m**4]
area moment of inertia about x-axis
Iyy : numpy array[nFull-1], [m**4]
area moment of inertia about y-axis
Returns
-------
height_constraint : float, [m]
mismatch between tower height and desired hub_height
rho_full : numpy array[nFull-1], [kg/m**3]
Density of the materials along the tower sections.
unit_cost_full : numpy array[nFull-1], [USD/kg]
Unit costs of the materials along the tower sections.
outfitting_full : numpy array[nFull-1]
Multiplier that accounts for secondary structure mass inside of cylinder
E_full : numpy array[nFull-1], [Pa]
Isotropic Youngs modulus of the materials along the tower sections.
G_full : numpy array[nFull-1], [Pa]
Isotropic shear modulus of the materials along the tower sections.
sigma_y_full : numpy array[nFull-1], [Pa]
Isotropic yield strength of the materials along the tower sections.
"""
def initialize(self):
self.options.declare("n_height")
def setup(self):
n_height = self.options["n_height"]
nFull = get_nfull(n_height)
self.add_input("hub_height", val=0.0, units="m")
self.add_input("z_param", np.zeros(n_height), units="m")
self.add_input("z_full", val=np.zeros(nFull), units="m")
self.add_input("rho", val=np.zeros(n_height - 1), units="kg/m**3")
self.add_input("unit_cost", val=np.zeros(n_height - 1), units="USD/kg")
self.add_input("outfitting_factor", val=np.zeros(n_height - 1))
self.add_input("E", val=np.zeros(n_height - 1), units="Pa")
self.add_input("G", val=np.zeros(n_height - 1), units="Pa")
self.add_input("sigma_y", val=np.zeros(n_height - 1), units="Pa")
self.add_input("Az", np.zeros(nFull - 1), units="m**2")
self.add_input("Jz", np.zeros(nFull - 1), units="m**4")
self.add_input("Ixx", np.zeros(nFull - 1), units="m**4")
self.add_input("Iyy", np.zeros(nFull - 1), units="m**4")
self.add_output("height_constraint", val=0.0, units="m")
self.add_output("rho_full", val=np.zeros(nFull - 1), units="kg/m**3")
self.add_output("unit_cost_full", val=np.zeros(nFull - 1), units="USD/kg")
self.add_output("outfitting_full", val=np.zeros(nFull - 1))
self.add_output("E_full", val=np.zeros(nFull - 1), units="Pa")
self.add_output("G_full", val=np.zeros(nFull - 1), units="Pa")
self.add_output("sigma_y_full", val=np.zeros(nFull - 1), units="Pa")
# Tower Distributed Beam Properties (properties needed for ElastoDyn (OpenFAST) inputs or BModes inputs for verification purposes)
self.add_output("sec_loc", np.zeros(n_height - 1), desc="normalized sectional location")
self.add_output("str_tw", np.zeros(n_height - 1), units="deg", desc="structural twist of section")
self.add_output("tw_iner", np.zeros(n_height - 1), units="deg", desc="inertial twist of section")
self.add_output("mass_den", np.zeros(n_height - 1), units="kg/m", desc="sectional mass per unit length")
self.add_output(
"foreaft_iner",
np.zeros(n_height - 1),
units="kg*m",
desc="sectional fore-aft intertia per unit length about the Y_G inertia axis",
)
self.add_output(
"sideside_iner",
np.zeros(n_height - 1),
units="kg*m",
desc="sectional side-side intertia per unit length about the Y_G inertia axis",
)
self.add_output(
"foreaft_stff",
np.zeros(n_height - 1),
units="N*m**2",
desc="sectional fore-aft bending stiffness per unit length about the Y_E elastic axis",
)
self.add_output(
"sideside_stff",
np.zeros(n_height - 1),
units="N*m**2",
desc="sectional side-side bending stiffness per unit length about the Y_E elastic axis",
)
self.add_output("tor_stff", np.zeros(n_height - 1), units="N*m**2", desc="sectional torsional stiffness")
self.add_output("axial_stff", np.zeros(n_height - 1), units="N", desc="sectional axial stiffness")
self.add_output("cg_offst", np.zeros(n_height - 1), units="m", desc="offset from the sectional center of mass")
self.add_output("sc_offst", np.zeros(n_height - 1), units="m", desc="offset from the sectional shear center")
self.add_output("tc_offst", np.zeros(n_height - 1), units="m", desc="offset from the sectional tension center")
self.declare_partials("height_constraint", ["hub_height", "z_param"], method="fd")
self.declare_partials("outfitting_full", ["outfitting_factor"], method="fd")
self.declare_partials("rho_full", ["rho"], method="fd")
self.declare_partials("unit_cost_full", ["unit_cost"], method="fd")
def compute(self, inputs, outputs):
z_full = inputs["z_full"]
z_param = inputs["z_param"]
z_section = 0.5 * (z_full[:-1] + z_full[1:])
outputs["height_constraint"] = inputs["hub_height"] - z_param[-1]
outputs["rho_full"] = util.sectionalInterp(z_section, z_param, inputs["rho"])
outputs["outfitting_full"] = util.sectionalInterp(z_section, z_param, inputs["outfitting_factor"])
outputs["unit_cost_full"] = util.sectionalInterp(z_section, z_param, inputs["unit_cost"])
outputs["E_full"] = util.sectionalInterp(z_section, z_param, inputs["E"])
outputs["G_full"] = util.sectionalInterp(z_section, z_param, inputs["G"])
outputs["sigma_y_full"] = util.sectionalInterp(z_section, z_param, inputs["sigma_y"])
# Unpack for Elastodyn
z = 0.5 * (z_param[:-1] + z_param[1:])
rho = inputs["rho"]
E = inputs["E"]
G = inputs["G"]
Az = util.sectionalInterp(z, z_full, inputs["Az"])
Ixx = util.sectionalInterp(z, z_full, inputs["Ixx"])
Iyy = util.sectionalInterp(z, z_full, inputs["Iyy"])
Jz = util.sectionalInterp(z, z_full, inputs["Jz"])
outputs["sec_loc"] = (z - z[0]) / (z[-1] - z[0])
outputs["mass_den"] = rho * Az
outputs["foreaft_iner"] = rho * Ixx
outputs["sideside_iner"] = rho * Iyy
outputs["foreaft_stff"] = E * Ixx
outputs["sideside_stff"] = E * Iyy
outputs["tor_stff"] = G * Jz
outputs["axial_stff"] = E * Az
class TowerMass(om.ExplicitComponent):
"""
Compute the tower and monopile masses, raw cost, and CG properties.
Parameters
----------
cylinder_mass : numpy array[nFull-1], [kg]
Total cylinder mass
cylinder_cost : float, [USD]
Total cylinder cost
cylinder_center_of_mass : float, [m]
z position of center of mass of cylinder
cylinder_section_center_of_mass : numpy array[nFull-1], [m]
z position of center of mass of each can in the cylinder
cylinder_I_base : numpy array[6], [kg*m**2]
Mass moment of inertia of cylinder about base [xx yy zz xy xz yz]
transition_piece_height : float, [m]
Point mass height of transition piece above water line
transition_piece_mass : float, [kg]
Point mass of transition piece
transition_piece_cost : float, [USD]
Cost of transition piece
gravity_foundation_mass : float, [kg]
Extra mass of gravity foundation
z_full : numpy array[nFull], [m]
Parameterized locations along tower, linear lofting between
d_full : numpy array[nFull], [m]
diameter along tower
Returns
-------
structural_cost : float, [USD]
Total structural cost (tower+monopile)
structural_mass : float, [kg]
Total structural mass (tower+monopile)
tower_raw_cost : float, [USD]
Tower cost only
tower_mass : float, [kg]
Tower mass only
tower_center_of_mass : float, [m]
z-position of center of mass of tower
tower_section_center_of_mass : numpy array[nFull-1], [m]
z position of center of mass of each can in the tower
tower_I_base : numpy array[6], [kg*m**2]
Mass moment of inertia of tower about base [xx yy zz xy xz yz]
monopile_mass : float, [kg]
Mass of monopile from bottom of suction pile through transition piece
monopile_cost : float, [USD]
Total monopile cost
"""
def initialize(self):
self.options.declare("n_height")
def setup(self):
n_height = self.options["n_height"]
nFull = get_nfull(n_height)
self.add_input("cylinder_mass", val=np.zeros(nFull - 1), units="kg")
self.add_input("cylinder_cost", val=0.0, units="USD")
self.add_input("cylinder_center_of_mass", val=0.0, units="m")
self.add_input("cylinder_section_center_of_mass", val=np.zeros(nFull - 1), units="m")
self.add_input("cylinder_I_base", np.zeros(6), units="kg*m**2")
self.add_input("transition_piece_height", 0.0, units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_cost", 0.0, units="USD")
self.add_input("gravity_foundation_mass", 0.0, units="kg")
self.add_input("z_full", val=np.zeros(nFull), units="m")
self.add_input("d_full", val=np.zeros(nFull), units="m")
self.add_output("structural_cost", val=0.0, units="USD")
self.add_output("structural_mass", val=0.0, units="kg")
self.add_output("tower_cost", val=0.0, units="USD")
self.add_output("tower_mass", val=0.0, units="kg")
self.add_output("tower_center_of_mass", val=0.0, units="m")
self.add_output("tower_section_center_of_mass", val=np.zeros(nFull - 1), units="m")
self.add_output("tower_I_base", np.zeros(6), units="kg*m**2")
self.add_output("monopile_mass", val=0.0, units="kg")
self.add_output("monopile_cost", val=0.0, units="USD")
self.add_output("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_output("gravity_foundation_I", np.zeros(6), units="kg*m**2")
def compute(self, inputs, outputs):
# Unpack inputs
z = inputs["z_full"]
d = inputs["d_full"]
z_trans = inputs["transition_piece_height"]
m_trans = inputs["transition_piece_mass"]
m_grav = inputs["gravity_foundation_mass"]
m_cyl = inputs["cylinder_mass"]
outputs["structural_cost"] = inputs["cylinder_cost"] + inputs["transition_piece_cost"]
outputs["structural_mass"] = m_cyl.sum() + m_trans + m_grav
outputs["tower_center_of_mass"] = (
inputs["cylinder_center_of_mass"] * m_cyl.sum() + m_trans * z_trans + m_grav * z[0]
) / (m_cyl.sum() + m_trans + m_grav)
outputs["tower_section_center_of_mass"] = inputs["cylinder_section_center_of_mass"]
outputs["monopile_mass"], dydx, dydxp, dydyp = util.interp_with_deriv(z_trans, z, np.r_[0.0, np.cumsum(m_cyl)])
outputs["monopile_cost"] = (
inputs["cylinder_cost"] * outputs["monopile_mass"] / m_cyl.sum() + inputs["transition_piece_cost"]
)
outputs["monopile_mass"] += m_trans + m_grav
outputs["tower_cost"] = outputs["structural_cost"] - outputs["monopile_cost"]
outputs["tower_mass"] = outputs["structural_mass"] - outputs["monopile_mass"]
outputs["tower_I_base"] = inputs["cylinder_I_base"]
outputs["tower_I_base"][:2] += m_trans * (z_trans - z[0]) ** 2
# Mass properties for transition piece and gravity foundation
itrans = util.find_nearest(z, z_trans)
r_trans = 0.5 * d[itrans]
r_grav = 0.5 * d[0]
I_trans = m_trans * r_trans ** 2.0 * np.r_[0.5, 0.5, 1.0, np.zeros(3)] # shell
I_grav = m_grav * r_grav ** 2.0 * np.r_[0.25, 0.25, 0.5, np.zeros(3)] # disk
outputs["transition_piece_I"] = I_trans
outputs["gravity_foundation_I"] = I_grav
class TurbineMass(om.ExplicitComponent):
"""
Compute the turbine mass, center of mass, and mass moment of inertia.
Parameters
----------
hub_height : float, [m]
Hub-height
rna_mass : float, [kg]
Total rotor-nacelle assembly (RNA) mass
rna_I : numpy array[6], [kg*m**2]
Mass moment of inertia of RNA about tower top [xx yy zz xy xz yz]
rna_cg : numpy array[3], [m]
xyz-location of RNA cg relative to tower top
tower_mass : float, [kg]
Total tower mass (not including monopile)
monopile_mass : float, [kg]
Monopile mass
tower_center_of_mass : float, [m]
z-position of center of mass of tower
tower_I_base : numpy array[6], [kg*m**2]
Mass moment of inertia of tower about base [xx yy zz xy xz yz]
Returns
-------
turbine_mass : float, [kg]
Total mass of tower+rna
turbine_center_of_mass : numpy array[3], [m]
xyz-position of tower+rna center of mass
turbine_I_base : numpy array[6], [kg*m**2]
mass moment of inertia of tower about base [xx yy zz xy xz yz]
"""
def setup(self):
self.add_input("hub_height", val=0.0, units="m")
self.add_input("rna_mass", val=0.0, units="kg")
self.add_input("rna_I", np.zeros(6), units="kg*m**2")
self.add_input("rna_cg", np.zeros(3), units="m")
self.add_input("tower_mass", val=0.0, units="kg")
self.add_input("monopile_mass", val=0.0, units="kg")
self.add_input("tower_center_of_mass", val=0.0, units="m")
self.add_input("tower_I_base", np.zeros(6), units="kg*m**2")
self.add_output("turbine_mass", val=0.0, units="kg")
self.add_output("turbine_center_of_mass", val=np.zeros(3), units="m")
self.add_output("turbine_I_base", np.zeros(6), units="kg*m**2")
self.declare_partials(
"turbine_I_base", ["hub_height", "rna_I", "rna_cg", "rna_mass", "tower_I_base"], method="fd"
)
self.declare_partials(
"turbine_center_of_mass",
["hub_height", "monopile_mass", "rna_cg", "rna_mass", "tower_center_of_mass", "tower_mass"],
method="fd",
)
self.declare_partials("turbine_mass", ["monopile_mass", "rna_mass", "tower_mass"], val=1.0)
def compute(self, inputs, outputs):
outputs["turbine_mass"] = inputs["rna_mass"] + inputs["tower_mass"] + inputs["monopile_mass"]
cg_rna = inputs["rna_cg"] + np.r_[0.0, 0.0, inputs["hub_height"]]
cg_tower = np.r_[0.0, 0.0, inputs["tower_center_of_mass"]]
outputs["turbine_center_of_mass"] = (inputs["rna_mass"] * cg_rna + inputs["tower_mass"] * cg_tower) / outputs[
"turbine_mass"
]
R = cg_rna
I_tower = util.assembleI(inputs["tower_I_base"])
I_rna = util.assembleI(inputs["rna_I"]) + inputs["rna_mass"] * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
outputs["turbine_I_base"] = util.unassembleI(I_tower + I_rna)
class TowerPreFrame(om.ExplicitComponent):
"""
Compute some properties of the tower needed for FEM analysis.
This component can be simplified by using src_indices for data-passing.
At the very least, we can code the sparse derivatives as-is for
input-output relationships.
Parameters
----------
z_full : numpy array[nFull], [m]
location along tower. start at bottom and go to top
mass : float, [kg]
added mass
mI : numpy array[6], [kg*m**2]
mass moment of inertia about some point p [xx yy zz xy xz yz]
mrho : numpy array[3], [m]
xyz-location of p relative to node
transition_piece_mass : float, [kg]
point mass of transition piece
gravity_foundation_mass : float, [kg]
point mass of gravity foundation
transition_piece_height : float, [m]
height of transition piece above water line
rna_F : numpy array[3], [N]
rna force
rna_M : numpy array[3], [N*m]
rna moment
k_monopile : numpy array[6], [N/m]
Stiffness BCs for ocean soil. Only used if monoflag input is True
Returns
-------
kidx : numpy array[np.int_]
indices of z where external stiffness reactions should be applied.
kx : numpy array[nK], [N/m]
spring stiffness in x-direction
ky : numpy array[nK], [N/m]
spring stiffness in y-direction
kz : numpy array[nK], [N/m]
spring stiffness in z-direction
ktx : numpy array[nK], [N/m]
spring stiffness in theta_x-rotation
kty : numpy array[nK], [N/m]
spring stiffness in theta_y-rotation
ktz : numpy array[nK], [N/m]
spring stiffness in theta_z-rotation
midx : numpy array[np.int_]
indices where added mass should be applied.
m : numpy array[nMass], [kg]
added mass
mIxx : numpy array[nMass], [kg*m**2]
x mass moment of inertia about some point p
mIyy : numpy array[nMass], [kg*m**2]
y mass moment of inertia about some point p
mIzz : numpy array[nMass], [kg*m**2]
z mass moment of inertia about some point p
mIxy : numpy array[nMass], [kg*m**2]
xy mass moment of inertia about some point p
mIxz : numpy array[nMass], [kg*m**2]
xz mass moment of inertia about some point p
mIyz : numpy array[nMass], [kg*m**2]
yz mass moment of inertia about some point p
mrhox : numpy array[nMass], [m]
x-location of p relative to node
mrhoy : numpy array[nMass], [m]
y-location of p relative to node
mrhoz : numpy array[nMass], [m]
z-location of p relative to node
plidx : numpy array[np.int_]
indices where point loads should be applied.
Fx : numpy array[nPL], [N]
point force in x-direction
Fy : numpy array[nPL], [N]
point force in y-direction
Fz : numpy array[nPL], [N]
point force in z-direction
Mxx : numpy array[nPL], [N*m]
point moment about x-axis
Myy : numpy array[nPL], [N*m]
point moment about y-axis
Mzz : numpy array[nPL], [N*m]
point moment about z-axis
"""
def initialize(self):
self.options.declare("n_height")
self.options.declare("monopile", default=False)
self.options.declare("soil_springs", default=False)
self.options.declare("gravity_foundation", default=False)
def setup(self):
n_height = self.options["n_height"]
nFull = get_nfull(n_height)
self.add_input("z_full", np.zeros(nFull), units="m")
# extra mass
self.add_input("mass", 0.0, units="kg")
self.add_input("mI", np.zeros(6), units="kg*m**2")
self.add_input("mrho", np.zeros(3), units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_input("gravity_foundation_I", np.zeros(6), units="kg*m**2")
self.add_input("gravity_foundation_mass", 0.0, units="kg")
self.add_input("transition_piece_height", 0.0, units="m")
self.add_input("suctionpile_depth", 0.0, units="m")
# point loads
self.add_input("rna_F", np.zeros(3), units="N")
self.add_input("rna_M", np.zeros(3), units="N*m")
# Monopile handling
self.add_input("z_soil", np.zeros(NPTS_SOIL), units="N/m")
self.add_input("k_soil", np.zeros((NPTS_SOIL, 6)), units="N/m")
# spring reaction data.
nK = 4 if self.options["monopile"] and not self.options["gravity_foundation"] else 1
self.add_output("kidx", np.zeros(nK, dtype=np.int_))
self.add_output("kx", np.zeros(nK), units="N/m")
self.add_output("ky", np.zeros(nK), units="N/m")
self.add_output("kz", np.zeros(nK), units="N/m")
self.add_output("ktx", np.zeros(nK), units="N/m")
self.add_output("kty", np.zeros(nK), units="N/m")
self.add_output("ktz", np.zeros(nK), units="N/m")
# extra mass
nMass = 3
self.add_output("midx", np.zeros(nMass, dtype=np.int_))
self.add_output("m", np.zeros(nMass), units="kg")
self.add_output("mIxx", np.zeros(nMass), units="kg*m**2")
self.add_output("mIyy", np.zeros(nMass), units="kg*m**2")
self.add_output("mIzz", np.zeros(nMass), units="kg*m**2")
self.add_output("mIxy", np.zeros(nMass), units="kg*m**2")
self.add_output("mIxz", np.zeros(nMass), units="kg*m**2")
self.add_output("mIyz", np.zeros(nMass), units="kg*m**2")
self.add_output("mrhox", np.zeros(nMass), units="m")
self.add_output("mrhoy", np.zeros(nMass), units="m")
self.add_output("mrhoz", np.zeros(nMass), units="m")
# point loads (if addGravityLoadForExtraMass=True be sure not to double count by adding those forces here also)
nPL = 1
self.add_output("plidx", np.zeros(nPL, dtype=np.int_))
self.add_output("Fx", np.zeros(nPL), units="N")
self.add_output("Fy", np.zeros(nPL), units="N")
self.add_output("Fz", np.zeros(nPL), units="N")
self.add_output("Mxx", np.zeros(nPL), units="N*m")
self.add_output("Myy", np.zeros(nPL), units="N*m")
self.add_output("Mzz", np.zeros(nPL), units="N*m")
self.declare_partials("Fx", ["rna_F"], method="fd")
self.declare_partials("Fy", ["rna_F"], method="fd")
self.declare_partials("Fz", ["rna_F"], method="fd")
self.declare_partials("G_full", [], method="fd")
self.declare_partials("Mxx", ["rna_M"], method="fd")
self.declare_partials("Myy", ["rna_M"], method="fd")
self.declare_partials("Mzz", ["rna_M"], method="fd")
self.declare_partials("kidx", [], method="fd")
self.declare_partials("ktx", [], method="fd")
self.declare_partials("kty", [], method="fd")
self.declare_partials("ktz", [], method="fd")
self.declare_partials("kx", [], method="fd")
self.declare_partials("ky", [], method="fd")
self.declare_partials("kz", [], method="fd")
self.declare_partials("m", ["gravity_foundation_mass", "mass", "transition_piece_mass"], method="fd")
self.declare_partials("mIxx", ["gravity_foundation_mass", "mI", "transition_piece_mass"], method="fd")
self.declare_partials("mIxy", ["mI"], method="fd")
self.declare_partials("mIxz", ["mI"], method="fd")
self.declare_partials("mIyy", ["gravity_foundation_mass", "mI", "transition_piece_mass"], method="fd")
self.declare_partials("mIyz", ["mI"], method="fd")
self.declare_partials("mIzz", ["gravity_foundation_mass", "mI", "transition_piece_mass"], method="fd")
self.declare_partials("midx", [], method="fd")
self.declare_partials("mrhox", ["mrho"], method="fd")
self.declare_partials("mrhoy", ["mrho"], method="fd")
self.declare_partials("mrhoz", ["mrho"], method="fd")
self.declare_partials("plidx", [], method="fd")
def compute(self, inputs, outputs):
n_height = self.options["n_height"]
nFull = get_nfull(n_height)
z = inputs["z_full"]
# Prepare RNA, transition piece, and gravity foundation (if any applicable) for "extra node mass"
itrans = util.find_nearest(z, inputs["transition_piece_height"])
mtrans = inputs["transition_piece_mass"]
Itrans = inputs["transition_piece_I"]
mgrav = inputs["gravity_foundation_mass"]
Igrav = inputs["gravity_foundation_I"]
# Note, need len()-1 because Frame3DD crashes if a mass is added at the end node
outputs["midx"] = np.array([nFull - 1, itrans, 0], dtype=np.int_)
outputs["m"] = np.array([inputs["mass"], mtrans, mgrav]).flatten()
outputs["mIxx"] = np.array([inputs["mI"][0], Itrans[0], Igrav[0]]).flatten()
outputs["mIyy"] = np.array([inputs["mI"][1], Itrans[1], Igrav[1]]).flatten()
outputs["mIzz"] = np.array([inputs["mI"][2], Itrans[2], Igrav[2]]).flatten()
outputs["mIxy"] = np.array([inputs["mI"][3], Itrans[3], Igrav[3]]).flatten()
outputs["mIxz"] = np.array([inputs["mI"][4], Itrans[4], Igrav[4]]).flatten()
outputs["mIyz"] = np.array([inputs["mI"][5], Itrans[5], Igrav[5]]).flatten()
outputs["mrhox"] = np.array([inputs["mrho"][0], 0.0, 0.0]).flatten()
outputs["mrhoy"] = np.array([inputs["mrho"][1], 0.0, 0.0]).flatten()
outputs["mrhoz"] = np.array([inputs["mrho"][2], 0.0, 0.0]).flatten()
# Prepare point forces at RNA node
outputs["plidx"] = np.array([nFull - 1], dtype=np.int_) # -1 b/c same reason as above
outputs["Fx"] = np.array([inputs["rna_F"][0]]).flatten()
outputs["Fy"] = np.array([inputs["rna_F"][1]]).flatten()
outputs["Fz"] = np.array([inputs["rna_F"][2]]).flatten()
outputs["Mxx"] = np.array([inputs["rna_M"][0]]).flatten()
outputs["Myy"] = np.array([inputs["rna_M"][1]]).flatten()
outputs["Mzz"] = np.array([inputs["rna_M"][2]]).flatten()
# Prepare for reactions: rigid at tower base
if self.options["monopile"] and not self.options["gravity_foundation"]:
if self.options["soil_springs"]:
z_soil = inputs["z_soil"]
k_soil = inputs["k_soil"]
z_pile = z[z <= (z[0] + 1e-1 + np.abs(z_soil[0]))]
if z_pile.size != 4:
print(z)
print(z_soil)
print(z_pile)
raise ValueError("Please use only one section for submerged pile for now")
k_mono = np.zeros((z_pile.size, 6))
for k in range(6):
k_mono[:, k] = np.interp(z_pile + np.abs(z_soil[0]), z_soil, k_soil[:, k])
outputs["kidx"] = np.arange(len(z_pile), dtype=np.int_)
outputs["kx"] = np.array([k_mono[:, 0]])
outputs["ky"] = np.array([k_mono[:, 2]])
outputs["kz"] = np.zeros(k_mono.shape[0])
outputs["kz"][0] = np.array([k_mono[0, 4]])
outputs["ktx"] = np.array([k_mono[:, 1]])
outputs["kty"] = np.array([k_mono[:, 3]])
outputs["ktz"] = np.array([k_mono[:, 5]])
else:
z_pile = z[z <= (z[0] + 1e-1 + inputs["suctionpile_depth"])]
npile = z_pile.size
if npile != 4:
print(z)
print(z_pile)
print(inputs["suctionpile_depth"])
raise ValueError("Please use only one section for submerged pile for now")
outputs["kidx"] = np.arange(npile, dtype=np.int_)
outputs["kx"] = outputs["ky"] = outputs["kz"] = RIGID * np.ones(npile)
outputs["ktx"] = outputs["kty"] = outputs["ktz"] = RIGID * np.ones(npile)
else:
outputs["kidx"] = np.array([0], dtype=np.int_)
outputs["kx"] = outputs["ky"] = outputs["kz"] = np.array([RIGID])
outputs["ktx"] = outputs["kty"] = outputs["ktz"] = np.array([RIGID])
class TowerPostFrame(om.ExplicitComponent):
"""
Postprocess results from Frame3DD.
Parameters
----------
z_full : numpy array[nFull], [m]
location along tower. start at bottom and go to top
d_full : numpy array[nFull], [m]
effective tower diameter for section
t_full : numpy array[nFull-1], [m]
effective shell thickness for section
E_full : numpy array[nFull-1], [N/m**2]
modulus of elasticity
sigma_y_full : numpy array[nFull-1], [N/m**2]
yield stress
Fz : numpy array[nFull-1], [N]
Axial force in vertical z-direction in cylinder structure.
Mxx : numpy array[nFull-1], [N*m]
Moment about x-axis in cylinder structure.
Myy : numpy array[nFull-1], [N*m]
Moment about y-axis in cylinder structure.
axial_stress : numpy array[nFull-1], [N/m**2]
axial stress in tower elements
shear_stress : numpy array[nFull-1], [N/m**2]
shear stress in tower elements
hoop_stress : numpy array[nFull-1], [N/m**2]
hoop stress in tower elements
tower_deflection_in : numpy array[nFull], [m]
Deflection of tower nodes in yaw-aligned +x direction
life : float
fatigue life of tower
freqs : numpy array[NFREQ], [Hz]
Natural frequencies of the structure
x_mode_freqs : numpy array[NFREQ2]
Frequencies associated with mode shapes in the x-direction
y_mode_freqs : numpy array[NFREQ2]
Frequencies associated with mode shapes in the y-direction
x_mode_shapes : numpy array[NFREQ2, 5]
6-degree polynomial coefficients of mode shapes in the x-direction
y_mode_shapes : numpy array[NFREQ2, 5]
6-degree polynomial coefficients of mode shapes in the y-direction
Returns
-------
structural_frequencies : numpy array[NFREQ], [Hz]
First and second natural frequency
fore_aft_freqs : numpy array[NFREQ2]
Frequencies associated with mode shapes in the tower fore-aft direction
side_side_freqs : numpy array[NFREQ2]
Frequencies associated with mode shapes in the tower side-side direction
fore_aft_modes : numpy array[NFREQ2, 5]
6-degree polynomial coefficients of mode shapes in the tower fore-aft direction
(without constant term)
side_side_modes : numpy array[NFREQ2, 5]
6-degree polynomial coefficients of mode shapes in the tower side-side direction
(without constant term)
tower_deflection : numpy array[nFull], [m]
Deflection of tower nodes in yaw-aligned +x direction
top_deflection : float, [m]
Deflection of tower top in yaw-aligned +x direction
stress : numpy array[nFull-1]
Von Mises stress utilization along tower at specified locations. Includes safety
factor.
shell_buckling : numpy array[nFull-1]
Shell buckling constraint. Should be < 1 for feasibility. Includes safety
factors
global_buckling : numpy array[nFull-1]
Global buckling constraint. Should be < 1 for feasibility. Includes safety
factors
turbine_F : numpy array[3], [N]
Total force on tower+rna
turbine_M : numpy array[3], [N*m]
Total x-moment on tower+rna measured at base
"""
def initialize(self):
self.options.declare("n_height")
self.options.declare("modeling_options")
# self.options.declare('nDEL')
def setup(self):
n_height = self.options["n_height"]
nFull = get_nfull(n_height)
# effective geometry -- used for handbook methods to estimate hoop stress, buckling, fatigue
self.add_input("z_full", np.zeros(nFull), units="m")
self.add_input("d_full", np.zeros(nFull), units="m")
self.add_input("t_full", np.zeros(nFull - 1), units="m")
# Material properties
self.add_input("E_full", np.zeros(nFull - 1), units="N/m**2", desc="modulus of elasticity")
self.add_input(
"G_full",
np.zeros(nFull - 1),
units="Pa",
desc="Isotropic shear modulus of the materials along the tower sections.",
)
self.add_input(
"rho_full", np.zeros(nFull - 1), units="kg/m**3", desc="Density of the materials along the tower sections."
)
self.add_input("sigma_y_full", np.zeros(nFull - 1), units="N/m**2", desc="yield stress")
# Processed Frame3DD outputs
self.add_input("Fz", np.zeros(nFull - 1), units="N")
self.add_input("Mxx", np.zeros(nFull - 1), units="N*m")
self.add_input("Myy", np.zeros(nFull - 1), units="N*m")
self.add_input("axial_stress", val=np.zeros(nFull - 1), units="N/m**2")
self.add_input("shear_stress", val=np.zeros(nFull - 1), units="N/m**2")
self.add_input("hoop_stress", val=np.zeros(nFull - 1), units="N/m**2")
self.add_input("tower_deflection_in", val=np.zeros(nFull), units="m")
# safety factors
# self.add_input('gamma_f', 1.35, desc='safety factor on loads')
# self.add_input('gamma_m', 1.1, desc='safety factor on materials')
# self.add_input('gamma_n', 1.0, desc='safety factor on consequence of failure')
# self.add_input('gamma_b', 1.1, desc='buckling safety factor')
# self.add_input('gamma_fatigue', 1.755, desc='total safety factor for fatigue')
# fatigue parameters
self.add_input("life", 20.0)
# self.add_input('m_SN', 4, desc='slope of S/N curve')
# self.add_input('DC', 80.0, desc='standard value of stress')
# self.add_input('z_DEL', np.zeros(nDEL), units='m', desc='absolute z coordinates of corresponding fatigue parameters')
# self.add_input('M_DEL', np.zeros(nDEL), desc='fatigue parameters at corresponding z coordinates')
# Frequencies
NFREQ2 = int(NFREQ / 2)
self.add_input("freqs", val=np.zeros(NFREQ), units="Hz", desc="Natural frequencies of the structure")
self.add_input(
"x_mode_shapes",
val=np.zeros((NFREQ2, 5)),
desc="6-degree polynomial coefficients of mode shapes in the x-direction (x^2..x^6, no linear or constant term)",
)
self.add_input(
"y_mode_shapes",
val=np.zeros((NFREQ2, 5)),
desc="6-degree polynomial coefficients of mode shapes in the x-direction (x^2..x^6, no linear or constant term)",
)
self.add_input(
"x_mode_freqs", val=np.zeros(NFREQ2), desc="Frequencies associated with mode shapes in the x-direction"
)
self.add_input(
"y_mode_freqs", val=np.zeros(NFREQ2), desc="Frequencies associated with mode shapes in the y-direction"
)
# outputs
self.add_output(
"structural_frequencies", np.zeros(NFREQ), units="Hz", desc="First and second natural frequency"
)
self.add_output(
"fore_aft_modes",
np.zeros((NFREQ2, 5)),
desc="6-degree polynomial coefficients of mode shapes in the tower fore-aft direction (x^2..x^6, no linear or constant term)",
)
self.add_output(
"side_side_modes",
|
np.zeros((NFREQ2, 5))
|
numpy.zeros
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
class BodyMap:
def __init__(self):
im_link = 'https://raw.githubusercontent.com/MuteJester/MediPlot/master/MediPlot/Body_Sil.png'
self.body_sil = plt.imread(im_link)
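# Map human-readable body-region names to the setter methods that recolor that region of the silhouette.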
self.bmap = {'head': self.set_head, 'legs': self.set_legs, 'les': self.set_right_leg,
'left leg': self.set_left_leg, 'right leg': self.set_right_leg, 'right arm': self.set_right_arm,
'left arm': self.set_left_arm, 'torso': self.set_torso, 'arms': self.set_arms,
'waist': self.set_waist,
'neck': self.set_neck, 'left hand': self.set_left_hand, 'right hand': self.set_right_hand,
'left foot': self.set_left_foot, 'right foot': self.set_right_foot,
'upper right arm':self.set_upper_right_arm,'upper left arm':self.set_upper_left_arm,
'right forearm':self.set_right_forearm,'left forearm':self.set_left_forearm,
'right thigh':self.set_right_thigh,'left thigh':self.set_left_thigh,
'right lower leg':self.set_right_lower_leg,'left lower leg':self.set_left_lower_leg}
def list_of_targets(self):
return ['head', 'legs', 'right leg', 'left leg', 'right arm', 'left arm', 'torso', 'arms', 'waist', 'neck',
'left hand', 'right hand','upper right arm','upper left arm','right forearm','left forearm',
'right thigh','left thigh','right lower leg','left lower leg']
def set_head(self, value):
head = self.body_sil[0:270, 300:580]
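# Only the silhouette (non-white) pixels within the head bounding box are meant to be recolored; white background pixels are left as-is.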
self.body_sil[0:270, 300:580] =
completion: np.where(head == [1, 1, 1], head, value)
api: numpy.where

prompt:
import sys
import threading
import random
import os
from functools import partial
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from PIL import Image
sns.set()
matplotlib.use("TkAgg")
tt_table = np.zeros((3, 6), np.int32)
stroke_row_col = []
def warp_coordinates(x, y, matrix):
print('warping')
print(x, y, 'points')
p = (x, y)
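# Apply the 3x3 perspective (homography) matrix to the point and divide by the homogeneous term to return pixel coordinates.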
px = (matrix[0][0] * p[0] + matrix[0][1] * p[1] + matrix[0][2]) / \
((matrix[2][0] * p[0] + matrix[2][1] * p[1] + matrix[2][2]))
py = (matrix[1][0] * p[0] + matrix[1][1] * p[1] + matrix[1][2]) / \
((matrix[2][0] * p[0] + matrix[2][1] * p[1] + matrix[2][2]))
print(px, py, 'warped points')
return [int(px), int(py)]
def visualize_strokes():
stroke_data = pd.read_csv('final_result.csv')
prev_index = 1
strokes_x_y = []
strokes = []
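# Group the tracked (x, y) points by stroke_number so each stroke can be drawn as its own polyline.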
for pos, value in enumerate(stroke_data.iterrows()):
if value[1]['stroke_number'] != prev_index:
prev_index = value[1]['stroke_number']
strokes_x_y.append(np.asarray(strokes))
strokes = []
x = value[1]['x']
y = value[1]['y']
strokes.append(np.asarray([x,y]))
strokes_x_y.append(np.asarray(strokes))
path = './pictures/table_image.png'
image = cv2.imread(path)
center_coordinates = (120, 50)
for x, strokes in enumerate(strokes_x_y):
colors = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
img_copy = image.copy()
for i in range(0, len(strokes)-1):
start_point = strokes[i]
end_point = strokes[i+1]
start_point = start_point.astype('int32')
end_point = end_point.astype('int32')
start_x, start_y = start_point
end_x, end_y = end_point
start_x = int(start_x*960/1920)
start_y = int(start_y*540/1080)
end_x = int(end_x*960/1920)
end_y = int(end_y*540/1080)
offset_ = 0 # int(150*540/1080)
# image = cv2.line(image, (start_x, start_y+offset_), (end_x, end_y+offset_), colors, thickness=1)
# image = cv2.resize(image, (960, 540))
cv2.line(img_copy, (start_x, start_y+offset_), (end_x, end_y+offset_), colors, thickness=2)
img_copy = cv2.resize(img_copy, (960, 540))
cv2.imshow("result", img_copy)
cv2.waitKey(300)
cv2.destroyAllWindows()
def read_coordinates():
f = open("coordinates.txt", 'r')
data_ = f.read()
data = data_.strip()
str_table_coordinates = data.split(' ')
table_coordinates = []
for i in str_table_coordinates:
table_coordinates.append(list(map(int, (i.split(',')))))
return table_coordinates
def get_transposed_coordinates(x, y):
table_coordinates = read_coordinates()
table_coordinates_corners = np.array([
table_coordinates[0],
table_coordinates[2],
table_coordinates[3],
table_coordinates[5]
], np.float32)
print(table_coordinates, 'table coords')
warped_dimensions = np.array([[0, 0], [1920, 0], [0, 1080], [1920, 1080]], np.float32)
matrix = cv2.getPerspectiveTransform(table_coordinates_corners,
warped_dimensions)
x, y = warp_coordinates(x, y, matrix)
return get_zone([x,y])
def get_zone(ball_coordinates, no_columns=6, no_rows=3):
global tt_table
col_seg = 1920 / no_columns
row_seg = 1080 / no_rows
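# Bin the bounce position into a no_rows x no_columns grid over the 1920x1080 warped table and increment the matching cell of the global heatmap.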
bounce_col = 0
bounce_row = 0
for i in range(1, no_columns+1):
if ball_coordinates[0] < col_seg * i:
bounce_col = i
break
for i in range(1, no_rows+1):
if ball_coordinates[1] < row_seg * i:
bounce_row = i
break
tt_table[bounce_row-1][bounce_col-1] += 1
return [bounce_row, bounce_col]
def visualize_warped_table():
global stroke_row_col
result = pd.read_csv('final_result.csv')
left_bounces = result.groupby("stroke_number").first().reset_index()
right_bounces = result.groupby("stroke_number").last().reset_index()
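# The first and last tracked points of each stroke are treated as the bounces on the left and right halves of the table and mapped into warped table coordinates.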
for bounces in zip(left_bounces.iterrows(), right_bounces.iterrows()):
left, right = bounces[0][1], bounces[1][1]
left_ = get_transposed_coordinates(left['x'], left['y'])
right_ = get_transposed_coordinates(right['x'], right['y'])
stroke_row_col.append(left_)
stroke_row_col.append(right_)
# json.dump(stroke_data, file_)
def display_plot():
global tt_table
ax = sns.heatmap(tt_table, cbar=False, annot=True, fmt="d")
plt.savefig('plot.jpg')
plt.close()
visualize_warped_table()
print(stroke_row_col, 'wtf')
# display_plot()
def animate_heat_map():
global stroke_row_col
fig = plt.figure()
tt =
completion: np.zeros((3, 6), np.int32)
api: numpy.zeros

prompt:
import copy
import networkx as nx
import numpy as np
import config as cf
import random
import math
from network import Network
from network import Energy
import matplotlib.pyplot as plt
from random import *
def Get_Fitness(network,SMID,Alive_Node):
Fitness = 0
Consume = 0
Cover = np.zeros(cf.N_NODE+1)
INNER = []
OUTER = []
RTBS = []
for T in SMID:
RTBS.append(network.node[T]['RTBS'])
CENTER = np.median(RTBS)
for i in Alive_Node:
x1,y1 = network.node[i]['pos']
NNDist = 1000
NNID = 0
for j in SMID:
if i == j:
if network.node[i]['RTBS']<CENTER:
INNER.append(i)
Consume += Energy.ETX(network,i,0,cf.L) + cf.E_DA
else:
OUTER.append(i)
continue
x2,y2 = network.node[j]['pos']
NewDist = math.sqrt((x1-x2)**2+(y1-y2)**2)
if NewDist < NNDist:
NNDist = NewDist
NNID = j
Cover[NNID] += 1
Consume += Energy.ETX(network,i,NNID,cf.NCH_L)+cf.E_DA
for k in OUTER:
NNID = 0
NNDist = 1000
xo,yo = network.node[k]['pos']
for j in INNER:
xi,yi = network.node[j]['pos']
NewDist = math.sqrt((xi-xo)**2+(yi-yo)**2)
if NewDist <= NNDist:
NNID = j
NNDist = NewDist
Consume += Energy.ETX(network,k,NNID,cf.NCH_L) + cf.E_DA
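# Fitness is the reciprocal of total energy consumption (f1) plus cluster-load imbalance (f2, the spread between the most and least covered cluster heads); lower cost gives higher fitness.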
f1 = Consume
f2 = np.max(Cover) - np.min(Cover)
Fitness = 1/(f1+f2)
return Fitness
def Optimizer(network, Alive_Node, Update=False, R=30, In_Median=30, First=False):
NET_MAX = 0
SSMO_NET = nx.create_empty_copy(network)
SSMO_CHID = []
NB_Cluster = max(round(cf.P_CH*len(Alive_Node)),1)
update = 0
if Update == True:
Rmax = 0
for i in Alive_Node:
R_tmp = math.sqrt((SSMO_NET.node[i]['RTBS']**2)/NB_Cluster)
if R_tmp > Rmax:
Rmax = R_tmp
if Rmax != R:
R = Rmax
update = 1
if update == 1:
INNER = []
for i in Alive_Node:
if SSMO_NET.node[i]['RTBS'] < R:
INNER.append(i)
SSMO_NET.node[i]['Cover'] = []
for j in Alive_Node:
if i == j:
continue
x1,y1 = SSMO_NET.node[i]['pos']
x2,y2 = SSMO_NET.node[j]['pos']
D = math.sqrt((x1-x2)**2 + (y1-y2)**2)
if D < R:
SSMO_NET.node[i]['Cover'].append(j)
In_Median = np.median(INNER)
if len(INNER) ==0:
In_Median = 0
## Initializing Phase
SM_Arr = []
MG = 5
MIR = 100
Swarm_Size = 40
FIT = []
MGLL = 20
MLLL = 8
Group0 = []
Group1 = []
Group2 = []
Group3 = []
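# Each spider monkey encodes one candidate solution: NB_Cluster cluster-head IDs sampled without replacement from the alive nodes.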
for i in range(0,Swarm_Size):
choice = np.random.choice(Alive_Node,NB_Cluster,replace = False)
SM_Arr.append(choice)
Group0.append(i)
FIT.append(Get_Fitness(SSMO_NET,choice,Alive_Node))
Group = 1
GLID = np.where(FIT==np.max(FIT))[0][0]
LLID_ARR = np.zeros(MG,dtype=np.int32)
LLID_ARR[0] = GLID
Pr = 0.1
GLL = 0
for Iter in range(0,MIR):
LLL = 0
## Local Leader Phase
Pr += (0.4-0.1)/MIR
for i in range(0,Group):
if i == 0:
temp = Group0
if i == 1:
temp = Group1
if i == 2:
temp = Group2
if i == 3:
temp = Group3
LLID = LLID_ARR[i]
LLMAX = FIT[LLID]
LMAX = FIT[LLID]
MAXFIT = FIT[LLID]
for j in temp:
if j == LLID or j == GLID:
continue
if random() < Pr:
Prob_Arr = []
LL = SM_Arr[LLID]
SM = SM_Arr[j]
Rand = np.random.choice(temp,1)[0]
SMR = SM_Arr[Rand]
ARANGE = np.hstack([SM,LL,SMR])
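# New candidate cluster heads are sampled without replacement from the union of the current monkey, its local leader, and a random group member, weighted by the softmax of (1-b-d, b, d).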
b = uniform(0,1)
d = uniform(-1,1)
PROBSM = np.ones(NB_Cluster) * (1-b-d)
PROBLL = np.ones(NB_Cluster) * (b)
PROBSMR = np.ones(NB_Cluster) * (d)
Prob_Arr = np.hstack([PROBSM,PROBLL,PROBSMR])
Prob_Arr = np.exp(Prob_Arr)/np.sum(np.exp(Prob_Arr))
choice = np.random.choice(ARANGE,NB_Cluster,replace = False, p = Prob_Arr/np.sum(Prob_Arr))
SM_Arr[j] = choice
FIT[j] = Get_Fitness(SSMO_NET,choice,Alive_Node)
if LMAX < FIT[j]:
LMAX = FIT[j]
LLID_ARR[i] = j
if LLMAX == LMAX:
LLL += 1
## Global Leader Phase
GLID = np.where(FIT==np.max(FIT))[0][0]
for i in range(0,Swarm_Size-1):
GGLMAX = FIT[GLID]
GLMAX = FIT[GLID]
if i == GLID:
continue
Prob = 0.9*(FIT[i]/FIT[GLID]) + 0.1
if Prob > random():
GL = SM_Arr[GLID]
SM = SM_Arr[i]
Rand = np.random.choice(Group0,1)[0]
SMR = SM_Arr[Rand]
ARANGE = np.hstack([SM,GL,SMR])
b = uniform(0,1)
d = uniform(-1,1)
PROBSM = np.ones(NB_Cluster) * (1-b-d)
PROBGL = np.ones(NB_Cluster) * (b)
PROBSMR = np.ones(NB_Cluster) * (d)
Prob_Arr = np.hstack([PROBSM,PROBGL,PROBSMR])
Prob_Arr = np.exp(Prob_Arr)/np.sum(np.exp(Prob_Arr))
choice = np.random.choice(ARANGE,NB_Cluster,replace = False, p = Prob_Arr/np.sum(Prob_Arr))
SM_Arr[i] = choice
FIT[i] = Get_Fitness(SSMO_NET,choice,Alive_Node)
if FIT[i]>GLMAX:
GLMAX = FIT[i]
GLID = i
if GLMAX == GGLMAX:
GLL += 1
## Local Decision Phase
# if LLL == MLLL:
## Global Decision Phase
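# If the global leader has stagnated for MGLL iterations, split the swarm into one more group and redistribute the monkeys among the groups at random.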
if GLL == MGLL:
GLL = 0
Group += 1
Choice_Node = np.arange(0,Swarm_Size,1)
if Group == 2:
Group0 = np.random.choice(Choice_Node,int(len(Choice_Node)/Group),replace=False)
Choice_Node = list(set(Choice_Node)-set(Group0))
Group1 =
completion: np.array(Choice_Node)
api: numpy.array

prompt:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
#
# Adapted by: <NAME>, ETH (<EMAIL>)
import hashlib
import math
import os.path
import random
import os
import re
import glob
import time
import torch
import torchaudio
from collections import Counter, OrderedDict
import soundfile as sf
import numpy as np
import tensorflow as tf
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
BACKGROUND_NOISE_LABEL = '_background_noise_'
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
def which_set(filename, validation_percentage, testing_percentage):
# Split dataset in training, validation, and testing set
# Should be modified to load validation data from validation_list.txt
# Should be modified to load testing data from testing_list.txt
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(hash_name.encode()).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
class AudioProcessor(object):
# Prepare data
def __init__(self, training_parameters, data_processing_parameters):
self.data_directory = training_parameters['data_dir']
self.generate_background_noise()
self.generate_data_dictionary(training_parameters)
self.data_processing_parameters = data_processing_parameters
def generate_data_dictionary(self, training_parameters):
# For each data set, generate a dictionary containing the path to each file, its label, and its speaker.
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(training_parameters['wanted_words']):
wanted_words_index[wanted_word] = index + 2
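# Indices 0 and 1 are reserved for the silence and unknown labels, so wanted words start at index 2.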
# Prepare data sets
self.data_set = {'validation': [], 'testing': [], 'training': []}
unknown_set = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Find all audio samples
search_path = os.path.join(self.data_directory, '*', '*.wav')
for wav_path in glob.glob(search_path):
_ , word = os.path.split(os.path.dirname(wav_path))
speaker_id = wav_path.split('/')[8].split('_')[0] # Hardcoded, should use regex.
word = word.lower()
# Ignore background noise, as it has been handled by generate_background_noise()
if word == BACKGROUND_NOISE_LABEL:
continue
all_words[word] = True
# Determine the set to which the word should belong
set_index = which_set(wav_path, training_parameters['validation_percentage'], training_parameters['testing_percentage'])
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
# If we use 35 classes - all are known, hence no unknown samples
if word in wanted_words_index:
self.data_set[set_index].append({'label': word, 'file': wav_path, 'speaker': speaker_id})
else:
unknown_set[set_index].append({'label': word, 'file': wav_path, 'speaker': speaker_id})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(training_parameters['wanted_words']):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_set['training'][0]['file']
# Add silence and unknown words to each set
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_set[set_index])
silence_size = int(math.ceil(set_size * training_parameters['silence_percentage'] / 100))
for _ in range(silence_size):
self.data_set[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path,
'speaker': "None"
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_set[set_index])
unknown_size = int(math.ceil(set_size * training_parameters['unknown_percentage'] / 100))
self.data_set[set_index].extend(unknown_set[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_set[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(training_parameters['wanted_words'])
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
def generate_background_noise(self):
# Load background noise, used to augment clean speech
self.background_noise = []
background_dir = os.path.join(self.data_directory, BACKGROUND_NOISE_LABEL)
if not os.path.exists(background_dir):
return self.background_noise
search_path = os.path.join(self.data_directory, BACKGROUND_NOISE_LABEL,'*.wav')
for wav_path in glob.glob(search_path):
# List of tensor, each one is a background noise
sf_loader, _ = sf.read(wav_path)
wav_file = torch.Tensor(np.array([sf_loader]))
self.background_noise.append(wav_file[0])
if not self.background_noise:
raise Exception('No background wav files were found in ' + search_path)
def get_size(self, mode):
# Compute data set size
return len(self.data_set[mode])
def get_data(self, mode, training_parameters):
# Prepare and return data (utterances and labels) for inference
# Pick one of the partitions to choose samples from
candidates = self.data_set[mode]
if training_parameters['batch_size'] == -1:
samples_number = len(candidates)
else:
samples_number = max(0, min(training_parameters['batch_size'], len(candidates)))
# Create a data placeholder
data_placeholder = np.zeros((samples_number, self.data_processing_parameters['spectrogram_length'],self.data_processing_parameters['feature_bin_count']),dtype='float32' )
labels_placeholder = np.zeros(samples_number)
# Required for noise analysis
use_background = (self.background_noise and (mode == 'training'))
pick_deterministically = (mode != 'training')
for i in range(0, samples_number):
# Pick which audio sample to use.
if training_parameters['batch_size'] == -1 or pick_deterministically:
# The randomness is eliminated here to train on the same batch ordering
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# Compute time shift offset
if training_parameters['time_shift_samples'] > 0:
time_shift_amount = np.random.randint(-training_parameters['time_shift_samples'], training_parameters['time_shift_samples'])
else:
time_shift_amount = 0
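# A positive shift pads (delays) the start of the waveform; a negative shift pads the end and offsets the crop so the audio is advanced instead.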
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
data_augmentation_parameters = {
'wav_filename': sample['file'],
'time_shift_padding': time_shift_padding,
'time_shift_offset': time_shift_offset,
}
# Select background noise to mix in.
if use_background or sample['label'] == SILENCE_LABEL:
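# Crop a random window of the desired length from one of the loaded background-noise recordings to augment the utterance (or stand in for it in silence samples).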
background_index = np.random.randint(len(self.background_noise))
background_samples = self.background_noise[background_index].numpy()
assert (len(background_samples) > self.data_processing_parameters['desired_samples'])
background_offset = np.random.randint(0, len(background_samples) - self.data_processing_parameters['desired_samples'])
background_clipped = background_samples[background_offset:(background_offset + self.data_processing_parameters['desired_samples'])]
background_reshaped = background_clipped.reshape([self.data_processing_parameters['desired_samples'], 1])
if sample['label'] == SILENCE_LABEL:
background_volume =
completion: np.random.uniform(0, 1)
api: numpy.random.uniform

prompt:
# -*- coding: utf-8 -*-
import scipy.sparse as sp
import networkx as nx
import numpy as np
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
# The zeroth element of the tuple contains the cell location of each
# non-zero value in the sparse matrix, each element looks like[i,j]
# The first element of the tuple contains the value at each cell location
# in the sparse matrix
# The second element of the tuple contains the full shape of the sparse
# matrix
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_adj(adj):
'''
Symmetrically normalize adjacency matrix.
Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.
'''
adj_tilde = adj + np.identity(n=adj.shape[0])
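# Symmetric normalization of the GCN propagation matrix: A_hat = D_tilde^{-1/2} (A + I) D_tilde^{-1/2}.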
# np.squeeze() removes single-dimensional entries from the array's shape (i.e., drops axes whose size is 1)
d_tilde_diag = np.squeeze(np.sum(np.array(adj_tilde), axis=1))
d_tilde_inv_sqrt_diag = np.power(d_tilde_diag, -1/2)
d_tilde_inv_sqrt =
completion: np.diag(d_tilde_inv_sqrt_diag)
api: numpy.diag