prompt | completion | api |
---|---|---|
#-*- coding: utf-8 -*-
import numpy as n
from scipy.io import wavfile as w
H=n.hstack
V=n.vstack
f_a = 44100. # Hz, sampling frequency
############## 2.2.1 Lookup table (LUT)
Lambda_tilde=Lt=1024*16
# Sine:
foo=n.linspace(0,2*n.pi,Lt,endpoint=False)
S_i=n.sin(foo) # one period of the sinusoid with Lt samples
# Square:
Q_i=n.hstack( ( n.ones(Lt//2)*-1 , n.ones(Lt//2) ) )
# Triangle:
foo=n.linspace(-1,1,Lt//2,endpoint=False)
Tr_i=n.hstack( ( foo , foo*-1 ) )
# Sawtooth:
D_i=n.linspace(-1,1,Lt)
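# --- Illustrative sketch (not part of the original script) of the plain
# lookup-table synthesis that v() below generalizes: for a fixed frequency
# f_tone, step through the table by f_tone*Lt/f_a positions per output sample
# and read it cyclically. The tone frequency and 1-second duration are assumed.
f_tone=440.
idx=n.arange(int(f_a))*f_tone*Lt/f_a
tone=S_i[n.array(idx,dtype=int)%int(Lt)]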
def v(f=200,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i):
Lambda=int(f_a*d)
ii=n.arange(Lambda)
Lv=float(len(tabv))
Gammav_i=n.floor(ii*fv*Lv/f_a) # indices into the LUT
Gammav_i=n.array(Gammav_i,dtype=int)
# vibrato variation pattern at each sample
Tv_i=tabv[Gammav_i%int(Lv)]
# frequency in Hz at each sample
F_i=f*( 2.**( Tv_i*nu/12. ) )
# table increment per sample
D_gamma_i=F_i*(Lt/float(f_a))
Gamma_i=n.cumsum(D_gamma_i) # cumulative position in the table
Gamma_i=n.floor( Gamma_i) # now the indices
Gamma_i=n.array( Gamma_i, dtype=int) # now the indices
return tab[Gamma_i%int(Lt)] # look up the indices in the table
def A(fa=2.,V_dB=10.,d=2.,taba=S_i):
Lambda=int(f_a*d)
ii=n.arange(Lambda)
Lt=float(len(taba))
Gammaa_i=n.floor(ii*fa*Lt/f_a) # indices into the LUT
Gammaa_i=n.array(Gammaa_i,dtype=int)
# amplitude variation at each sample
A_i=taba[Gammaa_i%int(Lt)]
A_i=A_i*10.**(V_dB/20.)
return A_i
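# --- Usage sketch (an assumption, not part of the original script): combine the
# vibrato oscillator v() and the tremolo envelope A() above and write the result
# to a WAV file with scipy.io.wavfile. The 16-bit normalization is an assumed
# convention.
note=v(f=220.,d=2.,tab=Tr_i)*A(fa=1.5,V_dB=3.,d=2.)
note=n.array(32767*note/n.max(n.abs(note)),dtype=n.int16)
w.write("note_sketch.wav",int(f_a),note)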
def adsr(som,A=10.,D=20.,S=-20.,R=100.,xi=1e-2):
a_S=10**(S/20.)
Lambda=len(som)
Lambda_A=int(A*f_a*0.001)
Lambda_D=int(D*f_a*0.001)
Lambda_R=int(R*f_a*0.001)
ii=n.arange(Lambda_A,dtype=float)
A=ii/(Lambda_A-1)
A_i=A
ii=n.arange(Lambda_A,Lambda_D+Lambda_A,dtype=float)
D=1-(1-a_S)*( ( ii-Lambda_A )/( Lambda_D-1) )
A_i=n.hstack( (A_i, D ) )
S=
|
n.ones(Lambda-Lambda_R-(Lambda_A+Lambda_D),dtype=n.float)
|
numpy.ones
|
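# --- Standalone sketch (a generic linear ADSR, an assumption rather than the
# original function's continuation): the envelope is built by concatenating an
# attack ramp, a decay ramp, a sustain plateau (the n.ones(...) completion
# above, scaled to the sustain level a_S) and a release ramp, which is then
# multiplied sample-wise into the sound. Segment lengths match the defaults.
a_S=10**(-20./20.) # sustain level for S=-20 dB
att=n.linspace(0,1,441) # 10 ms attack at 44.1 kHz
dec=n.linspace(1,a_S,882) # 20 ms decay
rel=n.linspace(a_S,0,4410) # 100 ms release
sus=a_S*n.ones(44100-len(att)-len(dec)-len(rel)) # sustain plateau
env=n.hstack((att,dec,sus,rel)) # one-second envelope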
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
from simpa.utils import Tags
from simpa.utils.settings import Settings
from simpa.core.simulation_modules.reconstruction_module import ReconstructionAdapterBase
from simpa.core.device_digital_twins import LinearArrayDetectionGeometry
import numpy as np
import scipy.io as sio
import subprocess
import os
import inspect
class TimeReversalAdapter(ReconstructionAdapterBase):
"""
The time reversal adapter includes the time reversal reconstruction
algorithm implemented by the k-Wave toolkit into SIMPA.
Time reversal reconstruction uses the time series data and computes the forward simulation model
backwards in time::
Treeby, <NAME>., <NAME>, and <NAME>.
"Photoacoustic tomography in absorbing acoustic media using
time reversal." Inverse Problems 26.11 (2010): 115003.
"""
def get_acoustic_properties(self, input_data: dict, detection_geometry):
"""
This method extracts the acoustic tissue properties from the settings dictionary and
amends the information to the input_data.
:param input_data: a dictionary containing the information needed for time reversal.
:param detection_geometry: PA device that is used for reconstruction
"""
if Tags.ACOUSTIC_SIMULATION_3D not in self.component_settings or not \
self.component_settings[Tags.ACOUSTIC_SIMULATION_3D]:
axes = (0, 1)
else:
axes = (0, 2)
pa_device = detection_geometry
pa_device.check_settings_prerequisites(self.global_settings)
# spacing
if Tags.SPACING_MM in self.component_settings and self.component_settings[Tags.SPACING_MM]:
spacing_in_mm = self.component_settings[Tags.SPACING_MM]
elif Tags.SPACING_MM in self.global_settings and self.global_settings[Tags.SPACING_MM]:
spacing_in_mm = self.global_settings[Tags.SPACING_MM]
else:
raise AttributeError("Please specify a value for SPACING_MM")
detector_positions = detection_geometry.get_detector_element_positions_accounting_for_device_position_mm()
detector_positions_voxels = np.round(detector_positions / spacing_in_mm).astype(int)
volume_x_dim = int(np.ceil(self.global_settings[Tags.DIM_VOLUME_X_MM] / spacing_in_mm) + 1) # plus 1 to avoid an
volume_y_dim = int(np.ceil(self.global_settings[Tags.DIM_VOLUME_Y_MM] / spacing_in_mm) + 1) # off-by-one error with
volume_z_dim = int(np.ceil(self.global_settings[Tags.DIM_VOLUME_Z_MM] / spacing_in_mm) + 1) # matlab's indexing
if Tags.ACOUSTIC_SIMULATION_3D not in self.component_settings or not \
self.component_settings[Tags.ACOUSTIC_SIMULATION_3D]:
sizes = (volume_z_dim, volume_x_dim)
sensor_map = np.zeros(sizes)
sensor_map[detector_positions_voxels[:, 2]+1, detector_positions_voxels[:, 0]+1] = 1
else:
sizes = (volume_z_dim, volume_y_dim, volume_x_dim)
sensor_map = np.zeros(sizes)
sensor_map[detector_positions_voxels[:, 2]+1,
detector_positions_voxels[:, 1]+1,
detector_positions_voxels[:, 0]+1] = 1
# check that the spacing is large enough for all detector elements to be on the sensor map
det_elements_sensor_map = np.count_nonzero(sensor_map)
if det_elements_sensor_map != pa_device.number_detector_elements:
raise AttributeError("The spacing is too large to fit every detector element on the sensor map."
"Please increase it! "
f"Expected {pa_device.number_detector_elements} elements but it "
f"were {det_elements_sensor_map}.")
# TODO: Include possibility to
possible_acoustic_properties = [Tags.DATA_FIELD_SPEED_OF_SOUND,
Tags.DATA_FIELD_DENSITY,
Tags.DATA_FIELD_ALPHA_COEFF
]
input_data[Tags.KWAVE_PROPERTY_SENSOR_MASK] = sensor_map
for acoustic_property in possible_acoustic_properties:
if acoustic_property in self.component_settings:
try:
input_data[acoustic_property] = self.component_settings[acoustic_property]
except (ValueError, KeyError):
self.logger.error("{} not specified.".format(acoustic_property))
return input_data, spacing_in_mm
def reorder_time_series_data(self, time_series_sensor_data, detection_geometry):
"""
Reorders the time series data to match the order that is assumed by kwave
during image reconstruction with TimeReversal.
The main issue here is that, while forward modelling allows for the definition of
3D cuboid bounding boxes for the detector elements, TimeReversal does not implement
this feature.
Instead, a binary mask is given and its entries are indexed in a column-row-wise manner in
the output.
The default np.argsort() method does not yield the same result as expected by
k-Wave. Hence, this workaround.
"""
detector_positions = detection_geometry.get_detector_element_positions_base_mm()
angles = np.arctan2(detector_positions[:, 2], detector_positions[:, 0])
matlab_order = np.argsort(angles)
return time_series_sensor_data[matlab_order]
def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry):
input_data = dict()
# If the detection_geometry is anything other than linear, the time series data have to be reordered for matlab
if not isinstance(detection_geometry, LinearArrayDetectionGeometry):
time_series_sensor_data = self.reorder_time_series_data(time_series_sensor_data, detection_geometry)
input_data[Tags.DATA_FIELD_TIME_SERIES_DATA] = time_series_sensor_data
input_data, spacing_in_mm = self.get_acoustic_properties(input_data, detection_geometry)
acoustic_path = self.global_settings[Tags.SIMPA_OUTPUT_PATH] + ".mat"
possible_k_wave_parameters = [Tags.MODEL_SENSOR_FREQUENCY_RESPONSE,
Tags.KWAVE_PROPERTY_ALPHA_POWER, Tags.GPU, Tags.KWAVE_PROPERTY_PMLInside, Tags.KWAVE_PROPERTY_PMLAlpha, Tags.KWAVE_PROPERTY_PlotPML,
Tags.RECORDMOVIE, Tags.MOVIENAME,
Tags.SENSOR_DIRECTIVITY_PATTERN]
pa_device = detection_geometry
k_wave_settings = Settings({
Tags.SENSOR_NUM_ELEMENTS: pa_device.number_detector_elements,
Tags.SENSOR_DIRECTIVITY_SIZE_M: pa_device.detector_element_width_mm / 1000,
Tags.SENSOR_CENTER_FREQUENCY_HZ: pa_device.center_frequency_Hz,
Tags.SENSOR_BANDWIDTH_PERCENT: pa_device.bandwidth_percent,
Tags.SPACING_MM: spacing_in_mm
})
for parameter in possible_k_wave_parameters:
if parameter in self.component_settings:
k_wave_settings[parameter] = self.component_settings[parameter]
elif parameter in self.global_settings:
k_wave_settings[parameter] = self.global_settings[parameter]
if Tags.K_WAVE_SPECIFIC_DT in self.global_settings and Tags.K_WAVE_SPECIFIC_NT in self.global_settings:
k_wave_settings["dt"] = self.global_settings[Tags.K_WAVE_SPECIFIC_DT]
k_wave_settings["Nt"] = self.global_settings[Tags.K_WAVE_SPECIFIC_NT]
else:
num_samples = time_series_sensor_data.shape[1]
time_per_sample_s = 1 / (self.component_settings[Tags.SENSOR_SAMPLING_RATE_MHZ] * 1000000)
k_wave_settings["dt"] = time_per_sample_s
k_wave_settings["Nt"] = num_samples
input_data["settings"] = k_wave_settings
sio.savemat(acoustic_path, input_data, long_field_names=True)
if Tags.ACOUSTIC_SIMULATION_3D in self.component_settings and \
self.component_settings[Tags.ACOUSTIC_SIMULATION_3D]:
time_reversal_script = "time_reversal_3D"
axes = (0, 2)
else:
time_reversal_script = "time_reversal_2D"
axes = (0, 1)
base_script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cmd = list()
cmd.append(self.component_settings[Tags.ACOUSTIC_MODEL_BINARY_PATH])
cmd.append("-nodisplay")
cmd.append("-nosplash")
cmd.append("-automation")
cmd.append("-wait")
cmd.append("-r")
cmd.append("addpath('" + base_script_path + "');" +
time_reversal_script + "('" + acoustic_path + "');exit;")
cur_dir = os.getcwd()
os.chdir(self.global_settings[Tags.SIMULATION_PATH])
self.logger.info(cmd)
subprocess.run(cmd)
reconstructed_data = sio.loadmat(acoustic_path + "tr.mat")[Tags.DATA_FIELD_RECONSTRUCTED_DATA]
reconstructed_data = np.flipud(np.rot90(reconstructed_data, 1, axes))
field_of_view_mm = detection_geometry.get_field_of_view_mm()
field_of_view_voxels = (field_of_view_mm / spacing_in_mm).astype(np.int32)
self.logger.debug(f"FOV (voxels): {field_of_view_voxels}")
# If the crop range would be empty (from A to A), extend it to crop from A to A+1
x_offset_correct = 1 if (field_of_view_voxels[1] - field_of_view_voxels[0]) < 1 else 0
y_offset_correct = 1 if (field_of_view_voxels[3] - field_of_view_voxels[2]) < 1 else 0
z_offset_correct = 1 if (field_of_view_voxels[5] - field_of_view_voxels[4]) < 1 else 0
if len(
|
np.shape(reconstructed_data)
|
numpy.shape
|
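# --- Standalone sketch (plain NumPy, not SIMPA code) of the angle-based
# reordering used in reorder_time_series_data above: detector positions are
# turned into angles with arctan2, and argsort gives the permutation applied to
# the time series rows. The positions below are assumed example values.
import numpy as np
positions = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]])
angles = np.arctan2(positions[:, 2], positions[:, 0]) # angle in the x-z plane
order = np.argsort(angles) # -> array([3, 0, 1, 2])
time_series = np.arange(8).reshape(4, 2) # 4 detector channels, 2 time samples
reordered = time_series[order] # rows permuted into the k-Wave order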
'''
Stresses in a sphere compressed between rigid platens
Model from
HIRAMATSU, Y., and OKA, Y., 1966, Int. J. Rock Mech. Min. Sci., 3, 89.
With corrections from
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
Compression testing spherical particles for strength: Theory of the meridian crack test and implementation for microscopic fused quartz,
Journal of the Mechanics and Physics of Solids,
Volume 99,
2017,
Pages 70-92,
ISSN 0022-5096,
https://doi.org/10.1016/j.jmps.2016.11.009.
This code implements the mathematical model of HIRAMATSU and OKA. It models a
hard sphere being compressed between two stiff platens. It determines the stress
distribution within the sphere.
Each platen has a spherical indent of radius a, so the force is distributed over the contact area of the sphere
###
Code Written by:
<NAME>, at Oak Ridge National Laboratory
<EMAIL>
May 21, 2018
###
'''
#### Import Block ####
# the import block imports the needed modules and writes out a json file with
# version numbers so the run can be reproduced
file = open("ModuleVersions.json", 'w')
modules = {}
import os
import itertools
import sys
modules['Python'] = dict([('version', sys.version_info)])
import json
modules['json'] = dict([('version', json.__version__)])
import numpy
modules['numpy'] = dict([('version', numpy.__version__)])
from numpy.polynomial.legendre import legval as Leg
json.dump(modules, file, indent=4, sort_keys=True)
file.close()
#### END Import Block ####
def DiskCordGeneration(density,tol):
'''
This function creates a uniform distrubution of points on the disk, to be
sampled or calculated.
It generates points at a constant angle with a constant spacing
Additional points are added when two angles diverge far enough from each other
points are spaced from angle .0001 to pi/2
angle 0 causes divide by zero issues in the calculations
points are spaced from radius 0 to 1-tol
the calculations have gibbs instability at radius=1, so points are created some
distance tol from the edge of the disk for numeric stability
Inputs:
###
density: the spacing between points
format: float
Example: 0.05
smaller values increase the number of sample points
tol: defines the distance between the disk edge and the edge points
Format: float
example: .005
###
The code outputs a tuple containing the angles and the radius
example: (angles,r)
angles is a list of values
r is a list of lists,
where r[n] contains a list of radii corresponding to the nth angle
'''
# create initial points, at the borders of the disk
angles=[]
r=[]
angles.append(.0001)
r.append(numpy.append(numpy.arange(0,1-tol,density),1-tol))
angles.append(numpy.pi/2)
r.append(numpy.append(numpy.arange(density,1-tol,density),1-tol))
# loop adding angles until there is no more space
CandidateAngle = numpy.pi/4 # first angle is in between the first two
loop=0
while True:
# calculates the radius at which the new angle can place a point
r0=density/CandidateAngle
if r0>1:
break # end loop if the calculated radius exceeds 1
# creates new points starting at r0 for all the new angles.
for arc in range(0,2**loop):
angles.append(numpy.pi/2-CandidateAngle-CandidateAngle*2*arc)
r.append(numpy.append(numpy.arange(r0,1-tol,density),1-tol))
# at each angle addition, the angle is divided by 2
CandidateAngle=CandidateAngle/2
loop=loop+1
return (angles,r)
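# --- Usage sketch (illustrative only; the density and tol values are assumed):
# generate the polar sample grid and count the points it produces.
angles_demo, r_demo = DiskCordGeneration(density=0.05, tol=0.005)
total_points = sum(len(radii) for radii in r_demo) # one radius list per angle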
def Calcs(a,v,MaxIter,density,rtol,vtol,Verbose=True):
'''
This function performs the stress calculations on the compressed sphere.
It calls the DiskCordGeneration to get points, and calculates the stresses
at each angle, using vectorized operations.
Inputs:
###
a: the radius of the indenter
format: float
Example: 0.1
v: The poisson ratio of the sphere
Format: float
example: .33
MaxIter: The maximum number of iterations to perform.
The calculations use Legendre Polynomials, and more iterations will be more accurate
Format: Integer
example: 1000
If this value is set too low, and the rtol value is set too low, inaccurate
results at the disk edge may be obtained.
density: the spacing between points
format: float
Example: 0.05
smaller values increase the number of sample points
rtol: defines the distance between the disk edge and the edge points
format: float
example: .005
vtol: Defines the tolerance of the iterative calculations.
As each Legendre polynomial is added, the result will change by some value. If
the change of all the values along an angle is less than this number, the
answer is deemed accurate and is returned
format: float
example: .000001
Verbose: A switch to print diagnostic values to the console
Prints the iteration loop that it breaks on, and prints the number of angles left
to calculate
format: Bool
example: True
###
The code outputs a tuple containing the angles and radius of the sample points,
and the 3x3 stress matrix at each sample point.
example: (ang,r,StressTensor)
ang is a list of angle values
r is a list of radius values,
StressTensor is a list of 3x3 matrices, where StressTensor[n] is the stress
tensor at ang[n] and radius [n]
'''
# gets sample points for calculations
(angles,rlist)=DiskCordGeneration(density,rtol)
R=1 # unit sphere
alpha = numpy.arccos(1-2*(a/R)**2)/2 # sets alpha value, size of the platen contact area
# initialize values, legendre polynomials
Palpha=numpy.zeros(MaxIter)
PalphaM1=numpy.zeros(MaxIter)
PPrimeAlpha=numpy.zeros(MaxIter)
# precalc Leg, legendre polynomials, values at alpha, iterative formula
LegVals=[]
LegVals.append(1)
x=numpy.cos(alpha)
LegVals.append(x)
for n in range(2,2*MaxIter+1):
LegVals.append((2*n-1)/n*x*LegVals[n-1]-(n-1)/n*LegVals[n-2])
# precalc Palpha value used in calculations
for i in range(1,MaxIter):
Palpha[i] = LegVals[2*i]
PalphaM1[i] = LegVals[2*i-1]
PPrimeAlpha[i] = (2*i)/(numpy.cos(alpha)**2-1) * (numpy.cos(alpha)*Palpha[i]-PalphaM1[i])
# initialize list to store Stress matrix values
StressTensor = [[] for i in range(len(angles))]
k=0 # index for StressTensor list storage
# loop over each sample angle
for z,ang in enumerate(angles):
r=rlist[z] # get the list of radius points for the given angle
# get initial values of equations A1, A2, A3, and A4
SigmaTheta = -.5+numpy.zeros(len(r))
SigmaPhi = -.5+numpy.zeros(len(r))
SigmaR = -.5+numpy.zeros(len(r))
Shear = 0+numpy.zeros(len(r))
# checks change in value as the calculation is iteratively performed
SigmaPhiold=numpy.array(SigmaPhi)
# precalc Leg values at ang, iterative formula
LegVals=[]
LegVals.append(1)
x=numpy.cos(ang)
LegVals.append(x)
for n in range(2,2*MaxIter+1):
LegVals.append((2*n-1)/n*x*LegVals[n-1]-(n-1)/n*LegVals[n-2])
# loop over the iterative formula
# each iteration is a better and better approximation, using higher order
# legendre polynomials
# start the iteration at i=1
for i in range(1,MaxIter):
# A1 and A2 term 1
c1 = ( (-(4*i+1)*(i-1)+(4*i+1)**2*v)/(2*i*(4*i**2+2*i+1)+2*i*(4*i+1)*v) )*(r/R)**(2*i)
# A1 and A2 term 2
c2 = ( ((4*i+1)*(4*i**2+4*i-1)+2*(4*i+1)*v)/(2*(4*i**2-1)*(4*i**2+2*i+1)+2*(4*i+1)*(4*i**2-1)*v) )*(r/R)**(2*i-2)
# A1 and A2 term 3
c3 = 1+numpy.cos(alpha)
# A1 and A2 term 4
c4 = ( (-(4*i+1)*(2*i+5)+4*(4*i+1)*v)/(4*i*(2*i+1)*(4*i**2+2*i+1)+4*i*(2*i+1)*(4*i+1)*v) )*(r/R)**(2*i)
# A1 and A2 term 5
c5 = ( ((4*i+1)*(4*i**2+4*i-1)+2*(4*i+1)*v)/(4*i*(4*i**2-1)*(4*i**2+2*i+1)+4*i*(4*i+1)*(4*i**2-1)*v) )*(r/R)**(2*i-2)
# get P, legendre polynomial, terms
Pang = LegVals[2*i]
PangM1 = LegVals[2*i-1]
PangM2 = LegVals[2*i-2]
# and get the derivatives
dPang = (2*i)/(numpy.sin(ang)) * (numpy.cos(ang)*Pang-PangM1)
dPangM1 = (2*i)/(numpy.sin(ang)) * (numpy.cos(ang)*PangM1-PangM2)
d2Pdang = -(2*i)/(numpy.tan(ang)*numpy.sin(ang)) * (numpy.cos(ang)*Pang-PangM1) + (2*i)/(numpy.sin(ang)) * (-numpy.sin(ang)*Pang+numpy.cos(ang)*dPang-dPangM1)
# calculate A1 and A2, adding to previous terms
SigmaTheta = SigmaTheta -(1/2)*((c1+c2)*c3*PPrimeAlpha[i]*Pang+(c4+c5)*c3*PPrimeAlpha[i]*d2Pdang)
SigmaPhi = SigmaPhi -(1/2)*((c1+c2)*c3*PPrimeAlpha[i]*Pang+(c4+c5)*c3*PPrimeAlpha[i]*dPang/numpy.tan(ang))
# A3 term 1
c1 = (2*(1+v)*(1-2*v)*(4*i+1)*(numpy.cos(alpha)*Palpha[i]-PalphaM1[i]))
# A3 term 2
c2 = ((8*i**2+8*i+3)*(2*v)+(8*i**2+4*i+2)*(1-2*v))
# A3 term 3
c3 = ((4*i**2-2*i-3)*v)/((1+v)*(1-2*v))*(r/R)**(2*i)
# A3 term 4
c4 = (2*i+1)*(2*i-2)/(2*(1+v))*(r/R)**(2*i)
# A3 term 5
c5 = (4*i**2*(2*i+2)*v)/((2*i+1)*(1+v)*(1-2*v))*(r/R)**(2*i-2)
# A3 term 6
c6 = (2*i*(4*i**2+4*i-1))/(2*(2*i+1)*(1+v))*(r/R)**(2*i-2)
# calculate A3, adding to previous terms
SigmaR = SigmaR - (1/2)*(1/(1-numpy.cos(alpha)))*(c1/c2)*(c3+c4-c5-c6)*Pang
# A4 term 1
c1 = -(4*i+1)*(4*i**2+4*i-1)-2*(4*i+1)*v
# A4 term 2
c2 = 4*i*(2*i+1)*(4*i**2+2*i+1)+4*i*(2*i+1)*(4*i+1)*v
# A4 term 3
c3 = (r/R)**(2*i)-(r/R)**(2*i-2)
# A4 term 4
c4 = 1+numpy.cos(alpha)
# calculate A4, adding to previous terms
Shear = Shear -(1/2)*(c1/c2)*c3*c4*PPrimeAlpha[i]*dPang
# ends calculation iterations when the change is less than vtol
test=numpy.abs(SigmaPhi-SigmaPhiold)
if numpy.max(test) < vtol:
if Verbose:
print(i)
break
# update the value to compare to next iteration
SigmaPhiold=numpy.array(SigmaPhi)
if Verbose:
print(len(angles)-z)
# save values to StressTensor: assemble the 3x3 stress tensor at each sample
# point (the component ordering r, theta, phi is an assumption; the shear term
# couples the r and theta directions)
for i in range(0,len(r)):
stress=numpy.array([[SigmaR[i],Shear[i],0],[Shear[i],SigmaTheta[i],0],[0,0,SigmaPhi[i]]])
StressTensor[k].append(stress)
k=k+1 # update index for next angle
# flatten the angle, r, and stress list of lists to a single list
ang=[ [angles[i]]*len(rlist[i]) for i in range(len(rlist))]
ang=list(itertools.chain(*ang))
r=list(itertools.chain(*rlist))
StressTensor=numpy.array(list(itertools.chain(*StressTensor)))
return (ang,r,StressTensor)
def StressPlotter(Title,ColorLevels,Label,ang,r,Stress,ShowSamplePoints=True):
### things to import
import matplotlib
matplotlib.use('Agg') # no UI backend
import matplotlib.pyplot as plt
###
### set up plotting area
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
dx=7.5
dy=6.5
fig = plt.figure(figsize=(dx, dy))
plot = fig.add_axes([.5/dx,.5/dy,5/dx,5/dy],projection='polar')
Stress=numpy.array(Stress)
if ShowSamplePoints==True:
plot.plot(ang,r,'.',markersize=.5,markeredgewidth=0.0,color=[1,0,0])
ax=plot.tricontourf(ang,r,Stress,levels=ColorLevels)
plot.set_theta_zero_location("N")
plot.set_theta_direction(-1)
plot.set_thetalim(0,numpy.pi/2)
###
### add labels and save fig
plot.set_title(Title,pad=40)
colorbar=fig.add_axes([6.25/dx,.5/dy,.5/dx,5/dy])
fig.colorbar(ax,cax=colorbar,ticks=ColorLevels)
colorbar.set_ylabel(Label)
RadiusShow=fig.add_axes([.5/dx,6/dy,a*5/dx,.1/dy])
RadiusShow.patch.set_color('b')
RadiusShow.xaxis.set_visible(False)
RadiusShow.yaxis.set_visible(False)
fig.savefig(Title+'.png',dpi=1000) #savefig, don't show
###
if __name__ == "__main__":
R=1 # radius of sphere
a=.1 # radius of indentation
v=.33 #poisson ratio
MaxIter = 7000
rtol=.01
vtol=.00001
density=.01
(ang,r,StressTensor)=Calcs(a,v,MaxIter,density,rtol,vtol)
Title='Tension, Compressive Radius = '+str(a)+', Poisson Ratio = '+str(v)
Label='Normalized Tensile First Principal Stress Distribution'
ColorLevels=numpy.linspace(0,1,num=30)
Tension=[]
for stress in StressTensor:
Pstress =
|
numpy.linalg.eig(stress)
|
numpy.linalg.eig
|
import os
import shutil
from typing import Iterable, Optional, Set
from urllib.request import urlopen
import numpy as np
import gzip
from fasttext import load_model
from sklearn.feature_extraction.text import TfidfVectorizer
from d3l.utils.constants import FASTTEXTURL, STOPWORDS
from d3l.utils.functions import shingles
class FasttextTransformer:
def __init__(
self,
token_pattern: str = r"(?<KEY>",
max_df: float = 0.5,
stop_words: Iterable[str] = STOPWORDS,
embedding_model_lang="en",
cache_dir: Optional[str] = None,
):
"""
Instantiate a new embedding-based transformer
Parameters
----------
token_pattern : str
The regex used to identify tokens.
The default value is scikit-learn's TfidfVectorizer default.
max_df : float
Percentage of values the token can appear in before it is ignored.
stop_words : Iterable[str]
A collection of stopwords to ignore that defaults to NLTK's English stopwords.
embedding_model_lang : str
The embedding model language.
cache_dir : Optional[str]
An existing directory path where the model will be stored.
If not given, the current working directory will be used.
"""
self._token_pattern = token_pattern
self._max_df = max_df
self._stop_words = stop_words
self._embedding_model_lang = embedding_model_lang
self._cache_dir = (
cache_dir if cache_dir is not None and os.path.isdir(cache_dir) else None
)
self._embedding_model = self.get_embedding_model(
overwrite=False,
)
def __getstate__(self):
d = self.__dict__
self_dict = {k: d[k] for k in d if k != "_embedding_model"}
return self_dict
def __setstate__(self, state):
self.__dict__ = state
self._embedding_model = self.get_embedding_model(overwrite=False)
@property
def cache_dir(self) -> Optional[str]:
return self._cache_dir
def _download_fasttext(self, model_file_name: str, chunk_size: int = 2 ** 13):
"""
Download pre-trained common-crawl vectors from fastText's website
https://fasttext.cc/docs/en/crawl-vectors.html
Parameters
----------
model_file_name : str
The model file name to download.
chunk_size : int
The Fasttext models are commonly large - several GBs.
The disk writing will therefore be made in chunks.
Returns
-------
"""
url = FASTTEXTURL + model_file_name
print("Downloading %s" % url)
response = urlopen(url)
downloaded = 0
write_file_name = (
os.path.join(self._cache_dir, model_file_name)
if self._cache_dir is not None
else model_file_name
)
download_file_name = write_file_name + ".part"
with open(download_file_name, "wb") as f:
while True:
chunk = response.read(chunk_size)
downloaded += len(chunk)
if not chunk:
break
f.write(chunk)
# print("{} downloaded ...".format(downloaded))
os.rename(download_file_name, write_file_name)
def _download_model(self, if_exists: str = "strict"):
"""
Download the pre-trained model file.
Parameters
----------
if_exists : str
Supported values:
- *ignore*: The model will not be downloaded
- *strict*: This is the default. The model will be downloaded only if it does not exist at the *cache_dir*.
- *overwrite*: The model will be downloaded even if it already exists at the *cache_dir*.
Returns
-------
"""
base_file_name = "cc.%s.300.bin" % self._embedding_model_lang
file_name = (
os.path.join(self._cache_dir, base_file_name)
if self._cache_dir is not None
else base_file_name
)
gz_file_name = "%s.gz" % base_file_name
if os.path.isfile(file_name):
if if_exists == "ignore":
return file_name
elif if_exists == "strict":
print("File exists. Use --overwrite to download anyway.")
return file_name
elif if_exists == "overwrite":
pass
absolute_gz_file_name = (
os.path.join(self._cache_dir, gz_file_name)
if self._cache_dir is not None
else gz_file_name
)
if not os.path.isfile(absolute_gz_file_name):
self._download_fasttext(gz_file_name)
with gzip.open(absolute_gz_file_name, "rb") as f:
with open(file_name, "wb") as f_out:
shutil.copyfileobj(f, f_out)
"""Cleanup"""
if os.path.isfile(absolute_gz_file_name):
os.remove(absolute_gz_file_name)
return file_name
def get_embedding_model(
self,
overwrite: bool = False,
):
"""
Download the pretrained FastText embedding model if it is not already cached, and load it.
Note that the default gzipped English Common Crawl FastText model is about 4.2 GB
and its unzipped version is about 6.7 GB.
Parameters
----------
overwrite : bool
If True overwrites the model if exists.
Returns
-------
"""
if_exists = "strict" if not overwrite else "overwrite"
model_file = self._download_model(if_exists=if_exists)
embedding_model = load_model(model_file)
return embedding_model
def get_embedding_dimension(self) -> int:
"""
Retrieve the embedding dimensions of the underlying model.
Returns
-------
int
The dimensions of each embedding
"""
return self._embedding_model.get_dimension()
def get_vector(self, word: str) -> np.ndarray:
"""
Retrieve the embedding of the given word.
If the word is out of vocabulary a zero vector is returned.
Parameters
----------
word : str
The word to retrieve the vector for.
Returns
-------
np.ndarray
A vector of float numbers.
"""
vector = self._embedding_model.get_word_vector(
str(word).strip().lower(), np.random.randn(self.get_embedding_dimension())
)
return vector
def get_tokens(self, input_values: Iterable[str]) -> Set[str]:
"""
Extract the most representative tokens of each value and return the token set.
Here, the most representative tokens are the ones with the lowest TF/IDF scores -
tokens that describe what the values are about.
Parameters
----------
input_values : Iterable[str]
The collection of values to extract tokens from.
Returns
-------
Set[str]
A set of representative tokens
"""
if len(input_values) < 1:
return set()
try:
vectorizer = TfidfVectorizer(
decode_error="ignore",
strip_accents="unicode",
lowercase=True,
analyzer="word",
stop_words=self._stop_words,
token_pattern=self._token_pattern,
max_df=self._max_df,
use_idf=True,
)
vectorizer.fit_transform(input_values)
except ValueError:
return set()
weight_map = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
tokenset = set()
tokenizer = vectorizer.build_tokenizer()
for value in input_values:
value = value.lower().replace("\n", " ").strip()
for shingle in shingles(value):
tokens = [t for t in tokenizer(shingle)]
if len(tokens) < 1:
continue
token_weights = [weight_map.get(t, 0.0) for t in tokens]
min_tok_id = np.argmin(token_weights)
tokenset.add(tokens[min_tok_id])
return tokenset
def transform(self, input_values: Iterable[str]) -> np.ndarray:
"""
Extract the embeddings of the most representative tokens of each value and return their **mean** embedding.
Here, the most representative tokens are the ones with the lowest TF/IDF scores -
tokens that describe what the values are about.
Given that the underlying embedding model is a n-gram based one,
the number of out-of-vocabulary tokens should be relatively small or zero.
Parameters
----------
input_values : Iterable[str]
The collection of values to extract tokens from.
Returns
-------
np.ndarray
A Numpy vector representing the mean of all token embeddings.
"""
embeddings = [self.get_vector(token) for token in self.get_tokens(input_values)]
if len(embeddings) == 0:
return np.empty(0)
return np.mean(
|
np.array(embeddings)
|
numpy.array
|
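# --- Standalone sketch (scikit-learn only, not d3l code) of the token selection
# idea in get_tokens above: fit a TfidfVectorizer, map each token to its IDF
# weight, and keep the lowest-weighted token of each value. The corpus below is
# an assumed example.
from sklearn.feature_extraction.text import TfidfVectorizer
corpus = ["red apple pie", "green apple juice", "apple tart with cream"]
vec = TfidfVectorizer()
vec.fit(corpus)
weights = dict(zip(vec.get_feature_names_out(), vec.idf_))
tokenize = vec.build_tokenizer()
representative = {min(tokenize(value.lower()), key=lambda t: weights.get(t, 0.0)) for value in corpus}
# 'apple' appears in every document, so it has the lowest IDF and gets selected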
import numpy as np
from scipy.signal import fftconvolve
class KDE(object):
"""
This class implements a Gaussian kernel density estimator in arbitrary dimensions with a first order renormalization
at the boundary of parameter space
"""
def __init__(self, bandwidth_scale=1, nbins=None):
"""
:param bandwidth_scale: scales the kernel bandwidth, or the variance of each Gaussian
:param nbins: number of bins in the KDE
"""
self.bandwidth_scale = bandwidth_scale
self._nbins = nbins
def _scotts_factor(self, n, d):
"""
Computes the kernel bandwidth using Scott's factor
:param n: number of data points
:param d: number of dimensions
:return: kernel bandwidth
"""
return 1.05 * n ** (-1. / (d + 4))
def _gaussian_kernel(self, inverse_cov_matrix, coords_centered, dimension, n_reshape):
"""
Computes the multivariate gaussian KDE from the covariance matrix and observation array
:param inverse_cov_matrix: inverse of the covariance matrix estimated from observations
:param coords_centered: array of observations transformed into pixel space
:param dimension: number of dimensions
:param n_reshape: shape of output
:return: gaussian KDE evaluated at coords
"""
def _gauss(_x):
return np.exp(-0.5 * np.dot(np.dot(_x, inverse_cov_matrix), _x))
z = [_gauss(coord) for coord in coords_centered]
return np.reshape(z, tuple([n_reshape] * dimension))
def _get_coordinates(self, ranges):
"""
Builds an array of coordinate values from the specified parameter ranges
:param ranges: parameter ranges
:return: array of coordinate values
"""
points = []
for i in range(0, len(ranges)):
points.append(np.linspace(ranges[i][0], ranges[i][1], self._nbins))
return points
def NDhistogram(self, data, weights, ranges):
"""
:param data: data to make the histogram. Shape (nsamples, ndim)
:param weights: importance weights for each observation
:param ranges: parameter ranges corresponding to columns in data
:return: histogram
"""
coordinates = self._get_coordinates(ranges)
histbins = []
for i, coord in enumerate(coordinates):
histbins.append(np.linspace(ranges[i][0], ranges[i][-1], len(coord) + 1))
H, _ = np.histogramdd(data, range=ranges, bins=histbins, weights=weights)
return H.T
def __call__(self, data, ranges, weights, boundary_order=1):
"""
:param data: data to make the histogram, shape = (n_observations, ndim)
:param ranges: a list of parameter ranges corresponding to each dimension
:param weights: importance weights for each observation
:return: the KDE estimate of the data
"""
# compute coordinate arrays for each parameter
coordinates = self._get_coordinates(ranges)
# shift coordinate arrays so that the center is at (0, 0)
X = np.meshgrid(*coordinates)
cc_center = np.vstack([X[i].ravel() - np.mean(ranges[i]) for i in range(len(X))]).T
try:
dimension = int(np.shape(data)[1])
except:
dimension = 1
histbins = []
for i, coord in enumerate(coordinates):
histbins.append(np.linspace(ranges[i][0], ranges[i][-1], len(coord) + 1))
# Compute the N-dimensional histogram
H = self.NDhistogram(data, weights, ranges)
# compute the covariance, scale by the bandwidth
bandwidth = self.bandwidth_scale * self._scotts_factor(data.shape[0], dimension)
covariance = bandwidth * np.cov(data.T)
# invert covariance matrix
if dimension > 1:
c_inv = np.linalg.inv(covariance)
else:
c_inv = 1 / covariance
n = len(coordinates[0])
gaussian_kernel = self._gaussian_kernel(c_inv, cc_center, dimension, n)
# now compute the gaussian KDE
density = fftconvolve(H, gaussian_kernel, mode='same')
# renormalize the boundary to remove bias
if boundary_order == 1:
boundary_kernel = np.ones(
|
np.shape(H)
|
numpy.shape
|
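# --- Standalone 1-D sketch (illustrative, not part of the class above) of the
# histogram-plus-convolution idea behind __call__: bin the samples, build a
# Gaussian kernel whose width uses Scott's factor (1.05 * n**(-1/(d+4)) with
# d = 1 here), and smooth the histogram with fftconvolve. The data and bin
# count are assumed values.
import numpy as np
from scipy.signal import fftconvolve
samples = np.random.normal(0.0, 1.0, 5000)
hist, edges = np.histogram(samples, bins=100, range=(-4, 4), density=True)
centers = 0.5 * (edges[:-1] + edges[1:])
bandwidth = 1.05 * samples.size ** (-1.0 / 5) # Scott's factor for d = 1
kernel = np.exp(-0.5 * (centers / bandwidth) ** 2) # Gaussian kernel on the grid
kernel /= kernel.sum() # normalize to unit mass
density_estimate = fftconvolve(hist, kernel, mode='same') # smoothed histogram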
#!/usr/bin/env python
import os
import pickle
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_raises,
assert_equal)
import pywt
def test_wavelet_packet_structure():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
assert_(wp.data == [1, 2, 3, 4, 5, 6, 7, 8])
assert_(wp.path == '')
assert_(wp.level == 0)
assert_(wp['ad'].maxlevel == 3)
def test_traversing_wp_tree():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
assert_(wp.maxlevel == 3)
# First level
assert_allclose(wp['a'].data, np.array([2.12132034356, 4.949747468306,
7.778174593052, 10.606601717798]),
rtol=1e-12)
# Second level
assert_allclose(wp['aa'].data, np.array([5., 13.]), rtol=1e-12)
# Third level
assert_allclose(wp['aaa'].data, np.array([12.727922061358]), rtol=1e-12)
def test_access_path():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
assert_(wp['a'].path == 'a')
assert_(wp['aa'].path == 'aa')
assert_(wp['aaa'].path == 'aaa')
# Maximum level reached:
assert_raises(IndexError, lambda: wp['aaaa'].path)
# Wrong path
assert_raises(ValueError, lambda: wp['ac'].path)
def test_access_node_attributes():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
assert_allclose(wp['ad'].data, np.array([-2., -2.]), rtol=1e-12)
assert_(wp['ad'].path == 'ad')
assert_(wp['ad'].node_name == 'd')
assert_(wp['ad'].parent.path == 'a')
assert_(wp['ad'].level == 2)
assert_(wp['ad'].maxlevel == 3)
assert_(wp['ad'].mode == 'symmetric')
# tuple-based access is also supported
node = wp[('a', 'd')]
# can access a node's path as either a single string or in tuple form
assert_(node.path == 'ad')
assert_(node.path_tuple == ('a', 'd'))
def test_collecting_nodes():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
# All nodes in natural order
assert_([node.path for node in wp.get_level(3, 'natural')] ==
['aaa', 'aad', 'ada', 'add', 'daa', 'dad', 'dda', 'ddd'])
# and in frequency order.
assert_([node.path for node in wp.get_level(3, 'freq')] ==
['aaa', 'aad', 'add', 'ada', 'dda', 'ddd', 'dad', 'daa'])
assert_raises(ValueError, wp.get_level, 3, 'invalid_order')
def test_reconstructing_data():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
# Create another Wavelet Packet and feed it with some data.
new_wp = pywt.WaveletPacket(data=None, wavelet='db1', mode='symmetric')
new_wp['aa'] = wp['aa'].data
new_wp['ad'] = [-2., -2.]
# For convenience, :attr:`Node.data` gets automatically extracted
# from the :class:`Node` object:
new_wp['d'] = wp['d']
# Reconstruct data from aa, ad, and d packets.
assert_allclose(new_wp.reconstruct(update=False), x, rtol=1e-12)
# The node's :attr:`~Node.data` will not be updated
assert_(new_wp.data is None)
# When `update` is True:
assert_allclose(new_wp.reconstruct(update=True), x, rtol=1e-12)
assert_allclose(new_wp.data, np.arange(1, 9), rtol=1e-12)
assert_([n.path for n in new_wp.get_leaf_nodes(False)] ==
['aa', 'ad', 'd'])
assert_([n.path for n in new_wp.get_leaf_nodes(True)] ==
['aaa', 'aad', 'ada', 'add', 'daa', 'dad', 'dda', 'ddd'])
def test_removing_nodes():
x = [1, 2, 3, 4, 5, 6, 7, 8]
wp = pywt.WaveletPacket(data=x, wavelet='db1', mode='symmetric')
wp.get_level(2)
dataleafs = [n.data for n in wp.get_leaf_nodes(False)]
expected = np.array([[5., 13.], [-2, -2], [-1, -1], [0, 0]])
for i in range(4):
assert_allclose(dataleafs[i], expected[i, :], atol=1e-12)
node = wp['ad']
del(wp['ad'])
dataleafs = [n.data for n in wp.get_leaf_nodes(False)]
expected = np.array([[5., 13.], [-1, -1], [0, 0]])
for i in range(3):
assert_allclose(dataleafs[i], expected[i, :], atol=1e-12)
wp.reconstruct()
# The reconstruction is:
assert_allclose(wp.reconstruct(),
|
np.array([2., 3., 2., 3., 6., 7., 6., 7.])
|
numpy.array
|
import numpy as np
from numba import jit, njit, prange
def is_symmetric(A, rtol=1e-05, atol=1e-08):
# https://stackoverflow.com/questions/42908334/checking-if-a-matrix-is-symmetric-in-numpy
return np.allclose(A, A.T, rtol=rtol, atol=atol)
def is_square(A):
return A.shape[0] == A.shape[1]
@njit
def vec(A):
# https://stackoverflow.com/questions/25248290/most-elegant-implementation-of-matlabs-vec-function-in-numpy
return A.T.ravel().reshape(-1, 1)
def vech(A):
if not is_square(A):
raise Exception("It must be a square matrix.")
return A.T[np.triu_indices_from(A)].reshape(-1, 1)
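# --- Usage sketch (illustrative): vec() stacks the columns of a matrix into a
# single column vector, while vech() keeps only the column-stacked lower
# triangle; is_symmetric() compares a matrix against its transpose.
A_demo = np.array([[1.0, 2.0], [3.0, 4.0]])
vech(A_demo) # -> [[1.], [3.], [4.]], the lower triangle of A column by column
is_symmetric(A_demo + A_demo.T) # -> True, a matrix plus its transpose is symmetric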
@njit(fastmath=True)
def kron(A, B=None):
if B is not None:
return np.kron(
|
np.ascontiguousarray(A)
|
numpy.ascontiguousarray
|
#!/usr/bin/env python3
#
# Copyright 2020 IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.IBM Confidential
#
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from unittest import mock
import botocore
from botocore.response import StreamingBody
import numpy as np
from swagger_server.models.error import Error # noqa: E501
from swagger_server.models.parameter import Parameter # noqa: E501
from swagger_server.models.link import Link # noqa: E501
from swagger_server.models.prediction import Prediction # noqa: E501
from swagger_server.models.prediction_response import PredictionResponse # noqa: E501
from swagger_server.test import BaseTestCase
from swagger_server.controllers.run_controller import prediction
class TestRunController(BaseTestCase):
"""RunController integration test stubs"""
@mock.patch("swagger_server.controllers.run_controller.botocore.client.BaseClient")
@mock.patch("swagger_server.controllers.run_controller.boto3.client")
def test_prediction(self, mock_boto_client, mock_invoke_endpoint):
"""Test case for prediction
Call Prediction of specified deployment
"""
mock_boto_client.return_value = botocore.client.BaseClient()
buffer = BytesIO()
x = np.array(['this on returned', 2, 3])
|
np.save(buffer, x)
|
numpy.save
|
import numpy as np
from alphago.elo import compute_log_likelihood, run_mm, update_gamma
def test_compute_log_likelihood():
gamma = np.array([1, 2, 3])
wins = np.array([[0, 3, 4],
[1, 0, 2],
[2, 0, 0]])
expected = 3 * np.log(1/(1+2)) + 4 * np.log(1/(1+3)) + \
1 * np.log(2/(2+1)) + 2 * np.log(2/(2+3)) + 2 * np.log(3/(3+1))
computed = compute_log_likelihood(wins, gamma)
assert expected == computed
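# --- Reference sketch (an assumption inferred from the hand-computed `expected`
# value above): the Bradley-Terry log-likelihood sums
# wins[i, j] * log(gamma[i] / (gamma[i] + gamma[j])) over ordered pairs i != j.
g = np.array([1.0, 2.0, 3.0])
w = np.array([[0, 3, 4], [1, 0, 2], [2, 0, 0]], dtype=float)
probs = g[:, None] / (g[:, None] + g[None, :]) # probs[i, j] = P(i beats j)
mask = ~np.eye(3, dtype=bool) # ignore the diagonal (no self-play)
reference_ll = np.sum(w[mask] * np.log(probs[mask])) # equals `expected` above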
def test_run_mm():
initial_gamma = np.array([1, 1, 1])
wins = np.array([[0, 30, 40],
[1, 0, 20],
[2, 0, 0]])
initial_ll = compute_log_likelihood(wins, initial_gamma)
gamma = run_mm(initial_gamma, wins)
final_ll = compute_log_likelihood(wins, gamma)
assert initial_ll < final_ll
def test_run_mm_large():
# Generate fake data according to the Bradley-Terry model.
hidden_gamma = np.array([10, 20, 1, 1, 5, 3, 100, 8, 100, 10])
hidden_gamma = hidden_gamma /
|
np.sum(hidden_gamma)
|
numpy.sum
|
from matplotlib import pyplot as plt
import improc as imp
import numpy as np
import os
import scipy.io as scio
patchSize = [480, 480, 4]
patchSize = [240, 240, 4]
# patchSize = [32, 32, 4]
numPatches = 5000
numSelPtcs = 500
sortway = 'ascent'
sortway = 'descent'
sortway = None
seed = 2019
seed = None
startid = 0
noise = 'wgn'
# noise = None
SNR = 100
tranformway = 'orig'
tranformway = 'flipud'
tranformway = 'fliplr'
tranformway = 'transpose'
tranformway = 'rot90'
tranformway = 'flipud(fliplr)'
tranformway = 'fliplr(rot90)'
# tranformway = 'fliplr(transpose)'
WriteImg = False
# --------------------------------------
datasetname = 'RSSRAI2019TRAIN'
folderIN = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/train/train/'
folderOUT = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/train/samples3/'
num = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20]
# datasetname = 'RSSRAI2019VALID'
# folderIN = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/valid/valid/'
# folderOUT = '/mnt/d/DataSets/oi/rsi/RSSRAI2019/new/valid/samples3/'
# num = [10, 15]
folderA = 'img_2017'
folderB = 'img_2018'
folderC = 'mask'
folderAin = os.path.join(folderIN, folderA)
folderBin = os.path.join(folderIN, folderB)
folderCin = os.path.join(folderIN, folderC)
folderAout = os.path.join(folderOUT, folderA)
folderBout = os.path.join(folderOUT, folderB)
folderCout = os.path.join(folderOUT, folderC)
os.makedirs(folderAout, exist_ok=True)
os.makedirs(folderBout, exist_ok=True)
os.makedirs(folderCout, exist_ok=True)
imageNameA = 'image_2017_960_960_'
imageNameB = 'image_2018_960_960_'
imageNameC = 'mask_2017_2018_960_960_'
imgspathA = []
imgspathB = []
imgspathC = []
for n in num:
imgspathA.append(folderAin + '/' + imageNameA + str(n) + '.tif')
imgspathB.append(folderBin + '/' + imageNameB + str(n) + '.tif')
imgspathC.append(folderCin + '/' + imageNameC + str(n) + '.tif')
print(imgspathA)
print(imgspathB)
print(imgspathC)
A = []
B = []
C = []
cc = np.zeros((960, 960, 4), dtype='uint8')
for n in range(len(num)):
A.append(imp.imreadadv(imgspathA[n]))
B.append(imp.imreadadv(imgspathB[n]))
c = imp.imreadadv(imgspathC[n])
# print(c.shape)
cc[:, :, 0] = c
cc[:, :, 1] = c
cc[:, :, 2] = c
cc[:, :, 3] = c
# print(cc.min(), cc.max())
C.append(cc.copy())
# N-H-W-C --> H-W-C-N
A = np.transpose(np.array(A), (1, 2, 3, 0))
B = np.transpose(np.array(B), (1, 2, 3, 0))
C = np.transpose(np.array(C), (1, 2, 3, 0))
plt.figure()
plt.subplot(231)
plt.imshow(A[:, :, 0:3, 0])
plt.subplot(232)
plt.imshow(B[:, :, 0:3, 0])
plt.subplot(233)
plt.imshow(C[:, :, 0:3, 0])
print("===tranformway:", tranformway)
if tranformway == 'flipud':
A = np.flipud(A)
B = np.flipud(B)
C = np.flipud(C)
if tranformway == 'fliplr':
A = np.fliplr(A)
B = np.fliplr(B)
C = np.fliplr(C)
if tranformway == 'transpose':
A = np.transpose(A, (1, 0, 2, 3))
B = np.transpose(B, (1, 0, 2, 3))
C = np.transpose(C, (1, 0, 2, 3))
if tranformway == 'rot90':
A = np.rot90(A)
B =
|
np.rot90(B)
|
numpy.rot90
|
import numpy as np
import math
from e2cnn.kernels.basis import KernelBasis
from e2cnn.kernels.utils import offset_iterator, psi, psichi, chi
from e2cnn.group import Group, IrreducibleRepresentation
from e2cnn.group import cyclic_group, dihedral_group, so2_group, o2_group
from e2cnn.group import CyclicGroup, DihedralGroup, SO2, O2
from typing import Union, Tuple
class IrrepBasis(KernelBasis):
def __init__(self, group, in_irrep, out_irrep, dim):
r"""
Abstract class for bases implementing the kernel constraint solutions associated to irreducible input and output
representations.
Args:
group:
in_irrep:
out_irrep:
dim:
"""
self.group = group
self.in_irrep = in_irrep
self.out_irrep = out_irrep
super(IrrepBasis, self).__init__(dim, (out_irrep.size, in_irrep.size))
class R2FlipsSolution(IrrepBasis):
def __init__(
self, group, in_irrep, out_irrep, axis, max_frequency = None, max_offset = None,
):
if isinstance(group, int):
group = cyclic_group(2)
assert isinstance(group, CyclicGroup) and group.order() == 2
assert max_frequency is not None or max_offset is not None, (
"Error! Either the maximum frequency or the maximum offset for the"
" frequencies must be set"
)
self.max_frequency = max_frequency
self.max_offset = max_offset
assert max_frequency is None or (
isinstance(max_frequency, int) and max_frequency >= 0
)
assert max_offset is None or (isinstance(max_offset, int) and max_offset >= 0)
assert isinstance(axis, float)
self.axis = axis
if isinstance(in_irrep, int):
in_irrep = group.irrep(in_irrep)
elif isinstance(in_irrep, str):
in_irrep = group.irreps[in_irrep]
elif not isinstance(in_irrep, IrreducibleRepresentation):
raise ValueError(
"'in_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found"
)
if isinstance(out_irrep, int):
out_irrep = group.irrep(out_irrep)
elif isinstance(out_irrep, str):
out_irrep = group.irreps[out_irrep]
elif not isinstance(out_irrep, IrreducibleRepresentation):
raise ValueError(
"'out_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found"
)
self.N = 1
self.fi = in_irrep.attributes["frequency"]
self.fo = out_irrep.attributes["frequency"]
self.ts = []
self.gamma = ((self.fi + self.fo) % 2) * np.pi / 2
mus = []
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(
0, 1, self.max_offset, self.max_frequency, non_negative=True
):
# the current shifted frequency
mu = t
if self.max_offset is not None:
assert math.fabs(t) <= self.max_offset, (t, self.max_offset)
if self.max_frequency is not None:
assert math.fabs(mu) <= self.max_frequency, (t, mu, self.max_frequency)
if mu > 0 or self.gamma == 0.0:
# don't add sin(0*theta) as a basis since it is zero everywhere
mus.append(mu)
self.ts.append(t)
self.mu = np.array(mus).reshape(-1, 1)
self._non_zero_frequencies = self.mu != 0
self._has_non_zero_frequencies = np.any(self._non_zero_frequencies)
dim = self.mu.shape[0]
super(R2FlipsSolution, self).__init__(group, in_irrep, out_irrep, dim)
def sample(self, angles, out = None):
r"""
Sample the continuous basis elements on the discrete set of angles in ``angles``.
Optionally, store the resulting multidimensional array in ``out``.
A value of ``nan`` is interpreted as the angle of a point placed on the origin of the axes.
``angles`` must be an array of shape `(1, N)`, where `N` is the number of points.
Args:
angles (~numpy.ndarray): angles where to evaluate the basis elements
out (~numpy.ndarray, optional): pre-existing array to use to store the output
Returns:
the sampled basis
"""
assert len(angles.shape) == 2
assert angles.shape[0] == 1
if out is None:
out = np.empty((self.shape[0], self.shape[1], self.dim, angles.shape[1]))
assert out.shape == (self.shape[0], self.shape[1], self.dim, angles.shape[1])
# find points in the origin
origin = np.isnan(angles)
angles = angles.copy()
angles[origin] = 0.0
angles -= self.axis
if self.shape[0] == 1 and self.shape[1] == 1:
out[0, 0, ...] = np.cos(self.mu * angles + self.gamma)
else:
raise ValueError(f"Shape {self.shape} not recognized!")
if self._has_non_zero_frequencies:
# In the origin, only 0-frequencies are permitted.
# Therefore, any non-0 frequency base is set to 0 in the origin
if np.any(origin):
mask = self._non_zero_frequencies * origin
out *= 1 - mask
assert not np.isnan(out).any()
return out
def __getitem__(self, idx):
assert idx < self.dim
attr = {}
attr["frequency"] = self.mu[idx, 0]
attr["gamma"] = self.gamma
attr["offset"] = self.ts[idx]
attr["idx"] = idx
return attr
def __eq__(self, other):
if not isinstance(other, R2FlipsSolution):
return False
elif (
self.in_irrep != other.in_irrep
or self.out_irrep != other.out_irrep
or self.axis != other.axis
):
return False
else:
return np.allclose(self.mu, other.mu) and np.allclose(
self.gamma, other.gamma
)
def __hash__(self):
return (
hash(self.in_irrep)
+ hash(self.out_irrep)
+ hash(self.mu.tostring())
+ hash(self.gamma)
)
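# --- Standalone sketch (plain NumPy, not the e2cnn API) of the broadcasting
# used in sample() above: a column of frequencies against a row of angles
# yields one cosine basis element per row. The frequencies and angles below
# are assumed example values.
demo_mu = np.array([0, 1, 2]).reshape(-1, 1) # shape (dim, 1)
demo_angles = np.linspace(0, 2 * np.pi, 5).reshape(1, -1) # shape (1, n_points)
demo_basis = np.cos(demo_mu * demo_angles + np.pi / 2) # shape (dim, n_points)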
class R2DiscreteRotationsSolution(IrrepBasis):
def __init__(
self, group, in_irrep, out_irrep, max_frequency = None, max_offset = None,
):
if isinstance(group, int):
group = cyclic_group(group)
assert isinstance(group, CyclicGroup)
assert max_frequency is not None or max_offset is not None, (
"Error! Either the maximum frequency or the maximum offset for the"
" frequencies must be set"
)
self.max_frequency = max_frequency
self.max_offset = max_offset
assert max_frequency is None or (
isinstance(max_frequency, int) and max_frequency >= 0
)
assert max_offset is None or (isinstance(max_offset, int) and max_offset >= 0)
if isinstance(in_irrep, int):
in_irrep = group.irrep(in_irrep)
elif isinstance(in_irrep, str):
in_irrep = group.irreps[in_irrep]
elif not isinstance(in_irrep, IrreducibleRepresentation):
raise ValueError(
"'in_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found"
)
self.n = in_irrep.attributes["frequency"]
if isinstance(out_irrep, int):
out_irrep = group.irrep(out_irrep)
elif isinstance(out_irrep, str):
out_irrep = group.irreps[out_irrep]
elif not isinstance(out_irrep, IrreducibleRepresentation):
raise ValueError(
"'out_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found"
)
self.m = out_irrep.attributes["frequency"]
self.N = group.order()
self.ts = []
if in_irrep.size == 2 and out_irrep.size == 2:
# m, n > 0
gammas = []
mus = []
ss = []
for gamma in [0.0, np.pi / 2]:
for s in [0, 1]:
k = self.m - self.n * (-1) ** s
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(
k, self.N, self.max_offset, self.max_frequency
):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert math.fabs(t) <= self.max_offset, (t, self.max_offset)
if self.max_frequency is not None:
assert math.fabs(mu) <= self.max_frequency, (
k,
t,
mu,
self.max_frequency,
)
gammas.append(gamma)
mus.append(mu)
ss.append(s)
self.ts.append(t)
self.gamma = np.array(gammas).reshape(-1, 1)
self.mu = np.array(mus).reshape(-1, 1)
self.s = np.array(ss).reshape(-1, 1)
elif in_irrep.size == 2 and out_irrep.size == 1:
assert self.m == 0 or (self.m == self.N // 2 and self.N % 2 == 0)
# n > 0, m = 0 or N/2
gammas = []
mus = []
for gamma in [0.0, np.pi / 2]:
k = self.n + self.m
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(
k, self.N, self.max_offset, self.max_frequency
):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert math.fabs(t) <= self.max_offset, (t, self.max_offset)
if self.max_frequency is not None:
assert math.fabs(mu) <= self.max_frequency, (
k,
t,
mu,
self.max_frequency,
)
gammas.append(gamma)
mus.append(mu)
self.ts.append(t)
self.gamma = np.array(gammas).reshape(-1, 1)
self.mu = np.array(mus).reshape(-1, 1)
elif in_irrep.size == 1 and out_irrep.size == 2:
assert self.n == 0 or (self.n == self.N // 2 and self.N % 2 == 0)
# m > 0, n = 0 or N/2
gammas = []
mus = []
for gamma in [0.0, np.pi / 2]:
k = self.n + self.m
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(
k, self.N, self.max_offset, self.max_frequency
):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert math.fabs(t) <= self.max_offset, (t, self.max_offset)
if self.max_frequency is not None:
assert math.fabs(mu) <= self.max_frequency, (
k,
t,
mu,
self.max_frequency,
)
gammas.append(gamma)
mus.append(mu)
self.ts.append(t)
self.gamma = np.array(gammas).reshape(-1, 1)
self.mu = np.array(mus).reshape(-1, 1)
elif in_irrep.size == 1 and out_irrep.size == 1:
assert self.n == 0 or (self.n == self.N // 2 and self.N % 2 == 0)
assert self.m == 0 or (self.m == self.N // 2 and self.N % 2 == 0)
gammas = []
mus = []
for gamma in [0.0, np.pi / 2]:
k = self.m - self.n
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(
k, self.N, self.max_offset, self.max_frequency, non_negative=True
):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert math.fabs(t) <= self.max_offset, (t, self.max_offset)
if self.max_frequency is not None:
assert math.fabs(mu) <= self.max_frequency, (
k,
t,
mu,
self.max_frequency,
)
if mu > 0 or gamma == 0.0:
# don't add sin(0*theta) as a basis since it is zero everywhere
gammas.append(gamma)
mus.append(mu)
self.ts.append(t)
self.gamma = np.array(gammas).reshape(-1, 1)
self.mu = np.array(mus).reshape(-1, 1)
self._non_zero_frequencies = self.mu != 0
self._has_non_zero_frequencies = np.any(self._non_zero_frequencies)
dim = self.gamma.shape[0]
super(R2DiscreteRotationsSolution, self).__init__(
group, in_irrep, out_irrep, dim
)
def sample(self, angles, out = None):
r"""
Sample the continuous basis elements on the discrete set of angles in ``angles``.
Optionally, store the resulting multidimensional array in ``out``.
A value of ``nan`` is interpreted as the angle of a point placed on the origin of the axes.
``angles`` must be an array of shape `(1, N)`, where `N` is the number of points.
Args:
angles (~numpy.ndarray): angles where to evaluate the basis elements
out (~numpy.ndarray, optional): pre-existing array to use to store the output
Returns:
the sampled basis
"""
assert len(angles.shape) == 2
assert angles.shape[0] == 1
if out is None:
out = np.empty((self.shape[0], self.shape[1], self.dim, angles.shape[1]))
assert out.shape == (self.shape[0], self.shape[1], self.dim, angles.shape[1])
# find points in the origin
origin = np.isnan(angles)
angles = angles.copy()
angles[origin] = 0.0
# the basis vectors depends on the shape of the input and output irreps,
# while their frequencies depend on the irreps frequencies
if self.shape[0] == 2 and self.shape[1] == 2:
out = psichi(angles, s=self.s, k=self.mu, gamma=self.gamma, out=out)
elif self.shape[0] == 1 and self.shape[1] == 2:
out[0, 0, ...] =
|
np.cos(self.mu * angles + self.gamma)
|
numpy.cos
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Perform ResNet autoTVM tuning on VTA using Relay."""
import argparse, os, time
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
import topi
import tvm
from tvm import rpc, autotvm, relay
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_runtime, util, download
from tvm.contrib.debugger import debug_runtime
import vta
from vta.testing import simulator
from vta.top import graph_pack
from tvm.autotvm.task import extract_from_program
def parse_arguments():
parser = argparse.ArgumentParser(description='Train a model for image classification.')
parser.add_argument('--model', type=str, default='resnet18_v1', choices=['resnet18_v1'],
help='Input model name.')
parser.add_argument('--start-name', type=str, default='nn.max_pool2d',
help='The name of the node where packing starts')
parser.add_argument('--stop-name', type=str, default='nn.global_avg_pool2d',
help='The name of the node where packing stops')
parser.add_argument('--debug-profile', action='store_true',
help='Show layer-wise time cost profiling results')
parser.add_argument('--device', default='vta', choices=['vta', 'arm_cpu'],
help='Select device target')
parser.add_argument('--measurements', type=int, default=1,
help='Number of measurements during AutoTVM search')
parser.add_argument('--tuner', type=str, default="random",
help='AutoTVM search strategy')
parser.add_argument('--log-filename', type=str, default="resnet-18.log",
help='AutoTVM log file name')
return parser.parse_args()
def register_vta_tuning_tasks():
from tvm.autotvm.task.topi_integration import TaskExtractEnv, deserialize_args
@tvm.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.const(a_min, x.dtype)
const_max = tvm.const(a_max, x.dtype)
x = tvm.compute(x.shape, lambda *i: tvm.min(x(*i), const_max), name="clipA")
x = tvm.compute(x.shape, lambda *i: tvm.max(x(*i), const_min), name="clipB")
return x
# init autotvm env to register VTA operator
TaskExtractEnv()
@autotvm.task.register("topi_nn_conv2d", override=True)
def _topi_nn_conv2d(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
with tvm.target.vta():
res = topi.nn.conv2d(*args, **kwargs)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == 'vta':
s = topi.generic.schedule_conv2d_nchw([res])
else:
s = tvm.create_schedule([res.op])
return s, [A, W, res]
@autotvm.task.register("topi_nn_dense", override=True)
def _topi_nn_dense(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
with tvm.target.vta():
res = topi.nn.dense(*args, **kwargs)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == 'vta':
s = topi.generic.schedule_dense([res])
else:
s = tvm.create_schedule([res.op])
return s, [A, W, res]
def compile_network(opt, env, target):
# Populate the shape and data type dictionary
dtype_dict = {"data": 'float32'}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
# Get off the shelf gluon model, and convert to relay
gluon_model = vision.get_model(opt.model, pretrained=True)
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
# Update shape and type dictionary
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
# Perform quantization in Relay
# Note: We set opt_level to 3 in order to fold batch norm
with relay.build_config(opt_level=3):
with relay.quantize.qconfig(global_scale=8.0,
skip_conv_layers=[0]):
relay_prog = relay.quantize.quantize(mod["main"], params=params)
# Perform graph packing and constant folding for VTA target
if target.device_name == "vta":
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
relay_prog,
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=opt.start_name,
stop_name=opt.stop_name)
return relay_prog, params
def tune_tasks(tasks,
measure_option,
tuner='xgb',
n_trial=1000,
early_stopping=None,
log_filename='tuning.log',
use_transfer_learning=True,
try_winograd=True):
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i+1, len(tasks))
# create tuner
if tuner == 'xgb' or tuner == 'xgb-rank':
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif tuner == 'ga':
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == 'random':
tuner_obj = RandomTuner(tsk)
elif tuner == 'gridsearch':
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
n_trial_ = min(n_trial, len(tsk.config_space))
tuner_obj.tune(n_trial_,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial_, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file)])
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
if __name__ == '__main__':
opt = parse_arguments()
# Make sure that TVM was compiled with RPC=1
assert tvm.runtime.enabled("rpc")
# Read in VTA environment
env = vta.get_env()
# Get remote from fleet node
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
# Get remote
if env.TARGET != "sim":
# Measure build start time
reconfig_start = time.time()
# Get remote from fleet node
remote = autotvm.measure.request_remote(env.TARGET, tracker_host, int(tracker_port), timeout=10000)
# Reconfigure the JIT runtime and FPGA.
# You can program the FPGA with your own custom bitstream
# by passing the path to the bitstream file instead of None.
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
# Report on reconfiguration time
reconfig_time = time.time() - reconfig_start
print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
# In simulation mode, host the RPC server locally.
else:
remote = rpc.LocalSession()
# VTA target and execution context
target = env.target if opt.device == "vta" else env.target_vta_cpu
ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0)
# Compile Relay program
print("Initial compile...")
relay_prog, params = compile_network(opt, env, target)
# Register VTA tuning tasks
register_vta_tuning_tasks()
# Perform task extraction on Relay program
print("Extracting tasks...")
tasks = extract_from_program(func=relay_prog,
params=params,
ops=(tvm.relay.op.nn.conv2d,),
target=target,
target_host=env.target_host)
# Perform Autotuning
print("Tuning...")
tuning_opt = {
'log_filename': opt.log_filename,
'tuner': opt.tuner,
'n_trial': 1e9,
'early_stopping': None,
'measure_option': autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func=vta.vta_autotvm_build_func),
                runner=autotvm.RPCRunner(env.TARGET, tracker_host, int(tracker_port),
number=4, min_repeat_ms=150, repeat=opt.measurements, timeout=60,
check_correctness=True))
}
tune_tasks(tasks, **tuning_opt)
# Compile kernels with history best records
with autotvm.tophub.context(target, extra_files=[opt.log_filename]):
# Compile network
print("Compiling network with best tuning parameters...")
with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
if target.device_name != "vta":
graph, lib, params = relay.build(
relay_prog, target=target,
params=params, target_host=env.target_host)
else:
with vta.build_config():
graph, lib, params = relay.build(
relay_prog, target=target,
params=params, target_host=env.target_host)
# Export library
temp = util.tempdir()
lib.save(temp.relpath("graphlib.o"))
remote.upload(temp.relpath("graphlib.o"))
lib = remote.load_module("graphlib.o")
# If detailed runtime info is needed build with debug runtime
if opt.debug_profile:
m = debug_runtime.create(graph, lib, ctx)
else:
m = graph_runtime.create(graph, lib, ctx)
# Set the network parameters and synthetic input
image = tvm.nd.array(
(np.random.uniform(size=(1, 3, 224, 224))).astype('float32'))
m.set_input(**params)
m.set_input('data', image)
# Perform inference
timer = m.module.time_evaluator("run", ctx, number=4, repeat=opt.measurements)
tcost = timer()
prof_res =
|
np.array(tcost.results)
|
numpy.array
|
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from sklearn.feature_extraction.image import extract_patches_2d
from keras.utils import Sequence, to_categorical
from numpy.core.defchararray import add, replace
import matplotlib.pyplot as plt
class ImageGenerator(Sequence):
'''
Class for generating image batches from the image files
:param image_filenames: 1D numpy array (or list) of file names of the images
:param labels: 1D numpy array with the labels corresponding to each image
:param batch_size: integer giving the batch size to be used in training the network
:param image_shape: tuple of two integers. All images will be compressed to this shape
'''
def __init__(self, image_filenames, labels, batch_size, image_shape):
self.image_filenames, self.labels = image_filenames, labels
self.image_shape, self.batch_size = image_shape, batch_size
def __len__(self):
return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))
# Helper function to read and preprocess images
def _read_image(self, filename):
image = resize(imread(filename), self.image_shape)
# Normalize pixel values between 0 and 1
image = image / 255
return image
def __getitem__(self, idx):
batch_x = self.image_filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([self._read_image(file_name) for file_name in batch_x]),\
to_categorical(np.array(batch_y), num_classes=5)
class PatchGenerator(ImageGenerator):
def __init__(self, image_filenames, labels, batch_size, patch_shape, n_patches):
self.image_filenames, self.labels = image_filenames, labels
self.batch_size = batch_size
self.patch_shape, self.n_patches = patch_shape, n_patches
def _read_image(self, filename):
image = imread(filename)
# Normalize pixel values between 0 and 1
image = image / 255
patches = extract_patches_2d(image, patch_size=self.patch_shape,
max_patches=self.n_patches, random_state=38)
return patches
class ArrayGenerator(Sequence):
'''
Class for generating arrays for training
:param filenames: 1D array of filenames to read from, ending with .npy
:param labels: 1D array of strings, labels
:param batch_size: integer giving the batch size to be used in training the network
'''
def __init__(self, filenames, labels, batch_size):
self.filenames = filenames
self.labels, self.batch_size = labels, batch_size
def __len__(self):
return int(np.ceil(len(self.filenames) / float(self.batch_size)))
def read_array(self, filename):
array = np.load(filename)
return array
def __getitem__(self, idx):
batch_x = self.filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([np.load(file_name) for file_name in batch_x]),\
to_categorical(np.array(batch_y), num_classes=5)
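# Minimal usage sketch for the Sequence subclasses above (illustrative only):
# the file names below are hypothetical placeholders and would need to exist on
# disk, since ImageGenerator reads them with skimage's imread.
def _example_generator_usage():
    filenames = np.array(['image_0.jpeg', 'image_1.jpeg'])  # hypothetical paths
    labels = np.array([0, 3])
    gen = ImageGenerator(filenames, labels, batch_size=2, image_shape=(128, 128))
    batch_images, batch_labels = gen[0]  # first batch; labels one-hot encoded over 5 classes
    print(batch_images.shape, batch_labels.shape)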
def get_generators(n_total, batch_size, image_shape=None, type='array', zeros_left=5000):
'''
Construct generators for training and validation data
Zero grade images are downsampled
:param n_total: number of total images to use (training plus validation)
:param batch_size: batch size used in training
:param image_shape: image size used in training
:param zeros_left: how many images of grade zero should be left in the pool
use a negative value to keep all the zeros
:return: train_gen: generator of training data
test_gen: generator of validation data
'''
# Set the number of training samples
n_train = int(np.ceil(n_total * 0.8))
n_test = int(np.floor(n_total * 0.2))
# Read filenames from a text file listing all the images
full_filenames = np.genfromtxt('../data/train_filenames.txt', dtype=str)
# Read the labels file
full_labels = np.genfromtxt('../data/trainLabels.csv', skip_header=1, dtype=str, delimiter=',')
# Keep only labels of data that can be used in training
full_samples = replace(full_filenames, ".jpeg", "")
full_mask = np.isin(full_labels[:, 0], full_samples)
trainable_labels = np.copy(full_labels[full_mask, :])
# Downsample the zero grade, keeping only the first 5000
# Randomize order
np.random.seed(1234)
np.random.shuffle(trainable_labels)
# Arrange by a stable sort (mergesort)
trainable_labels = np.copy(trainable_labels[trainable_labels[:,1].argsort(kind='mergesort')])
# Remove extra zeros
if zeros_left > 0:
_, counts =
|
np.unique(trainable_labels[:,1], return_counts=True)
|
numpy.unique
|
from scipy import ndimage, interpolate
import numpy as np
from makebeam import makebeam
import matplotlib.pyplot as plt
from sauron_colormap import sauron
from astropy.convolution import convolve_fft
#Masking Code
def emisChannels(cube, start, stop):
emis = cube[:,:,start:stop]
noise = np.concatenate((cube[:,:,:start],cube[:,:,stop:]), axis=2)
return emis, noise
def innersquare(cube, N):
'''
cuts out the inner square (spatial) of the data cube or image
'''
    start = int(len(cube[1])*(7./16.))  # inner square spans 7/16 to 9/16 of the spatial axes
stop = int(len(cube[1])*(9./16.))
if N == 3:
return cube[start:stop, start:stop, :]
elif N == 2:
return cube[start:stop, start:stop]
else:
print("Enter a correct value for N")
def clip(emiscube, noisecube, cutoff):
'''
"clip" the cube: set all pixels with a value lower than a certain times sigma to zero
'''
    # Since the PB correction has already been applied, measure the rms in the inner square of the spatial axes (the PB correction makes the outer edges noisier, so we don't want to include those when measuring the rms).
innersq = innersquare(noisecube, 3)
sigma = np.std(innersq[np.isfinite(innersq)])
emiscube[emiscube<cutoff*sigma] = 0
return emiscube
def smoothclip(cube, cutoff, beamsize, cellsize, start, stop, psf):
'''
Apply a Gaussian smoothing to the data cube.
cube = the raw data cube
emiscube = the part of the cube that contains the emission line
cutoff = the level at which you want to clip (in multiples of the rms noise of the cube)
beamsize = Array of [Bmin, Bmaj, Bpa]
start = first channel that contains emission
stop = last channel that contains emission
'''
#copy the datacube
cube_copy = cube.copy() #a copy of the data cube will be used to do the smoothing. This smoothed cube will then be used as a mask to clip the orignial cube
emiscube = cube.copy()[:,:,start:stop]
# extract relevant information from header (pixel size, beam size)
beam = beamsize[1] / cellsize #beam size in pixels, use the major axis
# convert the FWHM of the beam to the std dev in the Gaussian distribution and use 1.5 times the beam size to convolve with
sigma = 1.5 * beam / np.sqrt(8.*np.log(2.))
#apply a Gaussian blur, using sigma = 4 in the velocity direction (seems to work best). The mode 'nearest' seems to give the best results.
# cube_smoothed = ndimage.filters.gaussian_filter(cube_copy, (4.,sigma,sigma), order=0, mode='nearest')
cube_smoothed = cube_copy.copy() * 0
w2do = np.where(cube.sum(axis=0).sum(axis=0) >0)[0]
for i in range(0,w2do.size): cube_smoothed[:,:,w2do[i]] = convolve_fft(cube[:,:,w2do[i]], psf)
#clip the copied cube, using the smaller cube where the emission is
smoothemis, smoothnoise = emisChannels(cube_smoothed, start, stop) #this separates the channels with emission from the ones without emission and glues the latter bits together in one "noise cube"
clipped = clip(smoothemis, smoothnoise, cutoff)
#mask the original cube with the smoothed cube
emiscube[clipped == 0] = 0
return emiscube
#Plotting
def makeplots(f,xsize,ysize,vsize,cellsize,dv,beamsize,posang=0,overcube=False,pvdthick=2,nconts=11.,title=False, **kwargs):
# ;;;; Create plot data from cube ;;;;
mom0rot=f.sum(axis=2)
if np.any(overcube): mom0over=overcube.sum(axis=2)
x1=
|
np.arange(-xsize/2.,xsize/2.,cellsize)
|
numpy.arange
|
# Licensed under a 3-clause BSD style license - see LICENSE
'''
Provides the mathematical framework for simulating distributions of proper
motions based on the distance and Galactic coordinates of a set of Galactic
stars.
'''
import numpy as np
import multiprocessing
import itertools
__all__ = ['calculate_proper_motions']
def calculate_proper_motions(d, l, b, temp, N):
'''
Calculates the statistical distribution of proper motions of a sightline,
from a set of theoretical sources within the particular sky area.
Parameters
----------
d : numpy.ndarray
The set of distances to each theoretical source.
l : numpy.ndarray
The Galactic longitudes of each simulated source.
b : numpy.ndarray
The Galactic latitude of each source.
temp : numpy.ndarray
The simulated effective temperature of each object.
N : integer
The number of realisations of each object's Galactic velocities to draw
from its dispersion relation.
Returns
-------
pm : numpy.ndarray
The Galactic and Equatorial proper motions of the ``len(d)`` simulated
sources, for the three simulated Galactic components each with ``N``
velocity dispersion realisations.
type_fracs : numpy.ndarray
The density-based weightings of each of the three Galactic components
for each source in ``d``, broadcast to shape ``(len(d), 3, N)`` to match
the shape of ``pm``.
'''
# Height of the Sun above the Galactic plane from Juric et al. (2008, ApJ, 673, 864).
z_sol = 0.025
# Solar Galactic radius from Mroz et al. (2019, ApJ, 870, 10).
r_sol = 8.09
# Scale lengths also from Juric et al. (2008).
l_thin, h_thin = 2.6, 0.3
l_thick, h_thick = 3.6, 0.9
f_thick = 0.13
f_h, q, n = 0.0051, 0.64, 2.77
h_b = 0.8
# Bulge-to-disc normalisation, from Jackson et al. (2002, MNRAS, 337, 749).
zeta_b = 2
# These coordinates do not account for any z_sol offset, and hence z = 0 is at b = 0.
r, z = convert_dist_coord_to_cylindrical(d, l, b, r_sol)
# This comes out with shape (len(d), 3); have to factor an additional 1/N in the weights
# so that when we histogram an extra N times as many proper motions we smooth to the same
# counts as the original data.
fractions = fraction_density_component(r, z, r_sol, z_sol, l_thin, h_thin, l_thick, h_thick,
f_thick, f_h, q, n, zeta_b, h_b).T / N
# We then tile it to (len(d), len(types), N)
type_fracs = np.tile(fractions[:, :, np.newaxis], (1, 1, N))
# V_a values come from Robin et al. (2003), A&A, 409, 523; Pasetto et al. (2012), A&A, 547,
# A70; & (220km/s Halo drift velocity from Reddy (2009), Proc. IAU Symp., 265)
# 240 km/s Halo V_a from Golubov et al. (2013), A&A, 557, A92.
drift_vels = np.array([10, 49, 240], dtype=float)
# Assume our rotation curve was constructed from thin disc objects, so take the relative
# asymmetric drift from that. Objects in Mroz et al. are Classical Cepheids, so that's fine.
drift_vels -= drift_vels[0]
# Derive Oort constants from Pecaut & Mamajek (2013, ApJS, 208, 9) and
# <NAME> (2003, ApJ, 599, 275).
# First, get (B - V)_0 from Teff based on P&M scalings:
bv0 = np.empty_like(temp)
bv0[temp < 10000] = 5.07836 * np.exp(-0.27083 * temp[temp < 10000] / 1000) - 0.40739
bv0[temp >= 10000] = 0.69012 * np.exp(-0.08179 * temp[temp >= 10000] / 1000) - 0.35093
# Next, get A/B from O&D based on intrinsic B-V colour:
A = 1.94553 * bv0 + 11.33138
B = -2.63360 * bv0 - 13.60611
rng = np.random.default_rng()
pm = np.empty((len(d), 3, N, 4))
n_pool = 40
pool = multiprocessing.Pool(n_pool)
counter = np.arange(0, len(d))
iter_group = zip(counter, itertools.repeat(drift_vels), itertools.repeat(rng),
itertools.repeat([l, b, d]), itertools.repeat(N), itertools.repeat([r, z]),
itertools.repeat([r_sol, z_sol]), itertools.repeat([A, B]),
itertools.repeat([l_thin, l_thick]))
for stuff in pool.imap_unordered(calc_pm, iter_group,
chunksize=max(1, len(d) // n_pool)):
j, mu_a, mu_d, mu_l, mu_b = stuff
pm[j, :, :, 0] = mu_a
pm[j, :, :, 1] = mu_d
pm[j, :, :, 2] = mu_l
pm[j, :, :, 3] = mu_b
pool.close()
return pm, type_fracs
def convert_dist_coord_to_cylindrical(d, l, b, r_sol):
'''
Convert distance and Galactic lat/lon to Galactocentric Cartesian coordinates.
Parameters
----------
d : numpy.ndarray
Distance to a set of theoretical objects.
l : numpy.ndarray
Galactic longitude for each source.
b : numpy.ndarray
Galactic latitude for all sources.
r_sol : float
Galactocentric Cylindrical radius of the Sun from the Galactic center
in kpc.
Returns
-------
r : numpy.ndarray
The Galactocentric Cylindrical radius of each object from the Galactic
center.
z : numpy.ndarray
The Galactocentric Cylindrical height of each object from the Galactic
plane.
'''
# Following notation where X points towards the GC from the Sun, with a right-handed system,
# but choice of handedness is irrelevant here since we only care about radial distance.
x = d * np.cos(np.radians(l)) * np.cos(np.radians(b)) - r_sol
y = d * np.sin(np.radians(l)) * np.cos(np.radians(b))
z = d * np.sin(np.radians(b))
r = np.sqrt(x**2 + y**2)
return r, z
def fraction_density_component(r, z, r_sol, z_sol, l_thin, h_thin, l_thick, h_thick, f_thick,
f_halo, q, n, zeta_b, h_b):
'''
Calculates the relative densities of each of the Galactic components used
in simulating potential proper motions of objects. At present, this is the
thin and thick discs, and the (outer) Galactic halo.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
r_sol : float
The Sun's Galactocentric Cylindrical radius.
z_sol : float
Solar Galactocentric Cylindrical height above the plane.
l_thin : float
Scale length of the thin disc in the radial direction.
h_thin : float
Vertical scale length of the thin disc.
l_thick : float
Thick disc radial scale length.
h_thick : float
Thick disc vertical scale length.
f_thick : float
Relative normalisation of the thick disc to the thin disc density.
f_halo : float
Normalisation of the outer Galactic halo to the thin disc.
q : float
Oblateness of the outer Galactic halo in the vertical direction.
n : float
Scaling relation of the halo with Galactocentric (oblate) Spherical
distance.
zeta_b : float
Relative normalisation of the Galactic bulge to the thin disc.
h_b : float
Spherical scale length of the Galactic bulge.
Returns
-------
rel_dens : numpy.ndarray
The relative densities of the implemented Galactic components.
'''
# First three densities from eq 22-24 and table 10 of Juric et al. (2008), plus
# corrections and extension from eq 9-12 of Ivezic et al. (2008, ApJ, 684, 287), albeit with the
# exception of the removal of rho_d(r_sol, 0) as a normalising factor in all functions
f_thin = thin_disc_density(r, z, r_sol, z_sol, l_thin, h_thin)
f_thick = thick_disc_density(r, z, r_sol, z_sol, l_thick, h_thick, f_thick)
f_halo = halo_density(r, z, r_sol, f_halo, q, n)
# To avoid a singularity within the solar circle, just set the halo component density
# to its value at r_sol, allowing for the thin+thick disc to overcome it towards
# the Galactic center. Test with e.g. Sandage (1987), AJ, 93, 610, Fig 2b.
f_halo[r < r_sol] = f_halo[np.argmin(np.abs(r - r_sol))]
# Currently not using f_bulge due to no parameterisation for its proper
# motion at present.
    # f_bulge = bulge_density(r, z, zeta_b, h_b)
norm = f_thin + f_thick + f_halo # + f_bulge
return np.array([f_thin, f_thick, f_halo]) / norm
def thin_disc_density(r, z, r_sol, z_sol, l_thin, h_thin):
'''
Calculates the relative density of the thin disc.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
r_sol : float
The Sun's Galactocentric Cylindrical radius.
z_sol : float
Solar Galactocentric Cylindrical height above the plane.
l_thin : float
Scale length of the thin disc in the radial direction.
h_thin : float
Vertical scale length of the thin disc.
Returns
-------
disc_density : numpy.ndarray
The density of the thin disc evaluated at ``r`` and ``z``.
'''
return disc_density(r, z, l_thin, h_thin, r_sol, z_sol)
def disc_density(r, z, l, h, r_sol, z_sol):
'''
Calculates the relative density of an exponential disc, either
the thin or thick discs, depending on ``l`` and ``h``.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
r_sol : float
The Sun's Galactocentric Cylindrical radius.
z_sol : float
Solar Galactocentric Cylindrical height above the plane.
l : float
Scale length of the disc in the radial direction.
h : float
Vertical scale length of the disc.
Returns
-------
disc_density : numpy.ndarray
The density of the disc evaluated at ``r`` and ``z``.
'''
return np.exp(-(r - r_sol) / l - np.abs(z + z_sol)/h)
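# Quick sanity check of the exponential disc profile above (illustrative only):
# at the solar position, r = r_sol and z = -z_sol, both exponents vanish and the
# relative density evaluates to exactly 1.
def _disc_density_solar_check():
    return disc_density(r=8.09, z=-0.025, l=2.6, h=0.3, r_sol=8.09, z_sol=0.025)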
def thick_disc_density(r, z, r_sol, z_sol, l_thick, h_thick, f_thick):
'''
Calculates the relative density of the thick disc.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
r_sol : float
The Sun's Galactocentric Cylindrical radius.
z_sol : float
Solar Galactocentric Cylindrical height above the plane.
l_thick : float
Scale length of the thick disc in the radial direction.
h_thick : float
Vertical scale length of the thick disc.
f_thick : float
Relative scaling between the thin disc and thick disc.
Returns
-------
disc_density : numpy.ndarray
The density of the thick disc evaluated at ``r`` and ``z``.
'''
return f_thick * disc_density(r, z, l_thick, h_thick, r_sol, z_sol)
def halo_density(r, z, r_sol, f_h, q, n):
'''
Calculates the relative density of the halo.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
r_sol : float
The Sun's Galactocentric Cylindrical radius.
f_h : float
The normalising constant between thin disc and halo parameterisations.
q : float
Oblateness of the halo.
n : float
Power law scaling relation for halo density.
Returns
-------
halo_density : numpy.ndarray
The density of the halo evaluated at ``r`` and ``z``.
'''
return f_h * (r_sol / np.sqrt(r**2 + (z/q)**2))**n
def bulge_density(r, z, zeta_b, h_b):
'''
Calculates the relative density of the Galactic bulge. Currently
not implemented.
Parameters
----------
r : numpy.ndarray
Galactocentric Cylindrical radii for each source used to simulate
proper motions for the particular sightline.
z : numpy.ndarray
Galactocentric Cylindrical heights above the Galactic plane for each
source.
zeta_b : float
Normalising constant, setting the relative densities between the
bulge and thin disc.
h_b : float
Scale length of the bulge density.
Returns
-------
bulge_density : numpy.ndarray
The density of the bulge evaluated at ``r`` and ``z``.
'''
# Bulge parameters from Jackson et al. (2002), equation 8.
x = np.sqrt(r**2 + z**2)
# We drop C entirely, assuming it to be the equivalent of rho_d(r_sol, 0) for Jackson et al.
return zeta_b * np.exp(-x/h_b)
def calc_pm(iterable):
'''
Calculate an individual simulated object's distribution of proper motions
based on its different motions if it were a thin disc, thick disc, or outer
halo object, combined with relative weightings for its assignment to those
components of the Galaxy.
Parameters
----------
iterable : list
The list of various variables passed through to ``calc_pm`` by
``multiprocessing``.
Returns
-------
j : integer
The index of the particular simulated source having proper motions
derived for it.
mu_a : numpy.ndarray
The simulated proper motions for this object, ``N`` simulated velocities
per ``drift_vels`` Galactic component, in right ascension.
mu_d : numpy.ndarray
The corresponding declination proper motions to ``mu_a``.
_mu_l : numpy.ndarray
Transposed Galactic longitude proper motions for the source's ``mu_a``
and ``mu_d``.
_mu_b : numpy.ndarray
Transposed Galactic latitude proper motions for the source's ``mu_a``
and ``mu_d``.
'''
j, drift_vels, rng, [l, b, d], N, [r, z], [r_sol, z_sol], \
[A, B], [l_thin, l_thick] = iterable
mu_a, mu_d = np.empty((len(drift_vels), N), float), np.empty((len(drift_vels), N), float)
_mu_l, _mu_b = np.empty((len(drift_vels), N), float), np.empty((len(drift_vels), N), float)
sinl, cosl = np.sin(np.radians(l[j])), np.cos(np.radians(l[j]))
sinb, cosb = np.sin(np.radians(b[j])), np.cos(np.radians(b[j]))
d_ip = cosb * d[j]
# Rotation+mirror matrix, to put R-phi-z dispersion along Vr-Vt-Vz plane
rot = np.array(
[[(r[j]**2 + d_ip**2 - r_sol**2) / (2 * r[j] * d_ip), r_sol / r[j] * sinl, 0],
[r_sol / r[j] * sinl, -(r[j]**2 + d_ip**2 - r_sol**2) / (2 * r[j] * d_ip), 0],
[0, 0, 1]])
Us, Vs, Ws = 0, 0, 0
Usol, Vsol, Wsol = 11.1, 12.2, 7.3 # km/s
sinbeta = d[j] * sinl / r[j]
cosbeta = (r_sol**2 + r[j]**2 - d[j]**2) / (2 * r_sol * r[j])
Theta_sol = 233.6 # km/s
__b = 0.72
a1, a2, a3 = 235.0, 0.89, 1.31
x = (r[j] / r_sol) / a2
theta_d_sq = a1**2 * __b * 1.97 * x**1.22 / (x**2 + 0.78**2)**1.43
theta_h_sq = a1**2 * (1 - __b) * x**2 * (1 + a3**2) / (x**2 + a3**2)
Theta_r = np.sqrt(theta_d_sq + theta_h_sq)
U1 = Us * cosbeta + (Vs + Theta_r) * sinbeta - Usol
V1 = -Us*sinbeta + (Vs + Theta_r) * cosbeta - Vsol - Theta_sol
W1 = Ws - Wsol
# Based on Mroz et al. (2019), our components of Heliocentric Cylindrical
# coordinates are:
Vr = U1 * cosl + V1 * sinl
Vt = V1 * cosl - U1 * sinl
Vz = W1
for i in range(len(drift_vels)):
drift_vel_rtz = np.matmul(rot, np.array([[0], [-drift_vels[i]], [0]]))
mean = np.array([Vr + drift_vel_rtz[0, 0], Vt + drift_vel_rtz[1, 0], Vz])
if i == 0:
cov = find_thin_disc_dispersion(r[j], z[j], l[j], b[j], d[j], A[j], B[j], l_thin,
r_sol)
if i == 1:
cov = find_thick_disc_dispersion(r[j], l[j], b[j], d[j], l_thick, r_sol)
if i == 2:
cov = find_halo_dispersion(l[j], b[j], d[j])
new_uvw = rng.multivariate_normal(mean, cov, N)
v_d, v_l, v_z = new_uvw[:, 0], new_uvw[:, 1], new_uvw[:, 2]
# 1 km/s/kpc = 0.2108 mas/year
mu_lstar = v_l / d[j] * 0.2108
mu_b = 0.2108/d[j] * (v_z * cosb - v_d * sinb)
mu_a[i, :], mu_d[i, :] = galactic_to_equatorial(np.radians(l[j]), np.radians(b[j]),
mu_lstar, mu_b)
_mu_l[i, :], _mu_b[i, :] = mu_lstar, mu_b
return (j, mu_a, mu_d, _mu_l, _mu_b)
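# Quick numeric check of the unit conversion used in calc_pm (illustrative only):
# 1 km/s at 1 kpc corresponds to 0.2108 mas/yr, so a 10 km/s tangential velocity
# at 2 kpc is roughly 1.05 mas/yr.
def _proper_motion_unit_check(v_t=10.0, d=2.0):
    return v_t / d * 0.2108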
def find_thin_disc_dispersion(r, z, l, b, d, A, B, h, r_sol):
'''
Calculate the dispersion in the thin disc as a function of Galactic position.
Parameters
----------
r : float
The Galactic Cylindrical radius of the object.
z : float
The Galactic Cylindrical height of the source.
l : float
Galactic longitude of the star.
    b : float
        Galactic latitude of the star.
    d : float
        Distance to the star in kpc.
A : float
Oort constant.
B : float
Oort constant.
h : float
Scale length of the thin disc.
r_sol : float
Galactic Cylindrical radius of the Sun.
Returns
-------
cov_rtz : numpy.ndarray
The covariance matrix dispersion vector, in Galactic Cylindrical
coordinates.
'''
# Data from Pasetto et al. (2012), A&A, 547, A71, tables 4-9
_r_sol = 8.5 # kpc; have to use the Pasetto et al. result of R0 ~ 8.4-8.6 kpc for consistency
# Assume that sig_rr^2 goes as R^2 exp(-2 R / h) by a stable Toomre Parameter (Lewis & Freeman
# 1989, AJ, 97, 139) where rotation curve is flat (Mroz et al.), where h is the scale length
# of the disc. Pasetto et al. have a sig_rr^2(R0, 0) ~ 715 km^2/s^2
sig_rr2_00 = 715.93 # km^2/s^2
sig_rr2_r0 = sig_rr2_00 * (r / _r_sol)**2 * np.exp(-2 * (r - _r_sol) / h)
# Now assume that the vertical gradient of the thin disc of Pasetto et al. goes for ~1kpc
# with a gradient of roughly 1200km^2/s^2/kpc at R=R0, but scaling the same as the dispersion.
sig_rr2_z_grad_0 = 1236.97 # km^2/s^2/kpc
sig_rr2_z_grad = sig_rr2_z_grad_0 # * (r / r_sol)**2 * np.exp(-2 * (r - r_sol) / h)
sig_rr2 = sig_rr2_r0 + min(1, np.abs(z)) * sig_rr2_z_grad
# Assume that the phi variance sigma can be approximated from the relation with
# the R sigma:
sig_phiphi2 = (-B / (A - B)) * sig_rr2
# We assume the correlation along the R-phi covariance is zero, following Vallenari et al.
sig_rphi2 = 0
# Following the Pasetto et al. discussion, we conclude that the phi-z dispersion is
# essentially zero, to their uncertainties, and force it to be so:
sig_phiz2 = 0
# Similar to sig_rr, we assume that sig_zz^2 = sig_zz^2(R0, 0) * exp(-(r - r_sol) / h)
# where sig_zz^2(R0, 0) ~ 243 km^2/s^2
sig_zz2_00 = 243.71 # km^2/s^2
sig_zz2_r0 = sig_zz2_00 * np.exp(-(r - _r_sol) / h)
# Now, again, extrapolate the vertical gradient seen in sig_zz(R, z) to 1 kpc above/below plane
# with a gradient, again, that scales with the radial gradient. Assume ~300km^2/s^2/kpc R = R0.
sig_zz2_z_grad_0 = 306.84 # km^2/s^2/kpc
sig_zz2_z_grad = sig_zz2_z_grad_0 # * np.exp(-(r - r_sol) / h)
sig_zz2 = sig_zz2_r0 + min(1, np.abs(z)) * sig_zz2_z_grad
# Given Vallenari et al. (2006), A&A, 451, 125, following Cuddeford & Amendt (1991), we
# assume that the vertical tilt goes as the gradient of sig^2_rz, which is given by
# the difference between sig^2_rr(R, 0) and sig^2_zz(R, 0); we assume lambda = 0.6, for now.
dsig2_rz_dz = 0.6 * (sig_rr2_r0 - sig_zz2_r0) / r
sig_rz2 = z * dsig2_rz_dz
# If correlation gets above one, force sig_rz2 = +- sqrt(sig_rr^2) * sqrt(sig_zz^2) at largest
if np.abs(sig_rz2) > np.sqrt(sig_rr2) * np.sqrt(sig_zz2):
sig_rz2 = 0.99 * np.sign(sig_rz2) * np.sqrt(sig_rr2) * np.sqrt(sig_zz2)
cov = np.empty((3, 3), float)
cov[0, 0] = sig_rr2
cov[1, 1] = sig_phiphi2
cov[2, 2] = sig_zz2
cov[0, 1] = cov[1, 0] = sig_rphi2
cov[0, 2] = cov[2, 0] = sig_rz2
cov[1, 2] = cov[2, 1] = sig_phiz2
d_ip = np.cos(np.radians(b)) * d
# Rotation matrix, to put R-phi-z dispersion along Vr-Vt-Vz plane
sinl = np.sin(np.radians(l))
rot = np.array(
[[(r**2 + d_ip**2 - r_sol**2) / (2 * r * d_ip), r_sol / r * sinl, 0],
[r_sol / r * sinl, -(r**2 + d_ip**2 - r_sol**2) / (2 * r * d_ip), 0],
[0, 0, 1]])
cov_rtz = np.matmul(rot, np.matmul(cov, rot.T))
return cov_rtz
def find_thick_disc_dispersion(r, l, b, d, h, r_sol):
'''
Calculate the dispersion in the thick disc as a function of Galactic position.
Parameters
----------
r : float
The Galactic Cylindrical radius of the object.
    l : float
        Galactic longitude of the star.
    b : float
        Galactic latitude of the star.
    d : float
        Distance to the star in kpc.
h : float
Scale length of the thick disc.
r_sol : float
Galactic Cylindrical radius of the Sun.
Returns
-------
cov_rtz : numpy.ndarray
The covariance matrix dispersion vector, in Galactic Cylindrical
coordinates.
'''
# Data from Pasetto et al. (2012), A&A, 547, A70, table 3-4
# Assume sig_phiphi / sig_rr = const, so scale both the same. Currently assume there are no
# cross-term products.
_r_sol = 8.5 # kpc; use the Pasetto et al. thin disc rough R0, figuring it should be equal
if r < _r_sol:
sig_rr2 = 60.2**2 * (r / _r_sol)**2 * np.exp(-2 * (r - _r_sol) / h)
sig_rphi2 = 0 # 37.6 km/s(!)
sig_rz2 = 0 # 13.3 km/s(!)
sig_phiphi2 = 44.7**2 * (r / _r_sol)**2 * np.exp(-2 * (r - _r_sol) / h)
sig_phiz2 = 0 # 4.0 km/s(!)
sig_zz2 = 37.2**2 * np.exp(-(r - _r_sol) / h)
else:
sig_rr2 = 55.8**2 * (r / _r_sol)**2 * np.exp(-2 * (r - _r_sol) / h)
sig_rphi2 = 0 # 35.5 km/s(!)
sig_rz2 = 0 # 9.6 km/s(!)
sig_phiphi2 = 45.2**2 * (r / _r_sol)**2 * np.exp(-2 * (r - _r_sol) / h)
sig_phiz2 = 0 # 3.8 km/s(!)
sig_zz2 = 36.3**2 *
|
np.exp(-(r - _r_sol) / h)
|
numpy.exp
|
import numpy as np
from scipy.spatial.distance import cdist
from time import time
# this is to recompute each time the attractivities of cells is modified during simulation
def sum_by_group(values, groups):
""" see: https://stackoverflow.com/questions/4373631/sum-array-by-number-in-numpy
alternative method with meshgrid led to memory error """
order = np.argsort(groups)
groups = groups[order]
values = values[order]
values.cumsum(out=values)
index = np.ones(groups.shape[0], 'bool')
index[:-1] = groups[1:] != groups[:-1]
values = values[index]
groups = groups[index]
values[1:] = values[1:] - values[:-1]
return values, groups
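# Minimal sketch of the helper above (illustrative only). Note that sum_by_group
# sorts and accumulates its inputs in place, so copies are passed here.
def _sum_by_group_example():
    values = np.array([1., 2., 3., 4.])
    groups = np.array([0, 1, 0, 1])
    sums, ids = sum_by_group(values.copy(), groups.copy())
    # sums -> array([4., 6.]) and ids -> array([0, 1])
    return sums, ids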
def vectorized_choice(prob_matrix, axis=1):
"""
selects index according to weights in `prob_matrix` rows (if `axis`==0), cols otherwise
see https://stackoverflow.com/questions/34187130/fast-random-weighted-selection-across-all-rows-of-a-stochastic-matrix
"""
s = prob_matrix.cumsum(axis=axis)
r = np.random.rand(prob_matrix.shape[1-axis]).reshape(2*(1-axis)-1, 2*axis - 1)
k = (s < r).sum(axis=axis)
k = k.astype(np.uint32)
return k
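# Minimal sketch for vectorized_choice (illustrative only): with the default
# axis=1 each row of prob_matrix is treated as a probability distribution over
# columns, and one column index is drawn per row.
def _vectorized_choice_example():
    prob_matrix = np.array([[0.1, 0.9],
                            [0.8, 0.2]], dtype=np.float32)
    return vectorized_choice(prob_matrix, axis=1)  # shape (2,), one sampled index per row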
for _ in range(100):
n_cells, n_squares_per_side = 100, 10
n_squares = n_squares_per_side ** 2
cells = np.arange(0, n_cells)
squares = np.arange(0, n_squares)
attractivity_cells = np.random.uniform(size=n_cells).astype(np.float32)
cells_squares = np.random.choice(squares, size=n_cells)
squares_with_cells =
|
np.unique(cells_squares)
|
numpy.unique
|
import os
import numpy as np
import multiprocessing as mp
import math
import sys
sys.path.append('/ibex/scratch/projects/c2052/Lung_CAD_NMI/source_codes')
import Tool_Functions.Functions as Functions
import analysis.connected_region2d_and_scale_free_stat as connected_region
import analysis.connect_region_detect as connect_3d
np.set_printoptions(threshold=np.inf)
ibex = False
if not ibex:
top_directory_rescaled_ct = '/home/zhoul0a/Desktop/prognosis_project/original_follow_up/rescaled_ct_follow_up/'
    # where the rescaled ct arrays are saved: top_directory_rescaled_ct/patient_id/patient_id_time.npy
top_directory_check_point = '/home/zhoul0a/Desktop/prognosis_project/check_points/'
    # where the checkpoints are stored: top_directory_check_point/model_type/direction/best_model-direction.pth
top_directory_masks = '/home/zhoul0a/Desktop/prognosis_project/original_follow_up/rescaled_masks_refined/'
# where to save the predicted masks, which will form: top_directory_output/mask_type/patient_id/id_time_mask.npz
top_directory_enhanced = '/home/zhoul0a/Desktop/prognosis_project/original_follow_up/rescaled_ct_enhanced_general_sampling/'
    # where to save the normalized ct with the airways and blood vessels removed and the lesions enhanced
os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1' # use two V100 GPU
else:
top_directory_rescaled_ct = '/ibex/scratch/projects/c2052/prognosis_project/'
top_directory_check_point = '/ibex/scratch/projects/c2052/prognosis_project/'
top_directory_output = '/ibex/scratch/projects/c2052/prognosis_project/'
def distance_l2(loc_1, loc_2):
"""
:param loc_1: a tuple (x_1, y_1)
:param loc_2: a tuple (x_2, y_2)
:return: L2 distance between loc_1 and loc_2
"""
x_difference = loc_1[0] - loc_2[0]
y_difference = loc_1[1] - loc_2[1]
return math.sqrt(x_difference*x_difference + y_difference*y_difference)
def func_parallel(func, list_inputs, leave_cpu_num=1):
"""
:param func: func(list_inputs[i])
:param list_inputs: each element is the input of func
:param leave_cpu_num: num of cpu that not use
:return: [return_of_func(list_inputs[0]), return_of_func(list_inputs[1]), ...]
"""
cpu_cores = mp.cpu_count() - leave_cpu_num
pool = mp.Pool(processes=cpu_cores)
list_outputs = pool.map(func, list_inputs)
pool.close()
return list_outputs
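# Minimal sketch for func_parallel (illustrative only): the callable must be
# picklable (a module-level function or a functools.partial of one), because it
# is shipped to worker processes by multiprocessing.Pool.map.
def _func_parallel_example():
    from functools import partial
    inputs = [(0, 0), (3, 4), (6, 8)]
    distances = func_parallel(partial(distance_l2, (0, 0)), inputs)
    # distances -> [0.0, 5.0, 10.0]
    return distances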
def get_max_diameter_one_region(loc_list, strict=False):
"""
    :param strict: if False, we assume the region is roughly round, which speeds up the computation considerably.
    :param loc_list: [(x_1, y_1), (x_2, y_2), ...]
    :return: a float, the maximum L2 distance among these locations
"""
max_diameter = 0
num_locations = len(loc_list)
if not strict:
        first_loc = loc_list[0]
        for loc in loc_list[1::]:
            distance = distance_l2(first_loc, loc)
if distance > max_diameter:
max_diameter = distance
mid_loc = loc_list[int(num_locations / 3)]
for loc in loc_list:
distance = distance_l2(mid_loc, loc)
if distance > max_diameter:
max_diameter = distance
mid_loc = loc_list[int(2 * num_locations / 3)]
for loc in loc_list:
distance = distance_l2(mid_loc, loc)
if distance > max_diameter:
max_diameter = distance
return max_diameter
else:
for central_loc in loc_list[0:int(num_locations/2)]:
for loc in loc_list:
distance = distance_l2(central_loc, loc)
if distance > max_diameter:
max_diameter = distance
return max_diameter
def find_max_diameter_one_slice(id_loc_dict_rim):
"""
:param id_loc_dict_rim: {connect_id: [(x_1, y_1), (x_2, y_2), ...]}
:return: a dict, {connect_id: max L2 distance inside this region}
"""
key_list = list(id_loc_dict_rim.keys())
id_diameter_dict = {}
for key in key_list:
max_diameter = get_max_diameter_one_region(id_loc_dict_rim[key])
id_diameter_dict[key] = max_diameter
return id_diameter_dict
def find_max_diameter(rim_info_list):
"""
:param rim_info_list: the return of: connected_region.abstract_connected_regions(z_axes_list, 'rim'):
each element is [return_array, id_length_dict, id_loc_dict], return_array[:, :, 0] is the length map,
return_array[:, :, 1] is the id map
:return: a list of dict: [{connect_id: max_diameter}], each list element corresponding to rim_info_list.
"""
return_list = []
for rim_info in rim_info_list:
id_loc_dict = rim_info[2]
id_diameter_dict = find_max_diameter_one_slice(id_loc_dict)
return_list.append(id_diameter_dict)
return return_list
def extend_one_round_one_region(loc_rim, loc_region):
"""
make the region bigger
:param loc_rim: a set, record the locations of the rim points for this region, [(x_1, y_1), (x_2, y_2), ...]
:param loc_region: a set, record ALL locations of this region, [(x_1, y_1), (x_2, y_2), ...]
:return: the new loc_rim and the new loc_region (extended by one round)
"""
new_loc_rim = set()
for loc in loc_rim:
new_loc_rim.add((loc[0] - 1, loc[1]))
new_loc_rim.add((loc[0] + 1, loc[1]))
new_loc_rim.add((loc[0], loc[1] - 1))
new_loc_rim.add((loc[0], loc[1] + 1))
# print("new_loc_rim", new_loc_rim)
new_loc_rim = new_loc_rim - (new_loc_rim & loc_region) # note for set, &, -, | are of same priority.
# print("new", new_loc_rim)
new_loc_region = new_loc_rim | loc_region
return new_loc_rim, new_loc_region
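# Minimal sketch for extend_one_round_one_region (illustrative only): a single
# pixel grows into its 4-neighbourhood after one round, so the new rim holds the
# four neighbours and the new region holds five points in total.
def _extend_one_round_example():
    rim = {(5, 5)}
    region = {(5, 5)}
    new_rim, new_region = extend_one_round_one_region(rim, region)
    return new_rim, new_region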
def subtract_one_round_one_region(loc_rim, loc_region):
"""
make the region smaller
:param loc_rim: a set, record the locations of the rim points for this region, [(x_1, y_1), (x_2, y_2), ...]
:param loc_region: a set, record ALL locations of this region, [(x_1, y_1), (x_2, y_2), ...]
    :return: the new loc_rim and the new loc_region (shrunk by one round)
"""
new_loc_rim = set()
for loc in loc_rim:
new_loc_rim.add(loc)
new_loc_rim.add((loc[0] - 1, loc[1]))
new_loc_rim.add((loc[0] + 1, loc[1]))
new_loc_rim.add((loc[0], loc[1] - 1))
new_loc_rim.add((loc[0], loc[1] + 1))
new_loc_rim = (new_loc_rim & loc_region) - loc_rim
new_loc_region = loc_region - loc_rim
return new_loc_rim, new_loc_region
def extend_one_slice(id_loc_dict_rim, id_loc_dict_region, extend_ratio=1.25, max_diameter=70):
max_diameter_dict = find_max_diameter_one_slice(id_loc_dict_rim)
key_list_2 = list(max_diameter_dict.keys())
key_list = list(id_loc_dict_region.keys())
assert len(key_list_2) >= len(key_list)
for key in key_list:
if max_diameter_dict[key] > max_diameter:
max_diameter_dict[key] = max_diameter
for region_id in key_list:
loc_rim = set(id_loc_dict_rim[region_id])
loc_region = set(id_loc_dict_region[region_id])
if extend_ratio > 1:
num_extend = round((extend_ratio - 1) * round(max_diameter_dict[region_id]) / 2)
for layer in range(num_extend):
new_loc_rim, new_loc_region = extend_one_round_one_region(loc_rim, loc_region)
loc_rim = new_loc_rim
loc_region = new_loc_region
'''
            # observe how the rim is expanded layer by layer
image = np.zeros([512, 512], 'float32')
print("loc_rim", loc_rim)
for loc in loc_rim:
image[loc[0], loc[1]] = 1
print(loc_rim)
print("layer", layer)
Functions.image_show(image)
'''
id_loc_dict_rim[region_id] = loc_rim
id_loc_dict_region[region_id] = loc_region
else:
num_subtract = int((1 - extend_ratio) * int(max_diameter_dict[region_id]) / 2)
for layer in range(num_subtract):
new_loc_rim, new_loc_region = subtract_one_round_one_region(loc_rim, loc_region)
loc_rim = new_loc_rim
loc_region = new_loc_region
'''
            # observe how the rim is shrunk layer by layer
image = np.zeros([512, 512], 'float32')
print("loc_rim", loc_rim)
for loc in loc_rim:
image[loc[0], loc[1]] = 1
print(loc_rim)
print("layer", layer)
Functions.image_show(image)
'''
id_loc_dict_rim[region_id] = loc_rim
id_loc_dict_region[region_id] = loc_region
return id_loc_dict_rim, id_loc_dict_region
def extend_tubes(input_mask, leave_connected_component=None, extend_ratio=1.25, max_diameter=50):
"""
:param input_mask: binary numpy array with shape [x, y, z]
    :param leave_connected_component: how many 3D connected regions to keep; None means do not check 3D connectivity.
    :param extend_ratio: the mask performs well for bronchi but not as well for the large airways. Extend the mask
    according to their diameter, which avoids leaving airway walls uncovered.
:param max_diameter: if the diameter of the connected component is greater than the max_diameter, replace it by the
max_diameter.
:return: binary numpy array with shape [x, y, z] in 'float32'
"""
if leave_connected_component is not None:
id_loc_dict = connect_3d.get_sorted_connected_regions(input_mask, strict=False)
refined_mask = np.zeros(np.shape(input_mask), 'float32')
key = 1
if len(list(id_loc_dict.keys())) < leave_connected_component:
leave_connected_component = len(list(id_loc_dict.keys()))
while key < leave_connected_component + 1:
locations = id_loc_dict[key]
for loc in locations:
refined_mask[loc] = 1
key += 1
print("finally there are:", np.sum(refined_mask), "positive points")
else:
refined_mask = input_mask
refined_mask = np.swapaxes(refined_mask, 0, 2)
z_axes_list = list(refined_mask)
extended_mask = np.zeros(np.shape(refined_mask), 'float32')
rim_info_list, region_info_list = connected_region.abstract_connected_regions(z_axes_list, 'both')
# the length of the rim_info_list is equal to that of region_info_list, like 512
num_slices = len(z_axes_list)
for slice_id in range(num_slices):
_, extended_region_loc_dict = extend_one_slice(rim_info_list[slice_id][2], region_info_list[slice_id][2],
extend_ratio, max_diameter=max_diameter)
key_list = list(extended_region_loc_dict.keys())
for key in key_list:
for loc in extended_region_loc_dict[key]:
extended_mask[slice_id, loc[0], loc[1]] = 1
extended_mask = np.swapaxes(extended_mask, 0, 2)
return extended_mask
def remove_airway_and_blood_vessel_one_scan_upperlobe(patient_id, time, extend_ratio=1.1, max_diameter=50, show=True):
rescaled_ct = np.load(top_directory_rescaled_ct + patient_id + '/' + patient_id + '_' + time + '.npy')
lung_mask = np.load(top_directory_masks + 'lung_masks/' + patient_id + '/' + patient_id + '_' + time +
'_mask_refine.npz')['array']
lung_mask_box = Functions.get_bounding_box(lung_mask)
print("lung_mask_box:", lung_mask_box)
lung_length_z = lung_mask_box[2][1] - lung_mask_box[2][0]
superior_start = int(lung_mask_box[2][1] - lung_length_z * 0.32988803223955687)
superior_end = lung_mask_box[2][1]
air_way_merge_z_bounding_box = Functions.get_bounding_box(lung_mask[:, :, superior_start])
upper_start = air_way_merge_z_bounding_box[0][0]
upper_end = air_way_merge_z_bounding_box[0][1] - int((air_way_merge_z_bounding_box[0][1] -
air_way_merge_z_bounding_box[0][0])/2)
print("lung length on z:", lung_length_z, "superior range:", superior_start, superior_end,
"upper range:", upper_start, upper_end)
upper_superior_mask = np.zeros(np.shape(lung_mask), 'float32')
upper_superior_mask[upper_start: upper_end, :, superior_start: superior_end] = 1.0
upper_superior_mask = upper_superior_mask * lung_mask
refined_airway_mask = np.load(top_directory_masks + 'air_way_mask_stage_two/' + patient_id + '/' + patient_id + '_'
+ time + '_mask_refine.npz')['array']
refined_blood_vessel_mask = np.load(top_directory_masks + 'blood_vessel_mask_stage_two/' + patient_id + '/' +
patient_id + '_' + time + '_mask_refine.npz')['array']
rescaled_ct = rescaled_ct * lung_mask
visible_non_infection = np.array((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > 0.5, 'float32')
rescaled_ct_original = np.array(rescaled_ct)
assert extend_ratio > 1
print("extending air way")
# visible_extended_outer = extend_tubes(visible_non_infection, None, extend_ratio + 0.1, max_diameter)
visible_extended = extend_tubes(visible_non_infection, None, extend_ratio, max_diameter)
context_mask = visible_extended - visible_non_infection
context_mask = context_mask * upper_superior_mask
num_context_points = np.sum(context_mask)
context = context_mask * rescaled_ct_original + context_mask * 10
context = np.reshape(context, (-1,))
context = np.sort(context)
total_points = len(context)
percentile = 50
threshold = context[total_points - int(num_context_points * (100 - percentile) / 100)] - 10
print("the context is:", threshold, 'at', percentile)
rescaled_ct[np.where(visible_non_infection >= 0.5)] = threshold
rescaled_ct = rescaled_ct - threshold * lung_mask # threshold is the value of lung parenchyma
rescaled_ct_original = rescaled_ct_original - threshold * lung_mask
save_array = np.zeros([512, 1024, 512], 'float32')
save_array[:, 0:512, :] = rescaled_ct_original
save_array[:, 512::, :] = rescaled_ct
if show:
image = np.zeros([512, 1024], 'float32')
image[:, 0: 512] = rescaled_ct_original[:, :, 220]
image[:, 512::] = rescaled_ct[:, :, 220]
image = np.clip(image, 0, 0.2)
Functions.image_save(image, '/home/zhoul0a/Desktop/prognosis_project/visualize/remove_visible_upperlobe/'+patient_id + '_'+time+'_compare.png', high_resolution=True,
gray=True)
superior_middle = int((superior_end + superior_start) / 2)
image = np.zeros([512, 1024, 3], 'float32')
image[:, 0: 512, 0] = rescaled_ct_original[:, :, superior_middle]
image[:, 0: 512, 1] = rescaled_ct_original[:, :, superior_middle]
image[:, 0: 512, 2] = rescaled_ct_original[:, :, superior_middle]
image[:, 512::, 0] = rescaled_ct_original[:, :, superior_middle] + context_mask[:, :, superior_middle]
image[:, 512::, 1] = rescaled_ct_original[:, :, superior_middle] - context_mask[:, :, superior_middle]
image[:, 512::, 2] = rescaled_ct_original[:, :, superior_middle] - context_mask[:, :, superior_middle]
image = np.clip(image, 0, 0.2)
Functions.image_save(image, '/home/zhoul0a/Desktop/prognosis_project/visualize/remove_visible_upperlobe/' + patient_id + '_' + time+ '_airway.png', high_resolution=True,
gray=False)
Functions.save_np_array(top_directory_enhanced, patient_id + '_' + time, save_array)
def remove_airway_and_blood_vessel_one_scan_general_sampling(patient_id, time, extend_ratio=1.1, max_diameter=50, show=True):
rescaled_ct =
|
np.load(top_directory_rescaled_ct + patient_id + '/' + patient_id + '_' + time + '.npy')
|
numpy.load
|
import threading
import numpy as np
from scipy.spatial.distance import cosine
class MuseReader(threading.Thread):
def __init__(self, window, path, msg_queue):
threading.Thread.__init__(self)
self.window = window
self.path = path
self.embeds = dict()
self.msg_queue = msg_queue
self.start()
def run(self):
with open(self.path, 'r') as _file:
num_emb, dim = map(int, _file.readline().split())
for _ in range(num_emb):
if self.window.stop:
break
line = _file.readline().split()
key = ' '.join(line[:-dim])
value = list(map(float, line[-dim:]))
self.embeds[key] = np.array(value)
self.msg_queue.put(self.embeds)
def load_embeddings(path_en, path_pt):
file_en = open(path_en, 'r')
file_pt = open(path_pt, 'r')
emb_en = dict()
emb_pt = dict()
num_emb, dim = map(int, file_en.readline().split())
file_pt.readline()
for _ in range(num_emb):
line_pt = file_pt.readline().split()
key_pt = ' '.join(line_pt[:-dim])
value_pt = list(map(float, line_pt[-dim:]))
emb_pt[key_pt] =
|
np.array(value_pt)
|
numpy.array
|
#!/usr/bin/env python
# Phylogenetic placement of new sequences to PhyCLIP clustered reference tree
# Authors: <NAME> and <NAME>
from scipy.stats import levene
import re
import os
import sys
import subprocess
import argparse
import numpy as np
import pandas as pd
import statsmodels.api as sm
import json
import multiprocessing as mp
import itertools
import ete3
# parse phyclip tree output (NEXUS format)
def parse_phyclip_output(filename):
id_to_taxon = {}
cluster_to_taxon = {}
fhandle = open(filename, "r").readlines()
for line in fhandle:
# parse for id, taxon and cluster
try:
id, taxon = re.search("^\s+(\d+)\s+([^,;]+)[,;]*$", line).group(1, 2)
taxon = re.sub("(^'|'$|\*)", "", taxon)
try:
cluster = re.search("_cluster([\d\.a-z]+)", taxon).group(1)
taxon = re.sub("_cluster[\d\.a-z]+", "", taxon)
except:
cluster = "unclustered"
taxon = taxon.strip()
id_to_taxon[id] = taxon
try:
cluster_to_taxon[cluster].append(taxon)
except:
cluster_to_taxon[cluster] = [taxon]
except:
pass
# parse tree
try:
tree = re.search("tree[^(]+(\([^;]+;)", line).group(1)
tree = re.sub("\[[^\]]+\]", "", tree)
except:
pass
# invalid file
if len(id_to_taxon) == 0:
sys.exit(1)
# replace id with taxon name
new_tree = []
prev_end = 0
tree = re.sub("'", "", tree)
for expr in re.finditer("[(,](\d+):", tree):
new_tree.append(tree[prev_end:expr.start()+1])
id = expr.group(1)
new_tree.append(id_to_taxon[id])
prev_end = expr.end()-1
new_tree.append(tree[prev_end:])
return "".join(new_tree), cluster_to_taxon
# searches clade tree strings of clusters
def cluster_to_clade_tree(ref_tree, c_to_t):
eteTree = ete3.Tree(ref_tree)
# resolve polytomy
eteTree.resolve_polytomy()
eteTree.ladderize()
data = {"CLUSTER":["REF"], "TRUNK":[True],
"TSTRING":[eteTree.write(format=5)], "REFSEQ":[eteTree.get_leaf_names()]}
for n, node in enumerate(eteTree.traverse(strategy='levelorder')):
n = str(n)
if n in c_to_t.keys():
ref_taxa = c_to_t[n]
leaves = node.get_leaf_names()
unclustered_taxa_subtended = list(set(c_to_t["unclustered"])&set(leaves))
if len(unclustered_taxa_subtended) > 0:
ref_taxa = list(set(ref_taxa)|set(unclustered_taxa_subtended))
clade_tree_string = node.write(format=5)
# terminal clade tree
if set(leaves) != set(ref_taxa):
trunk_clade = False
# trunk clusters
else:
cTree = ete3.Tree(clade_tree_string)
cTree.prune(ref_taxa)
clade_tree_string = cTree.write(format=5)
trunk_clade = True
data["CLUSTER"].append(n)
data["TRUNK"].append(trunk_clade)
data["TSTRING"].append(clade_tree_string)
data["REFSEQ"].append(ref_taxa)
return pd.DataFrame(data), eteTree.get_leaf_names()
def parse_aln(filename):
data = {}
fhandle = open(filename, "r").readlines()
for key, group in itertools.groupby(fhandle, key=lambda _: re.search("^>", _)):
if key:
header = re.sub("^>", "", next(group)).strip()
try:
data["HEADER"].append(header)
except:
data["HEADER"] = [header]
else:
sequence = "".join([line.strip() for line in list(group)])
try:
data["SEQUENCE"].append(sequence)
except:
data["SEQUENCE"] = [sequence]
return pd.DataFrame(data)
def weighted_high_median(a, wts):
N = len(a)
wtotal = 0
wdiscardedlow = 0
for i in range(N):
wtotal += wts[i]
nn = N
while True:
assert (nn > 0 and len(a) == nn)
trial = sorted(a)[int(nn/2)]
# Count up the weight to the left of and at the trial point.
# Weight to the right of it isn't needed
wleft = wtrial = 0
for i in range(nn):
if a[i] < trial:
wleft += wts[i]
elif a[i] == trial:
wtrial += wts[i]
if 2*(wdiscardedlow + wleft) > wtotal:
# Trial value is too high
ncandidates = 0
#for i = 1:nn
for i in range(nn):
if a[i] < trial:
a[ncandidates] = a[i]
wts[ncandidates] = wts[i]
ncandidates += 1
nn = ncandidates
elif 2*(wdiscardedlow + wleft + wtrial) > wtotal:
# Trial value is just right
return trial
else:
# Trial value is too low
ncandidates = 0
#for i = 1:nn
for i in range(nn):
if a[i] > trial:
a[ncandidates] = a[i]
wts[ncandidates] = wts[i]
ncandidates += 1
nn = ncandidates
wdiscardedlow += wleft+wtrial
a=a[:nn]
wts=wts[:nn]
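# Minimal sketch for weighted_high_median (illustrative only): with equal
# weights it reduces to the ordinary (high) median, here 2. The inputs are
# modified in place, so fresh lists are passed.
def _weighted_high_median_example():
    return weighted_high_median([3, 1, 2], [1, 1, 1])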
def qn(data):
# sort data
data = np.sort(data)
n = len(data)
h = int(n/2) + 1
k = int(h*(h-1)/2)
left = np.arange(n+1,1,-1)
right = np.full(n,n, dtype= np.int64)
work = np.zeros(n) # dtype = np.float64
weight = np.zeros(n, np.int64)
P =
|
np.zeros(n, np.int64)
|
numpy.zeros
|
# coding=UTF-8
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from scipy.stats import norm
from scipy.stats import binom
from scipy.stats import expon
from scipy.stats import genexpon
import math
import random
import numpy as np
from mdkp import Colony
from test_dmachine import AllocationOfMachine
import heapq
from dconnection import *
#import dconnection
import time
import _thread
import logging
import json
import jsonpickle
import os
import threading
import matplotlib.pyplot as plt
from log import slogger
#import log
machine_queue = []
queue_lock = threading.Lock()
# only used for test
task_requests = {}
tasks = {}
machines = {}
restricted_index = 0
node_manager = None
etcdclient = None
recv_stop = False
def add_machine(id, cpus=24, mems=240000):
global machines
global machine_queue
machine = AllocationOfMachine(id, cpus, mems)
machines[id] = machine
heapq.heappush(machine_queue,machine)
    # to-do: switch to multithreading and run each thread directly
# machine.colony.run()
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
return machine
def pre_allocate(task):
global restricted_index
global queue_lock
global machines
if 'bid' in task and task['bid']!='0':
# queue_lock.acquire()
# machine = heapq.heappop(machine_queue)
        # compute the new heu for each machine and pick the smallest one
least_heu = float('inf')
selected_machine = {}
for id, machine in machines.items():
heu = machine.cpu_value * int(task['cpus']) + machine.mem_value * int(task['mems'])
if heu < least_heu:
selected_machine = machine
least_heu = heu
task['machineid'] = selected_machine.machineid
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
        # update this machine's MPV; the RV stays unchanged
machine = selected_machine
machine.pre_cpus_wanted += int(task['cpus'])
machine.pre_mems_wanted += int(task['mems'])
        # add this task to the machine's heuristic allocation list (heu_tasks)
machine.heu_tasks.append(task)
        # machine not full yet
if(machine.pre_cpus_wanted <= machine.reliable_cpus and machine.pre_mems_wanted <= machine.reliable_mems):
# utilization = (machine.pre_cpus_wanted * machine.rareness_ratio + machine.pre_mems_wanted) / (machine.reliable_cpus * machine.rareness_ratio + machine.reliable_mems)
utilization = 0.5 * machine.pre_cpus_wanted / machine.reliable_cpus + 0.5 * machine.pre_mems_wanted / machine.reliable_mems
machine.cpu_value = 0.01 * machine.rareness_ratio * utilization
machine.mem_value = 0.01 * utilization
machine.rareness_ratio = ((machine.reliable_mems ** 2) * machine.pre_cpus_wanted) / ((machine.reliable_cpus ** 2) * machine.pre_mems_wanted)
        # machine is full: use a greedy algorithm to find the lowest bid among the winners
else:
if float(task['bid']) <= machine.cpu_value * int(task['cpus']) + machine.mem_value * int(task['mems']):
return task
for task in machine.heu_tasks:
heu = float(task['bid']) / (int(task['cpus']) * machine.rareness_ratio + int(task['mems']))
task['heu'] = heu
sorted_heu = sorted(machine.heu_tasks, key=lambda k: k['heu'],reverse=True)
utilized_cpus = 0
utilized_mems = 0
lowest_heu = 0.01
for task in sorted_heu:
if utilized_cpus + int(task['cpus']) < machine.reliable_cpus and utilized_mems + int(task['mems']) < machine.reliable_mems:
lowest_heu = task['heu']
utilized_cpus += int(task['cpus'])
utilized_mems += int(task['mems'])
else:
break
machine.cpu_value = machine.rareness_ratio * lowest_heu
machine.mem_value = lowest_heu
machine.rareness_ratio = ((machine.reliable_mems ** 2) * utilized_cpus) / ((machine.reliable_cpus ** 2) * utilized_mems)
# time.sleep(0.1)
else:
if(restricted_index >= len(machines)):
restricted_index = 0
        slogger.debug("restricted_index: %s", restricted_index)
values = list(machines.values())
task['machineid'] = values[restricted_index].machineid
restricted_index += 1
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
        task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
return task
def allocate(id):
task = tasks[id]
machineid = task['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
# slogger.debug("dispatch reliable")
task = machine.add_reliable_task(task)
# slogger.debug("pop machine: id = %s", machine.machineid)
send_task(machineid,task,"add")
else:
# slogger.debug("dispatch restricted")
task = machine.add_restricted_task(task)
return task
def release(id):
task = tasks[id]
machineid = tasks[id]['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
slogger.debug("release reliable")
machine.release_reliable_task(id)
send_task(machine,task,'delete')
else:
slogger.debug("release restricted")
machine.release_restricted_task(id)
def after_release(id):
task = tasks[id]
for index,machine in enumerate(machine_queue):
if task['machineid'] == machine.machineid:
del machine_queue[index]
break
machine.total_value -= int(task['bid'])
heapq.heappush(machine_queue,machine)
del tasks[id]
def stop_scheduler():
global queue_lock
# print("stop scheduler")
queue_lock.acquire()
os.system("kill -9 $(pgrep acommdkp) > /dev/null 2>&1")
# time.sleep(1)
# print("close sockets")
close_sync_socket()
close_colony_socket()
close_task_socket()
import dconnection
dconnection.recv_run = False
queue_lock.release()
# time.sleep(1)
def init_scheduler():
global queue_lock
    # start the C ACO-MMDKP solver and run it in the background
os.system("rm -rf /home/augustin/docklet/src/aco-mmdkp.log")
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
# time.sleep(1)
slogger.setLevel(logging.INFO)
slogger.info("init scheduler!")
# print("init scheduler")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
_thread.start_new_thread(recv_result,(machines,machine_queue,queue_lock))
def test_all():
init_scheduler()
for i in range(0,2):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
# requests = generate_test_data(64,256,2,"reliable",'uniform',0)
# generate_test_data(64,256,1,"restricted",192)
requests = parse_test_data('uniform_tasks1.txt',64,256,1,"uniform")
for index,request in requests.items():
pre_allocate(request)
slogger.info("pre allocate tasks done")
for index,request in requests.items():
allocate(request['id'])
slogger.info("allocate tasks done")
time.sleep(10)
for index,request in requests.items():
release(request['id'])
slogger.info("release tasks done")
for index,request in requests.items():
after_release(request['id'])
slogger.info("after release tasks done")
def relax_mdp(tasks,cpus,mems,machines):
cpus = cpus*machines
mems = mems * machines
opt = np.zeros((cpus+1,mems+1))
for key,task in tasks.items():
i_cpu = int(task['cpus'])
i_mem = int(task['mems'])
bid = int(task['bid'])
for j in range(cpus,i_cpu-1,-1):
for k in range(mems,i_mem-1, -1):
# print(j,k)
opt[j][k] = max(opt[j][k],opt[j-i_cpu][k-i_mem]+bid)
# print(opt)
print("relax opt: ",opt[cpus][mems])
return opt[cpus][mems]
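# Editor's illustrative sketch (not part of the original script): how the relaxed
# multi-dimensional knapsack bound above can be queried. The task ids, sizes and bids
# below are made up purely for illustration and the helper is never called here.
def _example_relax_mdp_bound():
    toy_tasks = {
        't1': {'cpus': '2', 'mems': '4', 'bid': '10'},
        't2': {'cpus': '1', 'mems': '2', 'bid': '7'},
    }
    # one machine with 4 CPUs and 8 memory units; the relaxation pools all machines together
    return relax_mdp(toy_tasks, cpus=4, mems=8, machines=1)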
corr0 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
corr1 = [[1, 0.9, 0.9], [0.9, 1, 0.9], [0.9, 0.9, 1]]
#corr1 = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
corr2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
corr05 = [[1, -0.5, 0.5, -0.5], [-0.5, 1, -0.5, 0.5], [0.5, -0.5, 1, -0.5], [-0.5, 0.5, -0.5, 1]]
corr_opt = [[1, -0.9, 0.9, -0.9], [-0.9, 1, -0.9, 0.9], [0.9, -0.9, 1, -0.9], [-0.9, 0.9, -0.9, 1]]
corr00 = [[1, 0, 0.5, -0.5], [0, 1, 0, 0.5], [0.5, 0, 1, 0], [0, 0.5, 0, 1]]
def generate_uniform_opt(cpu,mem,num_tasks):
mean = [0, 0, 0, 0]
corr = corr_opt
a,b,c,d = np.random.multivariate_normal(mean, corr, num_tasks).T
# for i,ia in enumerate(a):
# print(a[i],b[i],c[i],d[i],'\n')
cpus = []
mems = []
values = []
for ix in a:
cpus.append(norm.cdf(ix)*(cpu/4-1)+1)
for iy in b:
mems.append(norm.cdf(iy)*(mem/4-1)+1)
for index in range(len(c)):
if a[index]> b[index]:
values.append(norm.cdf(c[index])*(100-1)+1)
else:
values.append(norm.cdf(d[index])*(100-1)+1)
# for i,icpus in enumerate(cpus):
# print(cpus[i],mems[i],values[i],'\n')
# print(np.corrcoef([cpus,mems,values]))
return cpus,mems,values
def generate_uniform(cpu,mem,num_tasks,corr):
mean = [0, 0, 0]
if corr == 'corr0':
corr = corr0
elif corr == 'corr1':
corr = corr1
elif corr == 'corr2':
corr = corr2
x, y, z = np.random.multivariate_normal(mean, corr, num_tasks).T
# print(np.corrcoef([x,y,z]))
cpus = []
mems = []
values = []
for ix in x:
cpus.append(norm.cdf(ix)*(cpu/4-1)+1)
for iy in y:
mems.append(norm.cdf(iy)*(mem/4-1)+1)
for iz in z:
values.append(norm.cdf(iz)*(100-1)+1)
# print( np.corrcoef([cpus,mems,values]) )
return cpus,mems,values
def generate_multivariate_binomial(cpu,mem,num_tasks):
mean = [0, 0, 0]
corr = [[1, -0.5, -0.5], [-0.5, 1, -0.5], [-0.5, -0.5, 1]]
x, y, z = np.random.multivariate_normal(mean, corr, num_tasks).T
cpus = []
mems = []
values = []
for ix in x:
cpus.append(binom.ppf(norm.cdf(ix),cpu,8/cpu))
for iy in y:
mems.append(binom.ppf(norm.cdf(iy),mem,8/mem))
for iz in z:
values.append(norm.cdf(iz)*(100-1)+1)
# print("cpu mem corr: ", np.corrcoef(cpus,mems)[0, 1])
# print("cpus: ",cpus)
return cpus,mems,values
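# Editor's illustrative sketch (not part of the original script): a quick sanity check of
# the Gaussian-copula style generators above, confirming that the requested correlation
# pattern roughly appears in the generated (cpus, mems, values) samples. The sample size
# is arbitrary and the helper is never called here.
def _example_check_generator_correlation():
    cpus, mems, values = generate_uniform(64, 256, 5000, 'corr0')
    # with corr0 (the identity matrix) the empirical cross-correlations should be near zero
    return np.corrcoef([cpus, mems, values])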
def generate_ec2(cpu,mem,num_tasks,corr):
mean = [0, 0, 0]
if corr == 'corr0':
corr = corr0
elif corr == 'corr1':
corr = corr1
elif corr == 'corr2':
corr = corr2
x, y, z =
|
np.random.multivariate_normal(mean, corr, num_tasks)
|
numpy.random.multivariate_normal
|
import matplotlib
import numpy as np
import sys
import json
from os import listdir
from os.path import isfile, join
from collections import defaultdict
def load_episodes_from_logs(data_path):
discretized_mdp_files = [join(data_path, f) for f in listdir(data_path) if isfile(join(data_path, f)) and f.startswith('discretized')]
episodes = []
for discretized_mdp_file in discretized_mdp_files:
f = open(discretized_mdp_file)
data = json.load(f)
episode = []
photo_idx = -1
for i in range(len(data)):
timestep = data[i]
state = timestep[0]
action = timestep[1]
reward = timestep[2]
photo_idx = state["num_photos"]
s = tuple([state["subpolicy"], state["position"], int(state["starting_new_photo"] == True), state["user_position"][0], state["user_position"][1], state["user_position"][2]])
a = action
r = []
for raw_reward in reward:
scaled_reward = raw_reward - 4 if raw_reward != 0 else 0
r.append(scaled_reward)
r = tuple(r)
episode.append((s, a, r))
if i == len(data)-1 or data[i+1][0]["starting_new_photo"]:
episodes.append(episode)
episode = []
return episodes
def compute_behavior_policy(subpolicy_to_num_positions, state, action):
if action == 100:
return 1./3
else:
return 2./3*1./subpolicy_to_num_positions[state[0]]
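# Editor's illustrative sketch (not part of the original script): the behavior policy above
# assigns probability 1/3 to the photo action (100) and spreads the remaining 2/3 uniformly
# over the positions of the current subpolicy, so its probabilities sum to one.
def _example_behavior_policy_sums_to_one(subpolicy_to_num_positions, state):
    n_positions = subpolicy_to_num_positions[state[0]]
    total = compute_behavior_policy(subpolicy_to_num_positions, state, 100)
    total += sum(compute_behavior_policy(subpolicy_to_num_positions, state, a)
                 for a in range(n_positions))
    return abs(total - 1.0) < 1e-9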
def create_random_policy(nA):
"""
Creates a random policy function.
Args:
nA: Number of actions in the environment.
Returns:
A function that takes an observation as input and returns a vector
of action probabilities
"""
A = np.ones(nA, dtype=float) / nA
def policy_fn(observation):
return A
return policy_fn
def create_greedy_policy(Q):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(state):
A = np.zeros_like(Q[state], dtype=float)
best_action = np.argmax(Q[state])
A[best_action] = 1.0
return A
return policy_fn
def mc_control_importance_sampling(episodes, discount_factor=1.0):
"""
    Off-policy Monte Carlo control using weighted importance sampling.
    Finds an optimal greedy policy for each reward dimension.
    Args:
        episodes: List of episodes, each a list of (state, action, reward) tuples
            as produced by load_episodes_from_logs.
        discount_factor: Gamma discount factor.
    Returns:
        Qs, a list with one Q table per reward dimension.
        Each Q is a dictionary mapping state -> action values; the greedy policy
        with respect to Q is the learned target policy.
"""
action_space_size = 101
num_rewards = 9
# The final action-value function.
# A dictionary that maps state -> action values
Qs = []
for i in range(num_rewards):
Q = defaultdict(lambda: np.zeros(action_space_size))
Qs.append(Q)
# The cumulative denominator of the weighted importance sampling formula
# (across all episodes)
Cs = []
for i in range(num_rewards):
C = defaultdict(lambda: np.zeros(action_space_size))
Cs.append(C)
    # The greedy policy we want to learn
for i_reward in range(num_rewards):
print("Computing Q table for reward " + str(i_reward))
target_policy = create_greedy_policy(Qs[i_reward])
for i_episode in range(1, len(episodes) + 1):
# Print out which episode we're on, useful for debugging.
print("Episode {}/{}.".format(i_episode, len(episodes)))
episode = episodes[i_episode-1]
# Sum of discounted returns
G = 0.0
# The importance sampling ratio (the weights of the returns)
W = 1.0
# For each step in the episode, backwards
for t in range(len(episode))[::-1]:
state, action, reward = episode[t]
# Update the total reward since step t
G = discount_factor * G + reward[i_reward]
# Update weighted importance sampling formula denominator
Cs[i_reward][state][action] += W
# Update the action-value function using the incremental update formula (5.7)
# This also improves our target policy which holds a reference to Q
Qs[i_reward][state][action] += (W / Cs[i_reward][state][action]) * (G - Qs[i_reward][state][action])
target_policy = create_greedy_policy(Qs[i_reward])
# If the action taken by the behavior policy is not the action
# taken by the target policy the probability will be 0 and we can break
if action != np.argmax(target_policy(state)):
break
W = W * 1./compute_behavior_policy(subpolicy_to_num_positions, state, action)
return Qs
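# Editor's illustrative sketch (not part of the original script): once the per-reward Q
# tables are learned, a greedy action for a given state can be read off each table. The
# state tuple must match the discretisation used in the logged episodes; this helper is
# never called here.
def _example_greedy_actions(Qs, state):
    return [int(np.argmax(Q[state])) for Q in Qs]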
subpolicy_to_num_positions = {0: 21, 1: 21, 2: 24, 3: 27}
#data_path = '/home/qz257/Projects/PhotoRL/shutter_tarzan/shutter-tarzan-shutter_rl/shutter_tarzan/rl_data_temp_updated/'
#output_path = 'Q_table.json'
data_path = '/home/aon7/tarzan_ws/src/shutter-tarzan/shutter_tarzan/rl_data_all/'
#data_path = '/home/aon7/tarzan_ws/src/shutter-tarzan/shutter_tarzan/rl_data_test_uniform_policy/'
episodes = load_episodes_from_logs(data_path)
#rewards = np.zeros((len(episodes), 9))
rewards_photo = []
rewards_interaction = []
#print("episodes", episodes)
#print(episodes[0])
#print(len(episodes))
for i, episode in enumerate(episodes):
#print("ep = ", episode)
curr_rewards_photo = []#np.zeros((9))
for j, (state, action, reward) in enumerate(episode):
if action == 100:
curr_rewards_photo.append(reward[:5])
#print("j", j)
if not (np.array(reward[5:]) == 0).all():#j == len(episode)-1:
#print("adding rew = ", reward[5:])
rewards_interaction += [reward[5:]]
rewards_photo += curr_rewards_photo
#rewards_interaction += episode[-1][2][6:]
#print("rewards = ", rewards)
#print("rewards interaction", rewards_interaction)
rewards_photo = np.array(rewards_photo)
rewards_interaction = np.array(rewards_interaction)
print("\nFor each photo-taking step:")
print("Rewards shape =", rewards_photo.shape)
print("Avg rewards = ", np.mean(rewards_photo, axis=0))
print("Std rewards = ",
|
np.std(rewards_photo, axis=0)
|
numpy.std
|
from ._base import TimeResolvedModel
import numpy as np
class MerrifieldExplicit1TT(TimeResolvedModel):
r"""
A class for time-resolved simulations using a modified version of Merrifield's model.
The model explicity includes the :math:`^1(TT)` state, and allows for decay
of a single triplet in a :math:`(T..T)` state.
Attributes
----------
states : list of str
The names of the excited state species.
rates : list of str
The names of the different rate constants in the model.
model_name : str
The name of the model.
G : float
The initial exciton density. Units of per volume.
initial_weighting : dict
Dictionary of (str, float) pairs. Key is the state name (str) and value is its initial weight (float). The default is {'S1': 1}.
t_step : float
The first time step taken by the simulation, thereafter the step will increase geometrically.
t_end : float
The last time point in the simulation.
num_points : int
The number of time points to compute the simulation at.
kSF : float
Rate constant for :math:`S_1\rightarrow ^1(TT)`. Units of per time.
k_SF : float
Rate constant for :math:`^1(TT)\rightarrow S_1`. Units of per time.
kHOP : float
Rate constant for :math:`^1(TT)\rightarrow (T..T)`. Units of per time.
k_HOP : float
Rate constant for :math:`(T..T)\rightarrow ^1(TT)`. Units of per time.
kHOP2 : float
Rate constant for :math:`(T..T)\rightarrow2\times T_1`. Units of per time.
kTTA : float
Rate constant for :math:`2\times T_1\rightarrow (T..T) or ^1(TT) or S_1`. See :attr:`MerrifieldExplicit1TT.TTA_channel`. Units of volume per time.
kRELAX : float
Rate constant for mixing between the :math:`(T..T)` states. Units of per time.
kSSA : float
Singlet-singlet annihilation rate constant. Units of volume per time.
kSNR : float
Rate constant for the decay of :math:`S_1`. Units of per time.
kTTNR : float
Rate constant for the decay of :math:`^1(TT)`. Units of per time.
kTNR : float
Rate constant for the decay of :math:`T_1`, or one of the triplets in :math:`(T..T)`. Units of per time.
cslsq : numpy.ndarray
1D array containing the overlap factors between the 9 :math:`(T..T)` states and the singlet.
TTA_channel : int
Index determining the fate of free triplets. 1 gives :math:`2\times T_1\rightarrow (T..T)`. 2 gives :math:`2\times T_1\rightarrow ^1(TT)`. 3 gives :math:`2\times T_1\rightarrow S_1`.
simulation_results : dict
Produced by :meth:`simulate`. Keys are the excited-state names (str), values the simulated populations (numpy.ndarray).
"""
def __init__(self):
super().__init__()
# metadata
self.model_name = 'MerrifieldExplicit1TT'
self._number_of_states = 12
self.states = ['S1', 'TT', 'T_T_total', 'T1']
self.rates = ['kSF', 'k_SF', 'kHOP', 'k_HOP', 'kHOP2', 'kTTA', 'kRELAX', 'kSNR', 'kSSA', 'kTTNR', 'kTNR']
self._allowed_initial_states = {'S1', 'TT', 'T1'}
self._initial_state_mapping = {'S1': 0, 'TT': 1, 'T1': -1}
# rates between excited states
self.kSF = 20.0
self.k_SF = 0.03
self.kHOP = 0.067
self.k_HOP = 2.5e-4
self.kHOP2 = 1e-5
self.kTTA = 1e-18
# spin relaxation
self.kRELAX = 0
# rates of decay
self.kSNR = 0.1
self.kSSA = 0
self.kTTNR = 0.067
self.kTNR = 1e-5
# TTA channel
self.TTA_channel = 1
# cslsq values
self.cslsq = (1/9)*np.ones(9)
def _rate_equations(self, y, t):
S1, TT, T_T_1, T_T_2, T_T_3, T_T_4, T_T_5, T_T_6, T_T_7, T_T_8, T_T_9, T1 = y
dydt = np.zeros(self._number_of_states)
# S1
dydt[0] = -(self.kSNR+self.kSF)*S1 - self.kSSA*S1*S1 + self.k_SF*TT + self._kTTA_3*T1**2
# TT
dydt[1] = self.kSF*S1 - (self.k_SF+self.kTTNR+self.kHOP*np.sum(self.cslsq))*TT + self.k_HOP*(self.cslsq[0]*T_T_1+self.cslsq[1]*T_T_2+self.cslsq[2]*T_T_3+self.cslsq[3]*T_T_4+self.cslsq[4]*T_T_5+self.cslsq[5]*T_T_6+self.cslsq[6]*T_T_7+self.cslsq[7]*T_T_8+self.cslsq[8]*T_T_9) + self._kTTA_2*T1**2
# T_T_1
dydt[2] = self.kHOP*self.cslsq[0]*TT - (self.k_HOP*self.cslsq[0]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_1 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_2+T_T_3+T_T_4+T_T_5+T_T_6+T_T_7+T_T_8+T_T_9)
# T_T_2
dydt[3] = self.kHOP*self.cslsq[1]*TT - (self.k_HOP*self.cslsq[1]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_2 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_3+T_T_4+T_T_5+T_T_6+T_T_7+T_T_8+T_T_9)
# T_T_3
dydt[4] = self.kHOP*self.cslsq[2]*TT - (self.k_HOP*self.cslsq[2]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_3 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_4+T_T_5+T_T_6+T_T_7+T_T_8+T_T_9)
# T_T_4
dydt[5] = self.kHOP*self.cslsq[3]*TT - (self.k_HOP*self.cslsq[3]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_4 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_5+T_T_6+T_T_7+T_T_8+T_T_9)
# T_T_5
dydt[6] = self.kHOP*self.cslsq[4]*TT - (self.k_HOP*self.cslsq[4]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_5 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_6+T_T_7+T_T_8+T_T_9)
# T_T_6
dydt[7] = self.kHOP*self.cslsq[5]*TT - (self.k_HOP*self.cslsq[5]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_6 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_5+T_T_7+T_T_8+T_T_9)
# T_T_7
dydt[8] = self.kHOP*self.cslsq[6]*TT - (self.k_HOP*self.cslsq[6]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_7 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_5+T_T_6+T_T_8+T_T_9)
# T_T_8
dydt[9] = self.kHOP*self.cslsq[7]*TT - (self.k_HOP*self.cslsq[7]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_8 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_5+T_T_6+T_T_7+T_T_9)
# T_T_9
dydt[10] = self.kHOP*self.cslsq[8]*TT - (self.k_HOP*self.cslsq[8]+self.kTNR+self.kHOP2+self.kRELAX)*T_T_9 + (1/9)*self._kTTA_1*T1**2 + (1/8)*self.kRELAX*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_5+T_T_6+T_T_7+T_T_8)
# T1
dydt[11] = (self.kTNR+(2.0*self.kHOP2))*(T_T_1+T_T_2+T_T_3+T_T_4+T_T_5+T_T_6+T_T_7+T_T_8+T_T_9) - 2*self._kTTA_1*T1**2 - 2*self._kTTA_2*T1**2 - 2*self._kTTA_3*T1**2 - self.kTNR*T1
#
return dydt
def _set_tta_rates(self):
if self.TTA_channel == 1: # this is T1 + T1 -> (T..T)
self._kTTA_1 = self.kTTA
self._kTTA_2 = 0
self._kTTA_3 = 0
elif self.TTA_channel == 2: # this is T1 + T1 -> (TT)
self._kTTA_1 = 0
self._kTTA_2 = self.kTTA
self._kTTA_3 = 0
elif self.TTA_channel == 3: # this is T1 + T1 -> S1
self._kTTA_1 = 0
self._kTTA_2 = 0
self._kTTA_3 = self.kTTA
else:
raise ValueError('TTA channel must be either 1, 2 or 3')
return
def _initialise_simulation(self):
self._set_tta_rates()
self._calculate_time_axis()
self._check_initial_weighting()
self._set_initial_condition()
return
def _unpack_simulation(self, y):
self.S1 = y[:, 0]
self.TT = y[:, 1]
self.T_T_total = np.sum(y[:, 2:11], axis=1)
self.T1 = y[:, -1]
self._wrap_simulation_results()
return
def _wrap_simulation_results(self):
self.simulation_results = dict(zip(self.states, [self.S1, self.TT, self.T_T_total, self.T1]))
return
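# Editor's illustrative sketch (not part of the original module): minimal usage of the
# class above, assuming the TimeResolvedModel base class provides the simulate() method
# that fills simulation_results (as described in the class docstring). The rate constant
# override is an arbitrary example value.
def _example_merrifield_explicit_1tt():
    model = MerrifieldExplicit1TT()
    model.kTTA = 5e-19                  # override a rate constant before simulating
    model.initial_weighting = {'S1': 1}
    model.simulate()                    # assumed to be defined on TimeResolvedModel
    return model.simulation_results['S1']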
class Merrifield(TimeResolvedModel):
r"""
    A class for time-resolved simulations using Merrifield's model.
Attributes
----------
states : list of str
The names of the excited state species.
rates : list of str
The names of the different rate constants in the model.
model_name : str
The name of the model.
G : float
The initial exciton density. Units of per volume.
initial_weighting : dict
Dictionary of (str, float) pairs. Key is the state name (str) and value is its initial weight (float). The default is {'S1': 1}.
t_step : float
The first time step taken by the simulation, thereafter the step will increase geometrically.
t_end : float
The last time point in the simulation.
num_points : int
The number of time points to compute the simulation at.
kSF : float
Rate constant for :math:`S_1\rightarrow (TT)`. Units of per time.
k_SF : float
Rate constant for :math:`(TT)\rightarrow S_1`. Units of per time.
kDISS : float
Rate constant for :math:`(TT)\rightarrow2\times T_1`. Units of per time.
kTTA : float
Rate constant for :math:`2\times T_1\rightarrow (TT)`. Units of volume per time.
kRELAX : float
Rate constant for mixing between the :math:`(T..T)` states. Units of per time.
kSSA : float
Singlet-singlet annihilation rate constant. Units of volume per time.
kSNR : float
Rate constant for the decay of :math:`S_1`. Units of per time.
kTTNR : float
Rate constant for the decay of :math:`(TT)`. Units of per time.
kTNR : float
Rate constant for the decay of :math:`T_1`. Units of per time.
cslsq : numpy.ndarray
1D array containing the overlap factors between the 9 :math:`(T..T)` states and the singlet.
simulation_results : dict
Produced by :meth:`simulate`. Keys are the excited-state names (str), values the simulated populations (numpy.ndarray).
"""
def __init__(self):
super().__init__()
# metadata
self.model_name = 'Merrifield'
self._number_of_states = 11
self.states = ['S1', 'TT_bright', 'TT_total', 'T1']
self.rates = ['kSF', 'k_SF', 'kDISS', 'kTTA', 'kRELAX', 'kSNR', 'kSSA', 'kTTNR', 'kTNR']
# rates between excited states
self.kSF = 20.0
self.k_SF = 0.03
self.kDISS = 0.067
self.kTTA = 1e-18
# spin relaxation (Bardeen addition - not in original Merrifield)
self.kRELAX = 0
# rates of decay
self.kSNR = 0.1
self.kSSA = 0
self.kTTNR = 0.067
self.kTNR = 1e-5
# cslsq values
self.cslsq = (1/9)*np.ones(9)
def _rate_equations(self, y, t):
S1, TT_1, TT_2, TT_3, TT_4, TT_5, TT_6, TT_7, TT_8, TT_9, T1 = y
dydt = np.zeros(self._number_of_states)
# S1
dydt[0] = -(self.kSNR+self.kSF*np.sum(self.cslsq))*S1 -self.kSSA*S1*S1+ self.k_SF*(self.cslsq[0]*TT_1+self.cslsq[1]*TT_2+self.cslsq[2]*TT_3+self.cslsq[3]*TT_4+self.cslsq[4]*TT_5+self.cslsq[5]*TT_6+self.cslsq[6]*TT_7+self.cslsq[7]*TT_8+self.cslsq[8]*TT_9)
# TT_1
dydt[1] = self.kSF*self.cslsq[0]*S1 - (self.k_SF*self.cslsq[0]+self.kDISS+self.kTTNR+self.kRELAX)*TT_1 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_2+TT_3+TT_4+TT_5+TT_6+TT_7+TT_8+TT_9)
# TT_2
dydt[2] = self.kSF*self.cslsq[1]*S1 - (self.k_SF*self.cslsq[1]+self.kDISS+self.kTTNR+self.kRELAX)*TT_2 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_3+TT_4+TT_5+TT_6+TT_7+TT_8+TT_9)
# TT_3
dydt[3] = self.kSF*self.cslsq[2]*S1 - (self.k_SF*self.cslsq[2]+self.kDISS+self.kTTNR+self.kRELAX)*TT_3 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_4+TT_5+TT_6+TT_7+TT_8+TT_9)
# TT_4
dydt[4] = self.kSF*self.cslsq[3]*S1 - (self.k_SF*self.cslsq[3]+self.kDISS+self.kTTNR+self.kRELAX)*TT_4 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_5+TT_6+TT_7+TT_8+TT_9)
# TT_5
dydt[5] = self.kSF*self.cslsq[4]*S1 - (self.k_SF*self.cslsq[4]+self.kDISS+self.kTTNR+self.kRELAX)*TT_5 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_4+TT_6+TT_7+TT_8+TT_9)
# TT_6
dydt[6] = self.kSF*self.cslsq[5]*S1 - (self.k_SF*self.cslsq[5]+self.kDISS+self.kTTNR+self.kRELAX)*TT_6 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_4+TT_5+TT_7+TT_8+TT_9)
# TT_7
dydt[7] = self.kSF*self.cslsq[6]*S1 - (self.k_SF*self.cslsq[6]+self.kDISS+self.kTTNR+self.kRELAX)*TT_7 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_4+TT_5+TT_6+TT_8+TT_9)
# TT_8
dydt[8] = self.kSF*self.cslsq[7]*S1 - (self.k_SF*self.cslsq[7]+self.kDISS+self.kTTNR+self.kRELAX)*TT_8 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_4+TT_5+TT_6+TT_7+TT_9)
# TT_9
dydt[9] = self.kSF*self.cslsq[8]*S1 - (self.k_SF*self.cslsq[8]+self.kDISS+self.kTTNR+self.kRELAX)*TT_9 + (1/9)*self.kTTA*T1*T1 + (1/8)*self.kRELAX*(TT_1+TT_2+TT_3+TT_4+TT_5+TT_6+TT_7+TT_8)
# T1
dydt[10] = 2.0*self.kDISS*(TT_1+TT_2+TT_3+TT_4+TT_5+TT_6+TT_7+TT_8+TT_9) - 2.0*self.kTTA*T1*T1 - self.kTNR*T1
#
return dydt
def _unpack_simulation(self, y):
self.S1 = y[:, 0]
self.TT_bright = self.cslsq[0]*y[:, 1] + self.cslsq[1]*y[:, 2] + self.cslsq[2]*y[:, 3] + self.cslsq[3]*y[:, 4] + self.cslsq[4]*y[:, 5] + self.cslsq[5]*y[:, 6] + self.cslsq[6]*y[:, 7] + self.cslsq[7]*y[:, 8] + self.cslsq[8]*y[:, 9]
self.TT_total = np.sum(y[:, 1:10], axis=1)
self.T1 = y[:, -1]
self._wrap_simulation_results()
return
def _wrap_simulation_results(self):
self.simulation_results = dict(zip(self.states, [self.S1, self.TT_bright, self.TT_total, self.T1]))
return
class Bardeen(TimeResolvedModel):
r"""
    A class for time-resolved simulations using a modified version of Merrifield's model.
The model does not include free triplets. Instead Merrifield's :math:`(TT)`
states can separate to form 9 :math:`(T..T)` states which can undergo spin
relaxation. This is an approximation to triplet-diffusion in the limit of
low excitation density.
Attributes
----------
states : list of str
The names of the excited state species.
rates : list of str
The names of the different rate constants in the model.
model_name : str
The name of the model.
G : float
The initial exciton density. Units of per volume.
initial_weighting : dict
Dictionary of (str, float) pairs. Key is the state name (str) and value is its initial weight (float). The default is {'S1': 1}.
t_step : float
The first time step taken by the simulation, thereafter the step will increase geometrically.
t_end : float
The last time point in the simulation.
num_points : int
The number of time points to compute the simulation at.
kSF : float
Rate constant for :math:`S_1\rightarrow (TT)`. Units of per time.
k_SF : float
Rate constant for :math:`(TT)\rightarrow S_1`. Units of per time.
kHOP : float
Rate constant for :math:`(TT)\rightarrow (T..T)`. Units of per time.
k_HOP : float
Rate constant for :math:`(T..T)\rightarrow (TT)`. Units of per time.
kRELAX : float
Rate constant for mixing between the :math:`(T..T)` states. Units of per time.
kSSA : float
Singlet-singlet annihilation rate constant. Units of volume per time.
kSNR : float
Rate constant for the decay of :math:`S_1`. Units of per time.
kTTNR : float
Rate constant for the decay of :math:`(TT)`. Units of per time.
kSPIN : float
Rate constant for the decay of :math:`(T..T)`. Units of per time.
cslsq : numpy.ndarray
1D array containing the overlap factors between the 9 :math:`(T..T)` states and the singlet.
simulation_results : dict
Produced by :meth:`simulate`. Keys are the excited-state names (str), values the simulated populations (numpy.ndarray).
"""
def __init__(self):
super().__init__()
# metadata
self.model_name = 'Bardeen'
self._number_of_states = 19
self.states = ['S1', 'TT_bright', 'TT_total', 'T_T_total']
self.rates = ['kSF', 'k_SF', 'kHOP', 'k_HOP', 'kRELAX', 'kSNR', 'kSSA', 'kTTNR', 'kSPIN']
self._allowed_initial_states = {'S1'}
self._initial_state_mapping = {'S1': 0}
# rates between excited states
self.kSF = 20.0
self.k_SF = 0.03
self.kHOP = 0.067
self.k_HOP = 2.5e-4
# spin relaxation
self.kRELAX = 0.033
# rates of decay
self.kSNR = 0.1
        self.kSSA = 0
self.kTTNR = 0.067
self.kSPIN = 2.5e-4
# cslsq values
self.cslsq = (1/9)*
|
np.ones(9)
|
numpy.ones
|
import numpy as np
class Layer():
def __init__(self, in_size, out_size, lam, name):
self.in_size = in_size
self.out_size = out_size
self.name = name
        self.isActivation = (name != "linear")  # activation layers carry no weights
if not self.isActivation:
# Weights
self.W = np.random.normal(loc=0.0, scale=0.01, size=(out_size, in_size))
# Bias
self.b = np.random.normal(loc=0.0, scale=0.01, size=(out_size, 1))
# Weight regularization
self.lam = lam
self.mom = {
'W' : np.zeros(self.W.shape),
'b' : np.zeros(self.b.shape)
}
self.resetGrad()
# this is a memory variable between forward/backward
self.x = np.empty(shape=(self.in_size, 1))
def forward(self, x):
assert x is not None
# sometimes we need to store input for backward
self.x = x
#print(self.name + " forward")
#print(self.x.shape)
def backward(self):
assert self.x is not None
#print(self.name + " back")
#print(self.x.shape)
def cost(self):
return 0
def resetGrad(self):
self.gW = np.zeros(self.W.shape)
self.gB = np.zeros(self.b.shape)
# for non-activation layers to implement
def update(self, l_rate=0.001):
pass
def updateMom(self, l_rate=0.001, momentum=0.0):
pass
class Linear(Layer):
def __init__(self, in_size, out_size, lam=0, name="linear"):
super().__init__(in_size, out_size, lam, name)
def forward(self, x):
Layer.forward(self, x)
# Wx + b
return np.dot(self.W, x) + self.b
def backward(self, grad):
Layer.backward(self)
N = self.x.shape[1]
self.resetGrad()
for i in range(N):
p = self.x[:, i]
g = grad[i, :]
self.gW += np.outer(g, p)
self.gB += np.reshape(g, self.gB.shape)
# here's the difference in (10) and (11)
self.gW = (1.0/N) * self.gW + 2 * self.lam * self.W
self.gB /= N
return np.dot(grad, self.W)
def cost(self):
return self.lam * np.power(self.W, 2).sum()
def update(self, l_rate=0.001):
self.W -= l_rate * self.gW
self.b -= l_rate * self.gB
def updateMom(self, l_rate=0.001, momentum=0.0):
self.mom['W'] = momentum * self.mom['W'] + l_rate * self.gW
self.mom['b'] = momentum * self.mom['b'] + l_rate * self.gB
self.W -= self.mom['W']
self.b -= self.mom['b']
class ReLU(Layer):
def __init__(self, in_size, name="relu"):
super().__init__(in_size, in_size, -1, name)
def forward(self, x):
Layer.forward(self, x)
# max(0, x)
return self.x * (self.x > 0)
def backward(self, grad):
Layer.backward(self)
return np.multiply(grad, self.x.T > 0)
class Softmax(Layer):
def __init__(self, in_size, name="softmax"):
super().__init__(in_size, in_size, -1, name)
def forward(self, x):
assert x is not None
try:
            # subtract the max before exponentiating for numerical stability
e = np.exp(x - x.max())
res = e / np.sum(e, axis=0)
except FloatingPointError:
# Gradient explosion scenario
print("jesus take the wheel")
            res = np.ones_like(x) / x.shape[0]  # fall back to a uniform distribution
Layer.forward(self, res)
return res
def backward(self, truth):
Layer.backward(self)
assert self.x.shape[1] == truth.shape[1]
N = truth.shape[1]
cols = ((truth[:,i], self.x[:,i]) for i in range(N))
grad = [self.softGrad(t, p) for (t, p) in cols]
return np.vstack(grad)
@staticmethod
def softGrad(t, p):
# Jacobian according for formulas in Ass1
a = np.outer(p,p)
b = np.dot(t, (np.diag(p) - a))
c = np.dot(t, p)
return -b/c
def cost(self, truth, prob=None):
x = self.x if prob is None else prob
assert x.shape[1] == truth.shape[1]
N = x.shape[1]
Py = np.multiply(truth, x).sum(axis=0)
Py[Py == 0] = np.finfo(float).eps # fix floats
return - np.log(Py).sum() / N
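# Editor's illustrative sketch (not part of the original module): chaining the layers above
# for a single forward pass plus the cross-entropy and regularisation cost on random data.
# Shapes follow the (features, batch) convention used by Linear.forward and Softmax.cost;
# the sizes are arbitrary and the helper is never called here.
def _example_forward_pass():
    x = np.random.normal(size=(10, 4))                  # 10 features, batch of 4
    truth = np.eye(3)[:, np.random.randint(0, 3, 4)]    # one-hot labels, shape (3, 4)
    linear = Linear(in_size=10, out_size=3, lam=0.01)
    relu = ReLU(in_size=3)
    softmax = Softmax(in_size=3)
    softmax.forward(relu.forward(linear.forward(x)))
    return softmax.cost(truth) + linear.cost()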
class BatchNorm(Layer):
# https://wiseodd.github.io/techblog/2016/07/04/batchnorm/
def __init__(self, in_size, mu=None, s=None, name="batch_norm"):
super().__init__(in_size, in_size, -1, name)
self.mu = mu if mu is not None else np.zeros(shape=(in_size, 1), dtype=float)
self.s = s if s is not None else np.eye(in_size, dtype=float)
def forward(self, x, train=False):
Layer.forward(self, x)
# if mu, s is passed: then it's eval time not training
self.mu = x.mean(axis=1) if train else self.mu
self.s = x.var(axis=1) if train else self.s
return
|
np.dot(self.s, (x.T - self.mu.T).T)
|
numpy.dot
|
import holoviews as hv
import hvplot
import hvplot.pandas
import hvplot.streamz
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import streamz
import umap
from bokeh.models import HoverTool
from holoviews import streams
from plotly.subplots import make_subplots
from sklearn.manifold import TSNE
from sklearn.metrics import auc, classification_report, confusion_matrix, roc_curve
from sklearn.neighbors import KNeighborsClassifier
from streamz.dataframe import DataFrame as StreamzDataFrame
from model_evaluation import plot_classification_report, plot_confussion_matrix
def define_visibles(x):
"""Funcion usada para definir qué traces se muestran en un grafico de plotly
Examples
--------
>>> define_visibles(x=[2,1,3])
[[True, True, False, False, False, False],
[False, False, True, False, False, False],
[False, False, False, True, True, True]]
Parameters
-------
x: list or np.array
        Number of classes/traces for each dropdown menu. [1,1,1] means 1 trace per menu and 3 dropdown menus
Returns
-------
list
        List of lists, containing only True or False values.
"""
if isinstance(x, list):
x = np.array(x)
visible_trace = []
for i, a in enumerate(x):
visible_trace.append(
[False] * np.sum(x[0:i]) + [True] * x[i] + [False] * np.sum(x[i + 1 :])
)
return visible_trace
def time_vs_y(df, time_col, id_col_name, id_list, cols_descr, y_col="y", title=""):
"""PLot a time series dataset with bars when a certain column is equal to 1
Example
--------
time_vs_y(df = df_m2,
time_col = 'Timestamp',
id_col_name = 'DMA',
id_list =['NEWSEVMA','NORFIFMA'],
cols_descr = ['PressureBar','m3Volume'],
y_col = 'is_leakage',
title='Time series of leakage and pressure at DMA level')
Parameters
----------
df : pd.DataFrame
Dataframe that contains all the information
time_col : str
Name of column with time
id_col_name : str
column name to aggregate by. Effectively this sets the dropdown menu
id_list : list of str
List of ids to display
    cols_descr : list of str
Selected columns to plot against time
y_col : str, optional
Column of dataset that contains 1 when a gray bar is plotted
title : str, optional
Title of plot, by default ""
Returns
-------
plotly.Figure
        Plotly figure with one dropdown menu entry per id in id_list
"""
if len(cols_descr) == 0:
print("No selected columns")
return 0
fig = go.Figure()
buttons = []
visible_start = [True] + [False] * (len(id_list) - 1)
# Select which columns are visible for each button
visible_trace = define_visibles([len(cols_descr)] * len(id_list))
# Loop over the selected columns and create trace
for i, z in enumerate(id_list):
df_subset = df.loc[
df[id_col_name] == z,
]
# Generate figure and keep data and layout
for c in cols_descr:
fig.add_trace(
go.Scattergl(
x=df_subset[time_col],
y=df_subset[c],
name=f"{c}",
visible=visible_start[i],
)
)
# Print lines as shapes
shapes = list()
min_val = 0
for j in np.where(df_subset[y_col] == 1)[0]:
if j == 0:
continue
max_val = (
df_subset[cols_descr]
.iloc[
j - 1,
]
.max()
.max()
)
shapes.append(
{
"type": "line",
"xref": "x",
"yref": "y",
"x0": df_subset[time_col].iloc[j] - pd.Timedelta(1, "h"),
"y0": min_val,
"x1": df_subset[time_col].iloc[j],
"y1": max_val,
"fillcolor": "gray",
"type": "rect",
"opacity": 0.5,
"layer": "below",
"line_width": 0,
}
            )
if visible_start[i] is True:
fig.update_layout(shapes=shapes)
        # create the dropdown buttons
buttons.append(
dict(
label=id_list[i],
method="update",
args=[{"visible": visible_trace[i]}, {"shapes": shapes}],
)
)
    # add the dropdown buttons to the layout
fig.update_layout(
updatemenus=[
go.layout.Updatemenu(
direction="up",
showactive=True,
xanchor="center",
yanchor="bottom",
pad={"l": 150, "b": -390, "t": 0},
buttons=buttons,
)
]
)
fig.update_layout(width=1100, height=500, title=title)
return fig
def visualize_time_series(
df, range_dates_zoom=None, range_y_right=[1.5, 5], range_y_left=[-10, 45]
):
"""[summary]
Parameters
----------
df : [type]
[description]
range_dates_zoom : [type], optional
[description], by default None
Returns
-------
[type]
[description]
"""
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Scattergl(
x=df["Timestamp"], y=df["m3Volume"], name="m3Volume", marker_color="#636EFA"
),
secondary_y=False,
)
fig.add_trace(
go.Scattergl(
x=df["Timestamp"],
y=df["PressureBar"],
name="PressureBar",
marker_color="#EF553B",
),
secondary_y=True,
)
fig.update_yaxes(range=range_y_right, secondary_y=True)
fig.update_yaxes(range=range_y_left, secondary_y=False)
# Print lines as shapes
shapes = list()
min_val =
|
np.min([range_y_right, range_y_left])
|
numpy.min
|
import numpy as np
import os
import glob
import nibabel as nib
from dipy.data import get_sphere
from dipy.core.sphere import Sphere, HemiSphere
from dipy.reconst.shm import sph_harm_lookup, smooth_pinv
from dipy.io import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.reconst.dti import TensorModel
from dipy.reconst.dti import fractional_anisotropy
import dipy.reconst.dti as dti
from dipy.segment.mask import median_otsu
import threading
class DataHandler(object):
def __init__(self, params, mode):
self.params = params
self.dwi_path = self.params.data
self.brain_mask_path = self.params.bm
self.wm_mask_path = self.params.wm
if mode == 'train':
self.labels_path = self.params.labels
else:
self.labels_path = None
self.dwi = None
self.bvals = None
self.bvecs = None
self.brain_mask = np.array([])
self.wm_mask = np.array([])
if self.dwi_path is not None:
self.load_dwi()
self.load_b_table()
if self.brain_mask_path is not None:
self.brain_mask = self.load_mask(self.brain_mask_path)
else:
self.brain_mask = self.get_bm()
if self.wm_mask_path is not None:
self.wm_mask = self.load_mask(self.wm_mask_path)
else:
self.wm_mask = self.get_wm_mask()
if self.labels_path is not None:
self.load_labels()
def load_dwi(self):
dwi_file = get_file_path(os.getcwd(), self.dwi_path, "*.nii*")
dwi_data = nib.load(dwi_file)
self.dwi = dwi_data.get_data().astype("float32")
self.affine = dwi_data.affine
self.load_b_table()
b0 = self.bvals <= 5
single_shell_bval = 1000
b_single = np.logical_and(self.bvals<=single_shell_bval+5, self.bvals>=single_shell_bval-5)
ind_0_single= np.logical_or(b0, b_single)
self.dwi = self.dwi[:,:,:,ind_0_single]
self.bvecs= self.bvecs[ind_0_single,:]
self.bvals= self.bvals[ind_0_single]
self.gtab = gradient_table(self.bvals, self.bvecs)
print(f'Number of single shell directions: {sum(b_single)}')
def load_labels(self):
data = nib.load(self.labels_path)
self.labels = data.get_data().astype("float32")
def load_b_table(self):
bval_file = get_file_path(os.getcwd(), self.dwi_path, "*.bvals")
bvec_file = get_file_path(os.getcwd(), self.dwi_path, "*.bvecs")
self.bvals, self.bvecs = read_bvals_bvecs(bval_file, bvec_file)
def get_wm_mask(self):
tenmodel = TensorModel(self.gtab)
tenfit = tenmodel.fit(self.dwi, mask=self.brain_mask)
FA = fractional_anisotropy(tenfit.evals)
MD = dti.mean_diffusivity(tenfit.evals)
wm_mask = (np.logical_or(FA >= 0.25, (np.logical_and(FA >= 0.12, MD >= 0.001))))
return wm_mask
def get_bm(self):
b0_mask, brain_mask = median_otsu(self.dwi, median_radius=1, numpass=2, vol_idx=[0,0])
return brain_mask
def resample_dwi(self, directions=200, sh_order=8, smooth=0.002):
print(f'resampling diffusion data on {directions} directions...')
# sphere = get_sphere('repulsion200')
Xp, Yp, Zp= distribute_on_hemisphere(directions)
sphere = Sphere(Xp, Yp, Zp)
# self.v = sphere.vertices
sph_harm_basis = sph_harm_lookup.get("tournier07")
# descoteaux07
Ba, m, n = sph_harm_basis(sh_order, sphere.theta, sphere.phi)
self.Ba = Ba
self.b0_idx = self.bvals == np.min(self.bvals)
self.b0 = self.dwi[:,:,:,self.b0_idx].mean(axis=3) + 1e-6
self.bvecs = self.bvecs[np.logical_not(self.b0_idx)]
raw_sphere = Sphere(xyz=self.bvecs)
Ba, m, n = sph_harm_basis(sh_order, raw_sphere.theta, raw_sphere.phi)
L = -n * (n + 1)
self.invB = smooth_pinv(Ba, np.sqrt(smooth) * L)
self.dwi = self.dwi[..., np.logical_not(self.b0_idx)]
nb_erroneous_voxels = np.sum(self.dwi > self.b0[..., None])
if nb_erroneous_voxels != 0:
self.dwi = np.minimum(self.dwi, self.b0[..., None])
self.dwi /= self.b0[..., None]
self.mean_val =
|
np.mean(self.dwi)
|
numpy.mean
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-28-20 22:56
# @Update : Nov-23-20 20:57
# @Author : <NAME> (<EMAIL>)
# @Link : http://example.org
import os
import csv
import json
import numpy as np
import matplotlib.pyplot as plt
from utils import read_data
from utils import read_TP, read_TN
# data path
MODEL_SAVES_DIR = "./models-resnetv2/"
MODEL_FILE_NAME = "ResNet20v2.025-auc-0.9367.h5"
# constants
IMAGE_WIDTH = IMAGE_HEIGHT = 224
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
BATCH_SIZE = 16
CURRENT_BEST = 0.98866
MY_BEST = 0.97156
def plot_scores(epoch_auc):
""" Load Config """
with open('config.json', 'r') as f:
CONFIG = json.load(f)
current_best = CONFIG["CURRENT_BEST"]
my_best = CONFIG["MY_BEST"]
""" Visualize Training """
plt.figure(figsize=(1000/100, 800/100))
plt.plot(epoch_auc[:, 0], epoch_auc[:, 1],
color='r', linestyle='-', marker='*', label="Training auc")
plt.plot(epoch_auc[:, 0], epoch_auc[:, 2],
color='b', linestyle='-', marker='o', label="Score")
    # pick two arbitrary epoch coordinates to mark the best scores
plt.scatter(45, current_best, label="Current best")
plt.scatter(40, my_best, label="My best")
# plt.set_xticks(np.arange(1, epochs, 1))
# plt.set_yticks(np.arange(0, 1, 0.1))
    # set the axis ranges
plt.xlim((20, 50))
plt.ylim((0.9, 1))
plt.xlabel("num_epoch")
plt.ylabel("train_auc")
    # set the axis ticks
plt.xticks(np.arange(20, 50, 1))
plt.yticks(np.arange(0.9, 1, 0.01))
plt.grid(True)
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
def plot_scores2():
""" Load Config """
with open('config.json', 'r') as f:
CONFIG = json.load(f)
current_best = CURRENT_BEST
my_best = MY_BEST
""" Read data """
data_acc = read_data(filepath="./ResNet20v2-epoch-auc.csv")
epochs_resnet20 = 45
epoch_val_accuracy = read_data(
filepath="./ResNet56v2-epoch-val_accuracy.csv")
epoch_auc = read_data(
filepath="./ResNet56v2-epoch-auc.csv")
for i in range(len(epoch_val_accuracy)):
epoch_val_accuracy[i, 0] += epochs_resnet20
for i in range(len(epoch_auc)):
epoch_auc[i, 0] += epochs_resnet20
""" Visualize Training """
plt.figure(figsize=(1200/100, 800/100))
plt.plot(data_acc[:, 0], data_acc[:, 1],
color='r', linestyle='-', marker='*', label="Training auc")
plt.plot(data_acc[:, 0], data_acc[:, 2],
color='b', linestyle='-', marker='o', label="Score")
    # pick two arbitrary epoch coordinates to mark the best scores
plt.scatter(45.5, my_best, color='g', label="My best")
plt.scatter(60, current_best, color='m', label="Current best")
plt.plot(epoch_val_accuracy[:, 0], epoch_val_accuracy[:, 1],
color='r', linestyle='-', marker='*', label="Training accuracy")
plt.plot(epoch_val_accuracy[:, 0], epoch_val_accuracy[:, 2],
color='b', linestyle='-', marker='o', label="Score")
plt.plot(epoch_auc[:, 0], epoch_auc[:, 1],
color='r', linestyle='-', marker='*', label="Training auc")
plt.plot(epoch_auc[:, 0], epoch_auc[:, 2],
color='b', linestyle='-', marker='o', label="Score")
# plt.set_xticks(np.arange(1, epochs, 1))
# plt.set_yticks(np.arange(0, 1, 0.1))
    # set the axis ranges
EPOCHS = 70
plt.xlim((20, EPOCHS))
plt.ylim((0.65, 1))
plt.xlabel("num_epoch")
plt.ylabel("train_auc/acc/score")
    # set the axis ticks
plt.xticks(
|
np.arange(20, EPOCHS, 1)
|
numpy.arange
|
import logging
import numpy
import os
from multiprocessing import Process, Manager, Value, Semaphore
from random import random
from uuid import uuid4
import pysam
from Bio import pairwise2
from Bio.Seq import Seq
from blast_wrapper import get_blast_matched_ids, make_blast_database
from coverage_bias import CoverageBiasDetector, CoverageCorrector
from hmm_utils import *
from pacbio_haplotyper import PacBioHaplotyper
from pomegranate import HiddenMarkovModel as Model
from profiler import time_usage
from sam_utils import get_reference_genome_of_alignment_file
from sam_utils import get_related_reads_and_read_count_in_samfile
import settings
from utils import is_low_quality_read
class SelectedRead:
def __init__(self, sequence, logp, vpath, mapq=None, reference_start=None):
self.sequence = sequence
self.logp = logp
self.vpath = vpath
self.mapq = mapq
self.is_mapped = reference_start is not None
def is_mapped(self):
return self.is_mapped
class VNTRFinder:
"""Find the VNTR structure of a reference VNTR in NGS data of the donor."""
def __init__(self, reference_vntr):
self.reference_vntr = reference_vntr
self.min_repeat_bp_to_add_read = 2
if len(self.reference_vntr.pattern) < 30:
self.min_repeat_bp_to_add_read = 2
self.min_repeat_bp_to_count_repeats = 2
self.minimum_left_flanking_size = {}
self.minimum_right_flanking_size = {69212: 19, 532789: 12, 400825: 10, 468671: 10}
self.vntr_start = self.reference_vntr.start_point
self.vntr_end = self.vntr_start + self.reference_vntr.get_length()
@time_usage
def build_vntr_matcher_hmm(self, copies, flanking_region_size=100):
patterns = self.reference_vntr.get_repeat_segments()
left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size]
vntr_matcher = get_read_matcher_model(left_flanking_region, right_flanking_region, patterns, copies)
return vntr_matcher
def get_vntr_matcher_hmm(self, read_length):
"""Try to load trained HMM for this VNTR
If there was no trained HMM, it will build one and store it for later usage
"""
logging.info('Using read length %s' % read_length)
copies = int(round(float(read_length) / len(self.reference_vntr.pattern) + 0.5))
base_name = str(self.reference_vntr.id) + '_' + str(read_length) + '.json'
stored_hmm_file = settings.TRAINED_HMMS_DIR + base_name
if settings.USE_TRAINED_HMMS and os.path.isfile(stored_hmm_file):
model = Model()
model = model.from_json(stored_hmm_file)
return model
flanking_region_size = read_length
vntr_matcher = self.build_vntr_matcher_hmm(copies, flanking_region_size)
json_str = vntr_matcher.to_json()
with open(stored_hmm_file, 'w') as outfile:
outfile.write(json_str)
return vntr_matcher
@time_usage
def filter_reads_with_keyword_matching(self, working_directory, read_file, short_reads=True):
db_name = 'blast_db__' + os.path.basename(read_file)
blast_db_name = working_directory + db_name
empty_db = False
if not os.path.exists(blast_db_name + '.nsq') and not os.path.exists(blast_db_name + '.nal'):
empty_db = make_blast_database(read_file, blast_db_name)
word_size = int(len(self.reference_vntr.pattern)/3)
if word_size > 11:
word_size = 11
if word_size < 5:
word_size = 5
word_size = str(word_size)
search_results = []
blast_ids = set([])
search_id = str(uuid4()) + str(self.reference_vntr.id)
queries = self.reference_vntr.get_repeat_segments()
if len(self.reference_vntr.pattern) < 10:
min_copies = int(10 / len(self.reference_vntr.pattern))
queries = [self.reference_vntr.pattern * min_copies]
identity_cutoff = '0'
if not short_reads:
queries = [self.reference_vntr.left_flanking_region[-80:], self.reference_vntr.right_flanking_region[:80]]
word_size = str('10')
identity_cutoff = '70'
if not empty_db:
for query in queries:
search_result = get_blast_matched_ids(query, blast_db_name, max_seq='50000', word_size=word_size,
evalue=10, search_id=search_id, identity_cutoff=identity_cutoff)
search_results.append(search_result)
if short_reads:
for search_result in search_results:
blast_ids |= search_result
else:
blast_ids = search_results[0] & search_results[1]
logging.info('blast selected %s reads for %s' % (len(blast_ids), self.reference_vntr.id))
if len(blast_ids) == len(self.reference_vntr.get_repeat_segments()) * 50 * 1000:
logging.error('maximum number of read selected in filtering for pattern %s' % self.reference_vntr.id)
return blast_ids
@staticmethod
def add_hmm_score_to_list(sema, hmm, read, result_scores):
logp, vpath = hmm.viterbi(str(read.seq))
rev_logp, rev_vpath = hmm.viterbi(str(Seq(str(read.seq)).reverse_complement()))
if logp < rev_logp:
logp = rev_logp
result_scores.append(logp)
sema.release()
def is_true_read(self, read):
read_start = read.reference_start
reference_name = read.reference_name
if not reference_name.startswith('chr'):
reference_name = 'chr' + reference_name
if reference_name == self.reference_vntr.chromosome and self.vntr_start - len(read.seq) < read_start < self.vntr_end:
return True
return False
def find_score_distribution_of_ref(self, samfile, reference, hmm, false_scores, true_scores):
process_list = []
sema = Semaphore(settings.CORES)
for read in samfile.fetch(reference, multiple_iterators=True):
if read.is_unmapped:
continue
if read.seq.count('N') > 0:
continue
if self.is_true_read(read):
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, true_scores))
else:
if random() > settings.SCORE_FINDING_READS_FRACTION:
continue
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, false_scores))
process_list.append(p)
p.start()
for p in process_list:
p.join()
def save_scores(self, true_scores, false_scores, alignment_file):
with open('true_scores_dist_%s_%s' % (self.reference_vntr.id, os.path.basename(alignment_file)), 'w') as out:
for score in true_scores:
out.write('%.4f\n' % score)
with open('false_scores_dist_%s_%s' % (self.reference_vntr.id, os.path.basename(alignment_file)), 'w') as out:
for score in false_scores:
out.write('%.4f\n' % score)
@time_usage
def calculate_min_score_to_select_a_read(self, hmm, alignment_file):
"""Calculate the score distribution of false positive reads
and return score to select the 1e-8 percentile of the distribution
"""
process_list = []
manager = Manager()
false_scores = manager.list()
true_scores = manager.list()
read_mode = 'r' if alignment_file.endswith('sam') else 'rb'
samfile = pysam.AlignmentFile(alignment_file, read_mode)
refs = [ref for ref in samfile.references if ref in settings.CHROMOSOMES or 'chr' + ref in settings.CHROMOSOMES]
for ref in refs:
p = Process(target=self.find_score_distribution_of_ref, args=(samfile, ref, hmm, false_scores, true_scores))
process_list.append(p)
p.start()
for p in process_list:
p.join()
if settings.SAVE_SCORE_DISTRIBUTION:
self.save_scores(true_scores, false_scores, alignment_file)
score =
|
numpy.percentile(false_scores, 100 - settings.SCORE_SELECTION_PERCENTILE)
|
numpy.percentile
|
import json
import datetime
from typing import List, Tuple
from collections.abc import Mapping
from pytz import timezone
import numpy as np
from scipy import interpolate
from scipy.signal import (butter,
sosfilt, sosfiltfilt, lfilter,
iirnotch, filtfilt, medfilt)
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
from yacs.config import CfgNode
# ======================================================== General utils ========================================================
def count_section_sample(list_of_section):
count = 0
for section in list_of_section:
count+= section[1]-section[0] +1
return count
def round_or_none(number, r):
    if number is not None:
return round(number, r)
return number
def format_timedelta(td):
minutes, seconds = divmod(td.seconds + td.days * 86400, 60)
hours, minutes = divmod(minutes, 60)
return '{:d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
def sec_to_time_format(sec):
conversion = datetime.timedelta(seconds=sec)
return format_timedelta(conversion)
def update_parameters(parameters_dict, cfg_new):
for key, value in parameters_dict.items():
if isinstance(value, Mapping) and value:
update_parameters(parameters_dict.get(key, value), cfg_new[key])
elif value is not None:
cfg_new[key] = parameters_dict[key]
return cfg_new
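# Editor's illustrative sketch (not part of the original module): update_parameters above
# recursively copies the non-None leaves of a plain dict onto a yacs CfgNode, so nested
# overrides can be applied in one call. The keys and values are invented for illustration.
def _example_update_parameters():
    cfg = CfgNode({'filter': CfgNode({'lowpass': 40, 'highpass': 0.5}), 'fs': 250})
    overrides = {'filter': {'lowpass': 35, 'highpass': None}}  # None values are left as-is
    return update_parameters(overrides, cfg)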
def convert_to_dict(cfg_node, key_list=[]):
    """ Convert a config node to dictionary """
    _VALID_TYPES = {tuple, list, str, int, float, bool}
if not isinstance(cfg_node, CfgNode):
return cfg_node
else:
cfg_dict = dict(cfg_node)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_to_dict(v, key_list + [k])
return cfg_dict
def json_to_dict(path):
with open(path, "r") as fp:
dict_data = json.load(fp)
return dict_data
def sec_to_sample(sec, fs, to_int = True):
if to_int:
return int(sec*fs)
return sec*fs
def msec_to_sample(msec, fs, to_int = True):
if to_int:
return int(msec*fs/1000)
return msec*fs/1000
def msec_to_hr(msec):
return 60/(msec/1000)
def min_to_sample(min, fs):
return msec_to_sample(min*60*1000, fs)
def sample_to_msec(sample, fs):
return sample*1000/fs
def sample_to_sec(sample, fs):
return sample/fs
def sample_to_hr(sample, fs):
return int(60/sample_to_sec(sample, fs))
def sample_to_minute(sample, fs):
return sample/fs/60
def minute_to_msec(minute):
return minute*60*1000
def timestamp_sec_to_datetime(ts, mode = 'bkk', return_object = True):
if mode == 'utc':
mode = timezone('UTC')
elif mode == "gmt":
mode = timezone('GMT')
elif mode == "bkk":
mode = timezone('Asia/Bangkok')
else:
mode = timezone('GMT')
dt_object = datetime.datetime.fromtimestamp(ts, mode)
if return_object:
return dt_object
return dt_object.strftime("%d/%m/%Y %H:%M:%S")
def timestamp_msec_to_datetime(ts, mode = 'bkk', return_object=True):
return timestamp_sec_to_datetime(ts/1000, mode = mode, return_object = return_object)
def list_of_list_to_list(list_of_list):
return [element for mylist in list_of_list for element in mylist]
# ======================================================== signal utils ============================================
# signals mean square
def signal_mean_square(signals, fs, windows = 20):
windows = int(windows*fs/1000)
return ma(signals*signals, windows)
def signal_mean_absolute(signals, fs, windows = 20):
windows = int(windows*fs/1000)
return ma(np.abs(signals), windows)
def signal_second_derivative(signals):
second_dif_signals = np.zeros_like(signals)
second_dif_signals[1:-1] = np.diff(np.diff(signals))
return second_dif_signals
def butter_highpass_parameter(highpass, fs,output ='sos',order = 4):
high = highpass / (0.5 * fs)
if output == 'sos':
sos = butter(order, high, btype='high', analog=False, output=output)
return sos
elif output == 'ba':
b, a = butter(order, high, btype='high', analog=False, output=output)
return b, a
def butter_highpass(signal, highpass, fs, order = 4, output = 'sos', algo = 'filtfilt'):
if output == 'sos':
sos = butter_highpass_parameter(highpass, fs, output=output, order = order)
if algo == 'filtfilt':
return sosfiltfilt(sos, signal)
elif algo == 'filt':
return sosfilt(sos, signal)
if output == 'ba':
b, a = butter_highpass_parameter(highpass, fs, output=output, order = order)
if algo == 'filtfilt':
return filtfilt(b, a, signal)
elif algo == 'filt':
return lfilter(b,a, signal)
raise ValueError("Filter algorithm not support")
def butter_lowpass_parameter(lowpass, fs,output ='sos',order = 4):
low = lowpass / (0.5 * fs)
if output == 'sos':
sos = butter(order, low, btype='low', analog=False, output=output)
return sos
elif output == 'ba':
b, a = butter(order, low, btype='low', analog=False, output=output)
return b, a
def butter_lowpass(signal, lowpass, fs, order = 4, output = 'sos', algo = 'filtfilt'):
if output == 'sos':
sos = butter_lowpass_parameter(lowpass, fs, output=output, order = order)
if algo == 'filtfilt':
return sosfiltfilt(sos, signal)
elif algo == 'filt':
return sosfilt(sos, signal)
if output == 'ba':
b, a = butter_lowpass_parameter(lowpass, fs, output=output, order = order)
if algo == 'filtfilt':
return filtfilt(b, a, signal)
elif algo == 'filt':
return lfilter(b,a, signal)
raise ValueError("Filter algorithm not support")
def notch_filter_parameter(notch_fs, q_factor, fs):
b, a = iirnotch(notch_fs, q_factor, fs)
return b, a
def notch_filter(signal, notch_fs, q_factor, fs):
b, a = notch_filter_parameter(notch_fs, q_factor, fs)
return lfilter(b, a, signal)
def butter_bandpass(signal, lowpass, highpass, fs, order = 4, output = 'sos', algo = 'filtfilt'):
if output == 'sos':
sos = butter(order, [lowpass, highpass], btype='band', analog=False, output=output, fs = fs)
if algo == 'filtfilt':
return sosfiltfilt(sos, signal)
elif algo == 'filt':
return sosfilt(sos, signal)
elif output == 'ba':
b,a = butter(order, [lowpass, highpass], btype='band', analog=False, output=output, fs = fs)
if algo == 'filtfilt':
return filtfilt(b, a, signal)
elif algo == 'filt':
return lfilter(b,a, signal)
raise ValueError("Filter algorithm not support")
def ma(signal, sample, algo = 'cumsum'):
if algo == 'convolve':
return ma_convolve(signal, sample)
elif algo == 'cumsum':
return ma_cumsum(signal, sample)
    raise ValueError(f'{algo} not supported. Supported algos: "convolve", "cumsum"')
def ma_cumsum(signal, sample):
back = int(sample/2)
signal = np.pad(signal, (0, back) ,'edge')
signal =
|
np.cumsum(signal, dtype=float)
|
numpy.cumsum
|
import properties
import numpy as np
from geoana.em.base import BaseMagneticDipole
from geoana.em.fdem.base import BaseFDEM, sigma_hat
from scipy.constants import mu_0, epsilon_0
from empymod.utils import check_hankel
from empymod.transform import get_dlf_points
from geoana.kernels.tranverse_electric_reflections import rTE_forward
class MagneticDipoleLayeredHalfSpace(BaseMagneticDipole, BaseFDEM):
thickness = properties.Array(
"Layer thicknesses (m) starting from the top-most layer. The bottom layer is assumed to be infinite.",
shape=('*', ),
dtype=float
)
sigma = properties.Array(
"Electrical conductivity (S/m), defined starting from the top most layer",
shape=('*', ),
dtype=complex,
coerce=True
)
frequency = properties.Array(
"Source frequency (Hz)",
shape=('*', ),
dtype=float
)
mu = properties.Array(
"Magnetic permeability (H/m), defined starting from the top most layer",
shape=('*', ),
dtype=complex,
default=np.array([mu_0], dtype=np.complex128)
)
epsilon = properties.Array(
"Permitivity value (F/m), defined starting from the top most layer",
shape=('*', ),
dtype=float,
default=np.array([epsilon_0], dtype=np.float64)
)
def _get_valid_properties(self):
thick = self.thickness
n_layer = len(thick)+1
sigma = self.sigma
epsilon = self.epsilon
mu = self.mu
if n_layer != 1:
sigma = self.sigma
if len(sigma) == 1:
sigma = np.ones(n_layer)*sigma
epsilon = self.epsilon
if len(epsilon) == 1:
epsilon = np.ones(n_layer)*epsilon
mu = self.mu
if len(mu) == 1:
mu = np.ones(n_layer)*mu
return thick, sigma, epsilon, mu
@property
def sigma_hat(self):
_, sigma, epsilon, _ = self._get_valid_properties()
return sigma_hat(
self.frequency[:, None], sigma, epsilon,
quasistatic=self.quasistatic
).T
@property
def wavenumber(self):
raise NotImplementedError()
@property
def skin_depth(self):
raise NotImplementedError()
def magnetic_field(self, xyz, field="secondary"):
"""
Magnetic field due to a magnetic dipole in a layered halfspace at a specific height z
Parameters
----------
xyz : numpy.ndarray
receiver locations of shape (n_locations, 3).
The z component cannot be below the surface (z=0.0).
field : ("secondary", "total")
Flag for the type of field to return.
"""
if
|
np.any(xyz[:, 2] < 0.0)
|
numpy.any
|
import argparse
import json
import os
import glob
import numpy as np
import pandas as pd
import tqdm
# This function maps the statistics outputted by the hospital model
# to the statistics of the true data
def compute_true_summary_statistics(csv_df, expected_columns):
new_dict = {}
for column in expected_columns:
if column == 'timestep' or column == 'date':
continue
elif column == 'n_occupied_beds':
new_dict[column] = csv_df['n_InGeneralWard'] + csv_df['n_OffVentInICU'] + csv_df['n_OnVentInICU']
elif column == 'n_InICU':
new_dict[column] = csv_df['n_OffVentInICU'] + csv_df['n_OnVentInICU']
else:
new_dict[column] = csv_df[column]
return pd.DataFrame(new_dict)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='results/US/MA-20201111-20210111-20210211')
parser.add_argument('--output_dir', default='results/US/MA-20201111-20210111-20210211')
parser.add_argument('--config_path', default='results/US/MA-20201111-20210111-20210211/config_after_abc.json')
parser.add_argument('--output_template', default='metrics_after_abc')
parser.add_argument('--true_stats', default='datasets/US/MA-20201111-20210111-20210211/daily_counts.csv')
parser.add_argument('--input_summaries_template', default='summary_after_abc_')
parser.add_argument('--coverages',
type=str,
default='2.5_97.5,10_90,25_75')
parser.add_argument('--comma_sep_expected_columns',
default='n_InGeneralWard,n_OffVentInICU,n_OnVentInICU,n_InICU,n_occupied_beds,n_TERMINAL,n_TERMINAL_5daysSmoothed')
args = parser.parse_args()
with open(args.config_path, 'r') as f:
config = json.load(f)
num_training_timesteps = config['num_training_timesteps']
if args.comma_sep_expected_columns == 'None':
expected_columns = None
else:
expected_columns = args.comma_sep_expected_columns.split(',')
true_df = pd.read_csv(args.true_stats)
true_df = true_df[true_df['timestep'] > num_training_timesteps]
# drop columns from true_df that are not among the expected columns
original_columns = true_df.columns
for col in original_columns:
if col not in expected_columns:
true_df = true_df.drop(col, axis=1)
true_counts = true_df.values
print("------------------------------------------------------")
print("Computing MAE for mean")
print("------------------------------------------------------")
mean_df = pd.read_csv(os.path.join(args.input_dir, "%smean.csv" % (args.input_summaries_template)))
mean_df = mean_df[mean_df['timestep'] > num_training_timesteps]
if 'n_TERMINAL_5daysSmoothed' in expected_columns:
mean_df['n_TERMINAL_5daysSmoothed'] = np.copy(mean_df['n_TERMINAL'])
mean_df = compute_true_summary_statistics(mean_df, expected_columns)
original_columns = mean_df.columns
for col in original_columns:
if col not in expected_columns:
mean_df = mean_df.drop(col, axis=1)
mean_counts = mean_df.values
mae_scores = np.mean(np.abs(mean_counts - true_counts), axis=0).reshape((1, len(expected_columns)))
max_scores = np.max(np.abs(mean_counts - true_counts), axis=0).reshape((1, len(expected_columns)))
mean_true_counts = np.mean(true_counts, axis=0).reshape((1, len(expected_columns)))
scores = np.vstack([mae_scores, max_scores, mean_true_counts])
rows = np.array(['Mean Absolute Error', 'Maximum Absolute Error', 'Mean of True Counts']).reshape((3, 1))
df = pd.DataFrame(np.hstack([rows, scores]), columns=['Metric']+expected_columns)
df.to_csv(
os.path.join(args.output_dir, "mae_scores_%s.csv" % (args.output_template)),
index=False, float_format='%.2f')
print("------------------------------------------------------")
print("Computing coverage for given ranges")
print("------------------------------------------------------")
T = true_counts.shape[0]
coverages = args.coverages.split(',')
results_dict = {'Coverages (%)': list(map(lambda x: '%d' % (float(x.split('_')[1]) - float(x.split('_')[0])), coverages))}
for coverage in coverages:
low, high = list(map(float, coverage.split('_')))
low_df = pd.read_csv(os.path.join(args.input_dir, "%spercentile=%06.2f.csv" % (args.input_summaries_template, low)))
low_df = low_df[low_df['timestep'] > num_training_timesteps]
if 'n_TERMINAL_5daysSmoothed' in expected_columns:
low_df['n_TERMINAL_5daysSmoothed'] = np.copy(low_df['n_TERMINAL'])
low_df = compute_true_summary_statistics(low_df, expected_columns)
original_columns = low_df.columns
for col in original_columns:
if col not in expected_columns:
low_df = low_df.drop(col, axis=1)
high_df = pd.read_csv(os.path.join(args.input_dir, "%spercentile=%06.2f.csv" % (args.input_summaries_template, high)))
high_df = high_df[high_df['timestep'] > num_training_timesteps]
if 'n_TERMINAL_5daysSmoothed' in expected_columns:
high_df['n_TERMINAL_5daysSmoothed'] = np.copy(high_df['n_TERMINAL'])
high_df = compute_true_summary_statistics(high_df, expected_columns)
original_columns = high_df.columns
for col in original_columns:
if col not in expected_columns:
high_df = high_df.drop(col, axis=1)
low_counts = low_df.values
high_counts = high_df.values
is_in_low = true_counts > low_counts
is_in_high = true_counts < high_counts
is_in_range = np.logical_and(is_in_low, is_in_high)
counts_in_range =
|
np.sum(is_in_range, axis=0)
|
numpy.sum
|
import numpy
from skimage.external.tifffile import TiffFile, TiffWriter
import skimage.io
import skimage.morphology
import skimage.transform
import random
import json
import tensorflow
from tensorflow import nn
import tensorflow.keras as keras
from tensorflow.keras.utils import Sequence
import re
import pathlib
import math
import unetsl
import os.path
import scipy.ndimage
from matplotlib import pyplot
from matplotlib.widgets import Slider
"""
Data keys
"""
SOURCE_TYPE = "source type"
PAIRED_DIRECTORY = "paired directories"
PAIRED_FILES = "paired files"
WEIGHTED_DIRECTORY = "weighted directories"
LABELS_TO_CATEGORY = "labels to category"
INPUT_FOLDERS = "input folders"
LABEL_FOLDERS = "label folders"
WEIGHTS_FOLDERS = "weights folders"
TRAINING_IMAGES = "training images"
LABEL_IMAGES = "label images"
ROTATIONS = "rotations"
CROP = "crop"
LABELLER = "labeller"
REGION_LABELLER = "region labels"
MULTICLASS_LABELS = "multiclass labels"
CATEGORICAL_LABELS = "categorical labels"
DOUBLE_MEMBRANE_LABELS = "double membrane labels"
LINEAR_LABELS = "linear labels"
"""
Data Functions
"""
class DataSource:
def __init__(self):
pass
def getDataGenerators(self, stride, batch_size):
pass
def updateGeometry(self, *args):
pass
def split(self, *args):
return self, None
def steps(self, *args):
return 0
class RequiredArgument(Exception):
pass
class IndexedVolumeData:
"""
Indexed volume pairs two volumes, images together and indexes over them
returning each chunk. Usage
ivd = Ivd(volume, labels, n_labels, patches, stride, labeller)
ivd.generateIndexes()
dg = ivd.getDataGenerator()
The patches, stride, and indexes are optional, created for splitting
the indexed volume.
volume: an image with 5 dimensions. [channel, z, y, x]
labels: image with the same zyx dimensions [n_labels, z, y, x].
n_labels: number of output channels.
patches: size of input
stride: distance between
"""
def __init__(self, volume, labels, n_labels=1, patches=(1,1,1,1), stride = None, indexes=None, labeller=None, normalize_samples=None, padding=None):
if padding is not None:
raise Exception("padding is not an accepted argument")
else:
self.padding = [0, 10, 10]
if normalize_samples is None:
raise RequiredArgument("normalize_samples is a required argument")
if stride is None:
stride = patches[:]
if volume.shape[-3:] != labels.shape[-3:]:
raise Exception("label and sample data differ in x,y,z dimensions %s != %s"%(volume.shape[-3:], labels.shape[-3:]))
self.volume = volume
self.labels = labels
self.n_labels = n_labels
self.patches = patches
self.stride = stride
self.indexes = indexes
self.labeller=labeller
self.normalize_samples=normalize_samples
def updateGeometry(self, n_labels, patches, stride=None, padding=None):
if stride is None:
stride = patches[:]
if self.volume.shape[0] != patches[0]:
self.volume = splitIntoChannels(patches, self.volume)
self.n_labels = n_labels
self.patches = patches
self.stride = stride
def generateIndexes(self):
self.indexes=indexVolume(self.volume, self.patches, self.stride, self.padding)
def setIndexes(self, indexes):
self.indexes=indexes
def getDataGenerator(self, batch_size=1):
if self.indexes is None:
self.generateIndexes()
if self.padding is None:
return getDataGenerator(self.volume, self.labels, self.n_labels, self.indexes, self.patches, batch_size=batch_size, labeller=self.labeller, normalize_samples = self.normalize_samples)
else:
return getPaddedDataGenerator(self.volume, self.labels, self.n_labels, self.indexes, self.patches, batch_size=batch_size, labeller=self.labeller, normalize_samples = self.normalize_samples, padding=self.padding)
def size(self):
return len(self.indexes)
def steps(self, batch_size):
n = len(self.indexes)
batches = n//batch_size
if n == batches*batch_size:
return batches
else:
return batches + 1
def split(self, f, shuffle=True):
if(shuffle):
random.shuffle(self.indexes);
s1 = int(f*len(self.indexes))
return (
IndexedVolumeData(self.volume, self.labels, self.n_labels, self.patches, self.stride, self.indexes[:s1], self.labeller, self.normalize_samples),
IndexedVolumeData(self.volume, self.labels, self.n_labels, self.patches, self.stride, self.indexes[s1:], self.labeller, self.normalize_samples)
)
def __str__(self):
l = 0
if self.indexes:
l = len(self.indexes)
return "%s shape: %s n_labels: %s indexes %s normalize: %s"%(
self.__class__, self.volume.shape, self.n_labels,
l, self.normalize_samples)
class TimeSeriesDataGenerator:
"""
Outdated; broken until further notice.
"""
def __init__(self, file_list, patch_size, out_patch, channels, batch, crop, stride):
self.n = -1
self.file_list = file_list
self.patch_size = patch_size
self.out_patch = out_patch
self.channels = channels
self.batch = batch
self.crop = crop
self.n_labels = out_patch[0]
self.stride = stride
self.loadFirstStack()
def getCount(self):
return self.n//self.batch
def getGenerator(self):
while True:
loaded = list(self.loaded)
self.loaded = []
for i in range(len(self.file_list)):
#process
genx = []
for j in range(self.channels):
genx.append(loaded[j].getDataGenerator())
steps = loaded[self.before].steps(self.batch)
for j in range(steps):
xbatch = []
ybatch = []
for k in range(self.batch):
"""
x.shape 1, 1, z, y, x
y.shape 1, 2, z, y, x
normally the batch would be the first index, but since
the 'time' is being stored as a channel that are
accumulated
"""
xs = []
ys = []
for c in range(self.channels):
x, y = genx[c].__next__()
"""
we only segment 1 image. The other two are used at
different times.
"""
xs.append(x[0,0])
if c == self.before:
#before is time_points//2 so the middle index.
ys = y[0]
xbatch.append(xs)
ybatch.append(ys)
yield numpy.array(xbatch), numpy.array(ybatch)
#shift
for j in range(len(loaded)-1):
loaded[j] = loaded[j+1]
dex = i + self.after + 1
if dex < len(self.file_list):
img, _ = loadImage(self.file_list[dex][0], self.crop)
skel, _ = loadImage(self.file_list[dex][1], self.crop)
next_stack = IndexedVolumeData(img, skel, self.n_labels, self.patch_size, out_patches=self.out_patch, stride=self.stride, normalize_samples=self.normalize_samples)
loaded[-1] = next_stack
self.loadFirstStack()
def loadFirstStack(self):
"""
loads the first stack of images.
"""
self.loaded = []
self.before = self.channels//2
self.after = self.channels//2
img, _ = loadImage(self.file_list[0][0], self.crop)
skel, _ = loadImage(self.file_list[0][1], self.crop)
current = IndexedVolumeData(img, skel, self.n_labels, self.patch_size, out_patches=self.out_patch, stride = self.stride, normalize_samples=self.normalize_samples)
current.generateIndexes()
self.n = len(self.file_list)*current.steps(1)
for i in range(self.before):
self.loaded.append(current)
self.loaded.append(current)
for i in range(self.after):
img, _ = loadImage(self.file_list[1 + i][0], self.crop)
skel, _ = loadImage(self.file_list[1 + i][1], self.crop)
next_stack = IndexedVolumeData(img, skel, self.n_labels, self.patch_size, out_patches=self.out_patch, stride = self.stride, normalize_samples=self.normalize_samples)
self.loaded.append(next_stack)
class RotatedIndexedVolumeData(IndexedVolumeData):
def __init__(self, volume, labels, angle, n_labels=1, patches=(1, 1, 1, 1), stride = None, indexes=None, labeller=None, normalize_samples=None):
"""
volume: full image data that will be indexed over
labels: label volume data that will be labelled
angle: rotation angle in radians
n_labels: number of labels
patches: size of volumes to be sampled
stride: stride to be used for generating indexes.
indexes: if the indexes were previously generated.
"""
self.angle = angle
IndexedVolumeData.__init__(self, volume, labels, n_labels, patches, stride, indexes, labeller, normalize_samples)
if indexes:
#if indexes were already generated can only assume angle/patches are correct.
self.rotated_patch_size = getCropStride(self.patches, self.angle);
def generateIndexes(self):
self.rotated_patch_size = getCropStride(self.patches, self.angle)
self.indexes=indexVolume(self.volume, self.rotated_patch_size, self.stride, self.padding)
def getDataGenerator(self, batch_size=1):
if self.indexes is None:
self.generateIndexes()
large_patch_generator = getDataGenerator(self.volume, self.labels, self.n_labels, self.indexes, self.rotated_patch_size, batch_size=batch_size, labeller = self.labeller, normalize_samples = self.normalize_samples)
offset = [(np - p)//2 for np, p in zip(self.rotated_patch_size, self.patches)]
angle_deg = self.angle*180/math.pi
for x_batch, y_batch in large_patch_generator:
for sample_czyx in x_batch:
for channel_zyx in sample_czyx:
for slice_yx in channel_zyx:
slice_yx[:, :] = skimage.transform.rotate(slice_yx, angle_deg, preserve_range=True)
for sample_czyx in y_batch:
for channel_zyx in sample_czyx:
for slice_yx in channel_zyx:
slice_yx[:, :] = skimage.transform.rotate(slice_yx, angle_deg, preserve_range=True, order=0)
#slice_yx[:, :] = rotate2DByPixels(slice_yx, angle_deg)
x_batch = x_batch[:,
offset[0]:offset[0] + self.patches[0],
offset[1]:offset[1] + self.patches[1],
offset[2]:offset[2] + self.patches[2],
offset[3]:offset[3] + self.patches[3]
]
y_batch =y_batch[:,
offset[0]:offset[0] + self.n_labels,
offset[1]:offset[1] + self.patches[1],
offset[2]:offset[2] + self.patches[2],
offset[3]:offset[3] + self.patches[3]
]
yield x_batch, y_batch
def split(self, f):
random.shuffle(self.indexes);
s1 = int(f*len(self.indexes))
return (
RotatedIndexedVolumeData(self.volume, self.labels, self.angle, self.n_labels, patches = self.patches, stride = self.stride, indexes = self.indexes[:s1], labeller = self.labeller, normalize_samples=self.normalize_samples),
RotatedIndexedVolumeData(self.volume, self.labels, self.angle, self.n_labels, patches = self.patches, stride = self.stride, indexes = self.indexes[s1:], labeller = self.labeller, normalize_samples=self.normalize_samples)
)
class WeightedIndexedVolumeData(IndexedVolumeData):
def __init__(self, volume, labels, weights, n_labels=1, patches=(1,1,1), stride = None, indexes=None, labeller=None, normalize_samples = None):
IndexedVolumeData.__init__(self, volume, labels, n_labels, patches, stride, indexes, labeller, normalize_samples)
self.weights = weights
def split(self, f):
random.shuffle(self.indexes);
s1 = int(f*len(self.indexes))
return (
WeightedIndexedVolumeData(self.volume, self.labels, self.weights, self.n_labels, patches = self.patches, stride = self.stride, indexes = self.indexes[:s1], labeller=self.labeller, normalize_samples=self.normalize_samples),
WeightedIndexedVolumeData(self.volume, self.labels, self.weights, self.n_labels, patches = self.patches, stride = self.stride, indexes = self.indexes[s1:], labeller=self.labeller, normalize_samples=self.normalize_samples)
)
def getDataGenerator(self, batch_size=1):
if self.indexes is None:
self.generateIndexes()
return getWeightedDataGenerator(self.volume, self.labels, self.weights, self.n_labels, self.indexes, self.patches, batch_size=batch_size, labeller = self.labeller, normalize_samples = self.normalize_samples)
class InfiniteGenerator:
def __init__(self, repeatingGenerators, randomize=True):
"""
repeatingGenerators needs to be a list of tuples. [ ( n, gen), ...]
n is the number of steps before repeating for a generator.
gen is the generator
"""
self.generators = repeatingGenerators
self.batches = sum(c[0] for c in repeatingGenerators)
gen_steps = [c[0] for c in repeatingGenerators]
single_indexes = [i for i in range(len(gen_steps))]
self.indexes = numpy.repeat(single_indexes, gen_steps)
if randomize:
numpy.random.shuffle(self.indexes)
self.index = 0
def __iter__(self):
return self
def getNBatches(self):
return self.batches
def generator(self):
raise Exception("what are you doing!?")
while True:
#generatorLog("%d Top of the list"%tally)
index = 0;
for steps, generator in self.generators:
for i in range(steps):
yield generator.__next__()
index += 1
def __next__(self):
if self.index==self.batches:
self.index = 0
dex = self.indexes[self.index]
self.index += 1
return self.generators[dex][1].__next__()
def get_dims(n_chan):
mx = int(math.sqrt(n_chan))
factors = []
for i in range(1, mx+1):
if n_chan%i==0:
factors.append((n_chan/i, i))
factors.sort()
return factors[-1]
class VolumeViewer:
def __init__(self, figure_no, data, limits=None):
"""
figure: int representing which matplotlib figure this should be
data: (channel, z, y, x ) data.
"""
self.figure_no = figure_no
self.channels=len(data)
self.n_slices = len(data[0])
self.slice = self.n_slices//2
self.plots = []
self.data=data
limits = None
self.initializeDisplay(limits)
def initializeDisplay(self, limits):
self.figure = pyplot.figure(self.figure_no)
m,n = get_dims(self.channels)
for c in range(self.channels):
self.figure.add_subplot(m, n, (c+1) )
slc = self.data[c, self.slice]
mx = numpy.max(slc)
mn = numpy.min(slc)
if limits:
orig = pyplot.imshow(self.data[c, self.slice], vmax=limits[1], vmin=limits[0])
else:
mn = numpy.min(self.data)
mx = numpy.max(self.data)
if mn == mx:
mn = 0
mx = 1
orig = pyplot.imshow(self.data[c, self.slice], vmax = mx, vmin=mn)
if mx==mn:
mx = mn+1
self.plots.append(orig)
pyplot.subplots_adjust(left=0.1, bottom=0.25)
axrs = pyplot.axes([0.2, 0.05, 0.65, 0.05], facecolor="blue")
self.slider = Slider(axrs, "Slice", 0, self.n_slices-1, valinit=self.slice, valstep=1)
self.slider.on_changed(self.setSlice)
pyplot.show(False)
def setData(self, data):
self.data=data
self.refresh()
pass
def setSlice(self, slc):
slc = int(slc)
if slc >= 0 and slc<self.n_slices:
self.slice = slc
self.refresh()
def refresh(self):
for c, plot in enumerate(self.plots):
plot.set_data(self.data[c, self.slice])
self.figure.canvas.draw()
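# Illustrative usage (a sketch, not from the original file):
#   viewer = VolumeViewer(1, volume)   # volume shaped (channel, z, y, x)
#   viewer.setSlice(10)                # jump to slice 10; the Slider widget does the same interactively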
def adInfinitum(infiniteGenerators):
"""
Data generators are infinite but repeat after so many steps,
this takes a finite number of steps from a generator then
proceeds to the next one.
"""
return InfiniteGenerator(infiniteGenerators)
class Pooler:
def __init__(self, shape, pool, operation):
np = len(pool)
in_dims = len(shape)
skip = in_dims - np
leaves = shape[:skip]
ax = tuple()
for i, p in enumerate(pool):
leaves += ( shape[i + skip]//p, )
leaves += ( p, )
ax += ( skip + 2*i + 1, )
self.ax = ax
self.leaves = leaves
self.op = operation
def __call__(self, arr):
return self.op(arr.reshape(self.leaves), self.ax)
def maxPool(arr, pool):
"""
max pools arr in the pool dimensions.
"""
return Pooler(arr.shape, pool, numpy.max)(arr)
def minPool(arr, pool):
"""
min pools arr in the pool dimensions.
"""
return Pooler(arr.shape, pool, numpy.min)(arr)
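# Worked example (illustrative, not part of the original code): with
# arr = numpy.arange(16).reshape(4, 4), maxPool(arr, (2, 2)) reshapes to
# (2, 2, 2, 2) and reduces over axes (1, 3), giving [[5, 7], [13, 15]].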
def rotate2DByPixels(in_img, angle_deg):
angle=angle_deg*math.pi/180.0
cx = in_img.shape[1]//2
cy = in_img.shape[0]//2
y, x = numpy.where(in_img!=0)
out = numpy.zeros(in_img.shape,dtype="uint8")
x = x - cx
y = y - cy
if x.shape[0]==0:
return out
angle = - angle
xp = x*math.cos(angle) - y*math.sin(angle) + (cx)
yp = x*math.sin(angle) + y*math.cos(angle) + (cy)
mn = (0,0)
mx = in_img.shape
for dx in (0.15, 0.85):
for dy in (0.15, 0.85):
cnets = numpy.array([
( yi + dy, xi + dx) for yi,xi in zip(yp, xp) if xi+dx>=mn[1] and xi+dx<mx[1] and yi+dy>=mn[0] and yi+dy<mx[0]
], dtype="int")
if len(cnets)>0:
out[cnets[:,0], cnets[:, 1]] = 1
return out
def normalizeImages(batch, sigma=1.0):
std = batch.std(axis=(-3, -2, -1), keepdims=True)
mn = batch.mean(axis=(-3, -2, -1), keepdims=True)
std[numpy.where(std<1e-3)] = 1
batch = sigma*(batch - mn)/std
return batch
def normalizeBatch(batch, sigma=1.0):
std = batch.std()
mn = batch.mean()
if std>1e-3:
return (batch - mn)*sigma/std
else:
return batch
def getMultiClassLabels(data, n_labels, fatten=False):
"""
Translates a labelled volume into a set of binary labels.
:param data: numpy array containing the label map with shape: (1, ...).
:param labels: integer values of the labels.
:return: binary numpy array of shape: (n_samples, n_labels, ...)
"""
new_shape = [n_labels] + list(data.shape)
y = numpy.zeros(new_shape, numpy.int8)
for label_index in range(n_labels):
y[label_index] = (data>>label_index)&1
if fatten:
for i,sli in enumerate(y[label_index]):
y[label_index, i]=skimage.morphology.dilation(sli)
return y
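# Worked example (illustrative, not part of the original code): for
# data = numpy.array([[1, 2], [3, 0]]) and n_labels=2, bit 0 gives
# y[0] = [[1, 0], [1, 0]] and bit 1 gives y[1] = [[0, 1], [1, 0]].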
def getLinearLabels(data, n_labels):
"""
Doesn't change anything, keeps the values 1 to 1, currently stores at 8 bits.
:param data: numpy array containing the label map with shape: (n_samples, 1, ...).
:param n_labels: integer values of the labels.
:return: numpy array of shape: (n_samples, n_labels, ...)
"""
new_shape = [1] + list(data.shape)
y = numpy.zeros(new_shape, numpy.int8)
y[0] = data*1
return y
def skeletonToMultiClassRegions(data, n_labels):
"""
performs a connected components, and labels the stack as different regions
instead of
"""
#1 label for membrane, region labels for regions.
regions = n_labels - 1
new_shape = [n_labels] + list(data.shape)
y = numpy.zeros(new_shape, numpy.int8)
for i, slc in enumerate(data):
labelled, count = scipy.ndimage.label((slc==0)*1)
lim = n_labels
if count<regions:
lim = count+1
elif count>regions:
labelled[labelled>regions]=regions
for j in range(lim):
y[j, i] = (labelled==j)*1
return y
def getCategoricalLabels(data, n_labels):
"""
Similar to the multi-class labels, except labels are presumed to be unique
and 0 is a label value, e.g. a binary image would be 2-label categories.
n_labels has to be the n_non_zero_labels + 1. The 0 value will get changed
to the highest value label
"""
new_shape = [n_labels] + list(data.shape)
y = numpy.zeros(new_shape, numpy.int8)
for label_index in range(n_labels - 1):
y[label_index] = (data>>label_index)&1
y[n_labels - 1] = (data == 0) * 1  # the 0 value becomes the highest label, as described above
return y
def getDoubleMembraneLabels(data, n_labels):
"""
Translates a labelled volume into a set of binary labels.
:param data: numpy array containing the label map with shape: (n_samples, 1, ...).
:param labels: integer values of the labels.
:return: binary numpy array of shape: (n_samples, n_labels, ...)
"""
return getMultiClassLabels(data, n_labels, fatten=True)
labeller_map = {
REGION_LABELLER : skeletonToMultiClassRegions,
MULTICLASS_LABELS : getMultiClassLabels,
CATEGORICAL_LABELS : getCategoricalLabels,
DOUBLE_MEMBRANE_LABELS : getDoubleMembraneLabels,
LINEAR_LABELS : getLinearLabels
}
def fullRange(total, region, stride):
"""
go from 0 to total-region, even if the last section overlaps.
"""
x0 = 0
while x0+region < total:
yield x0
x0 += stride
if x0+region==total:
yield x0
else:
x0 = total - region
yield x0
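# Worked example (illustrative, not from the original code):
#   list(fullRange(10, 4, 4)) == [0, 4, 6]
# The final start index is clamped to total - region, so the last window
# overlaps the previous one instead of running past the end.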
def getPadding(total, region, stride):
strided = total - region
remains = strided%stride
if remains<strided//4:
return stride
else:
return stride
def paddedRange(total, region, stride, padding=None):
"""
goes from 0 to total-region-padding so that the origin of an index
can be shifted any region within padding.
If padding is left as none, then the last section will be treated as
padding.
This can also have overlap on the last frame.
"""
if padding is None:
padding = getPadding(total, region, stride)
stridable = total - padding
x0 = 0
while x0+region < stridable:
yield x0
x0 += stride
if x0+region==stridable:
yield x0
else:
x0 = stridable - region
yield x0
def getPaddedDataGenerator(xdata, ydata, n_labels, indexes, patch, batch_size=1, labeller=None, normalize_samples=False, padding=[0, 0, 0]):
"""
Returns input batches, and output batches as sampled from the provided
data. The data is expected to be (c, z, y, x) format and the return
is a tuple of (n, ci, zi, yi, xi), (n, co, zo, yo, xo) values.
This will repeat indefinitely with a period of len(indexes)
xdata: input image
ydata: output that will be labelled
indexes: list of starting indexes.
patch: shape of the input data (c, z, y, x), note that the output
data is (n_labels, z, y, x)
"""
indexes = list(indexes)
xbatch = []
ybatch = []
batches = len(indexes)//batch_size
bonus = len(indexes) - batches*batch_size
for i in range(bonus):
indexes.append(indexes[i])
pad = [ random.randint(0, r) for r in padding ]
while True:
for index in indexes:
x = xdata[
0:patch[0],
index[1] + pad[0]:index[1] + pad[0] + patch[1],
index[2] + pad[1]:index[2] + pad[1] + patch[2],
index[3] + pad[2]:index[3] + pad[2] + patch[3]
]
y = labeller(ydata[
0,
index[1] + pad[0]:index[1] + pad[0] + patch[1],
index[2] + pad[1]:index[2] + pad[1] + patch[2],
index[3] + pad[2]:index[3] + pad[2] + patch[3]
], n_labels)
if(x.shape[-3:] != y.shape[-3:]):
print("geometry doesn't match! x %s, y %s"%(x.shape[-3:], y.shape[-3:]))
xbatch.append(x)
ybatch.append(y)
if len(xbatch)==batch_size:
batch = numpy.array(xbatch)
if normalize_samples:
batch = normalizeImages(batch)
yield batch, numpy.array(ybatch)
pad = [ random.randint(0, r) for r in padding ]
xbatch = []
ybatch = []
#epoch, re-randomize. possibly should be in a callback.
random.shuffle(indexes)
pad = [ random.randint(0, r) for r in padding ]
def getDataGenerator(xdata, ydata, n_labels, indexes, patch, batch_size=1, labeller=None, normalize_samples=False, shuffle=False):
"""
Returns input batches, and output batches as sampled from the provided
data. The data is expected to be (c, z, y, x) format and the return
is a tuple of (n, ci, zi, yi, xi), (n, co, zo, yo, xo) values.
This will repeat indefinitely with a period of len(indexes)
xdata: input image
ydata: output that will be labelled
indexes: list of starting indexes.
patch: shape of the input data (c, z, y, x), note that the output
data is (n_labels, z, y, x)
"""
indexes = list(indexes)
xbatch = []
ybatch = []
batches = len(indexes)//batch_size
bonus = len(indexes) - batches*batch_size
for i in range(bonus):
indexes.append(indexes[i])
while True:
for index in indexes:
x = xdata[
0:patch[0],
index[1]:index[1] + patch[1],
index[2]:index[2] + patch[2],
index[3]:index[3] + patch[3]
]
y = labeller(ydata[
0,
index[1]:index[1] + patch[1],
index[2]:index[2] + patch[2],
index[3]:index[3] + patch[3]
], n_labels)
if(x.shape[-3:] != y.shape[-3:]):
print("geometry doesn't match! x %s, y %s"%(x.shape[-3:], y.shape[-3:]))
xbatch.append(x)
ybatch.append(y)
if len(xbatch)==batch_size:
batch =
|
numpy.array(xbatch)
|
numpy.array
|
from crystal_torture.node import Node
from crystal_torture.cluster import Cluster
from crystal_torture.graph import Graph
from crystal_torture import dist
from crystal_torture import tort
from pymatgen.core import Structure, Molecule, PeriodicSite
import numpy as np
import itertools
import math
import copy
import sys
import time
"Functions for setting up a node, cluster and graph using pymatgen"
def map_index(uc_neighbours, uc_index, x_d, y_d, z_d):
"""
Takes a list of neighbour indices for sites in the original unit cell,
and maps them on to all of the supercell sites.
Args:
- uc_neighbours(list(list(int))): list of lists containing neighbour indices for the nodes that are in the primitive cell
- uc_index(list(int)): list of indices corresponding to the primitive cell nodes
- x_d (int): x dimension of supercell
- y_d (int): y dimension of supercell
- z_d (int): z dimension of supercell
Returns:
- neigh ([[int]...[int]]): list of neighbour indices for all nodes
"""
no_atoms = len(uc_index)
count = -1
neigh = []
append = neigh.append
for i, index in enumerate(uc_index):
for x in range(0, x_d, 1):
for y in range(0, y_d, 1):
for z in range(0, z_d, 1):
count += 1
append(
[
dist.shift_index(neighbour, [x, y, z])
for neighbour in uc_neighbours[i]
]
)
return neigh
def get_all_neighbors_and_image(structure, r, include_index=False):
"""
Modified from `pymatgen
<http://pymatgen.org/_modules/pymatgen/core/structure.html#IStructure.get_all_neighbors>`_
to return image (used for mapping to supercell), and to use the f2py wrapped
OpenMP dist subroutine to get the distances (smaller memory footprint and faster
than numpy).
Get neighbours for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
method get_neighbors as it may not have to build such a large supercell.
However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
Args:
- r (float): Radius of sphere.
- include_index (bool): Whether to include the non-supercell site
- in the returned data
Returns:
- A list of a list of nearest neighbors for each site, i.e.,
[[(site, dist, index, image) ...], ..]. Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
"""
recp_len = np.array(structure.lattice.reciprocal_lattice.abc)
maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
nmin = np.floor(np.min(structure.frac_coords, axis=0)) - maxr
nmax = np.ceil(np.max(structure.frac_coords, axis=0)) + maxr
all_ranges = [
|
np.arange(x, y)
|
numpy.arange
|
# Python modules
# 3rd party modules
import numpy as np
import xml.etree.cElementTree as ElementTree
# Our modules
import vespa.datasim.util_datasim as util_datasim
import vespa.common.minf_parabolic_info as minf
import vespa.common.constants as common_constants
import vespa.common.mrs_experiment as mrs_experiment
import vespa.common.util.xml_ as util_xml
import vespa.common.util.generic_spectral as util_spectral
from vespa.common.constants import Deflate
from vespa.datasim.util_datasim import calc_lw
DEFAULT_MMOL_FLAGS = [False,False,False,False,False,True,False]
DEFAULT_MMOL_PPMS = [2.346,2.89,2.142,1.638,1.357,0.9,3.81]
DEFAULT_MMOL_AREAS = [0.5,0.5,1,1,1,1,6.0]
DEFAULT_MMOL_WIDTHS = [0.1575,0.1575,0.2363,0.2756,0.2756,0.3543,0.9449] # in ppm, in hz [20,20,30,35,35,45,120]
DEFAULT_MMOL_PHASES = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
DEFAULT_BASE_FLAGS = [False,False]
DEFAULT_BASE_PPMS = [4.69,1.0]
DEFAULT_BASE_AREAS = [10.0,20.0]
DEFAULT_BASE_WIDTHS = [0.3543,0.7] # damping coeff in [sec] ~ 45 Hz
DEFAULT_BASE_PHASES = [0.0,0.0]
class Datasim(object):
""" A container for simulated magnetic resonance spectroscopy data. """
XML_VERSION = "1.0.0"
def __init__(self, attributes=None):
"""
Define parameters to describe how MRS data is simulated.
"""
self.datasim_filename = ''
# Spectral parameter settings
self.dims = [2048,1,1,1]
self.frequency = 123.9 # in MHz
self.sw = 2048.0 # in Hz
self.linewidth = 3.0 # in Hz
self.resppm = 4.7 # in ppm
self.ta = 0.300 # in sec - only for Tab LW display calc, Metab Ta vals control indiv T2
self.tb = 0.105 # in sec - controls T2* globally
self.phase0 = 0.0 # in deg
self.phase1 = 0.0 # in deg
self.phase_1_pivot = 4.7 # in ppm
self.b0shift = 0.0 # in Hz
self.left_shift = 0 # in points of FID
self.zero_fill_multiplier = 1.0 # placeholder for completeness, not read or saved
self.echopeak = 0.0 # placeholder for completeness, not read or saved
self.comment = ''
# simulated metabolite signal basis settings
self.loop = [0,0,0] # selected loop indices
self.experiment = None
self.mets_flags = None
self.mets_scales = None
self.mets_decays = None
self.mets_ppm_start = self.pts2ppm(self.dims[0]-1)
self.mets_ppm_end = self.pts2ppm(0)
# macromolecule signal contributions
self.mmol_flags = np.array(DEFAULT_MMOL_FLAGS)
self.mmol_ppms = np.array(DEFAULT_MMOL_PPMS)
self.mmol_areas = np.array(DEFAULT_MMOL_AREAS)
self.mmol_widths = np.array(DEFAULT_MMOL_WIDTHS) # in ppm
self.mmol_phases = np.array(DEFAULT_MMOL_PHASES)
self.mmol_lineshape = 'lorentzian'
self.mmol_group_scale = 1.0
# baseline signal contributions
self.base_flags = np.array(DEFAULT_BASE_FLAGS)
self.base_ppms =
|
np.array(DEFAULT_BASE_PPMS)
|
numpy.array
|
"""Functions chop simulation data into subvolumes for independent processing."""
import numpy as np
__all__ = (
"calculate_subvolume_id",
"points_in_buffered_rectangle",
"points_in_rectangle",
)
def calculate_subvolume_id(x, y, z, nx, ny, nz, period):
"""Calculate the subvolume ID for every input point.
The function first wraps the input x, y, z into the periodic box,
then assigns each point to a rectangular subvolume according to
the number of subdivisions in each dimension.
The subvolume ID is defined by a dictionary ordering of the
digitized values of x, y, z.
Parameters
----------
x, y, z : ndarrays of shape (npts, )
nx, ny, nz : integers
period : float or 3-element sequence
Length of the periodic box
Returns
-------
x, y, z : ndarrays of shape (npts, )
Identical to the input points except for cases where points have been
wrapped around the periodic boundaries
ix, iy, iz : ndarrays of shape (npts, )
Integer arrays that together specify the subvolume of each point
cellnum : ndarray of shape (npts, )
Integer array that by itself specifies the subvolume of each point,
e.g., (ix, iy, iz)=(0, 0, 0) <==> cellnum=0
"""
period = _get_3_element_sequence(period)
x = np.atleast_1d(np.mod(x, period[0]))
y = np.atleast_1d(np.mod(y, period[1]))
z = np.atleast_1d(np.mod(z, period[2]))
_rescaled_x = nx * x / period[0]
_rescaled_y = ny * y / period[1]
_rescaled_z = nz * z / period[2]
ix = np.floor(_rescaled_x).astype("i4")
iy = np.floor(_rescaled_y).astype("i4")
iz = np.floor(_rescaled_z).astype("i4")
cellnum = np.ravel_multi_index((ix, iy, iz), (nx, ny, nz))
return x, y, z, ix, iy, iz, cellnum
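# Illustrative usage (a sketch, not from the source): with period=1.0 and
# nx=ny=nz=2, a point at (0.9, 0.1, 0.6) digitizes to (ix, iy, iz) = (1, 0, 1),
# so cellnum = np.ravel_multi_index((1, 0, 1), (2, 2, 2)) = 5.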
def points_in_buffered_rectangle(x, y, z, xyz_mins, xyz_maxs, rmax_xyz, period):
"""Return the subset of points inside a buffered rectangular subvolume.
All returned points will lie within rmax_xyz of (xyz_mins, xyz_maxs),
accounting for periodic boundary conditions.
Parameters
----------
x, y, z : ndarrays, each with shape (npts, )
xyz_mins : 3-element sequence
xyz coordinates of the lower corner of the rectangular subvolume.
Must have 0 <= xyz_mins <= xyz_maxs <= period_xyz
xyz_maxs : 3-element sequence
xyz coordinates of the upper corner of the rectangular subvolume.
Must have 0 <= xyz_mins <= xyz_maxs <= period_xyz
rmax_xyz : 3-element sequence
Search radius distance in the xyz direction.
Must have rmax_xyz <= period_xyz/2.
period : float or 3-element sequence
Length of the periodic box
Returns
-------
xout, yout, zout : ndarrays, each with shape (npts_buffered_subvol, )
Coordinates of points that lie within the buffered subvolume.
The returned points will lie in the range
[xyz_mins-rmax_xyz, xyz_maxs+rmax_xyz]. Note that this may spill beyond
the range [0, Lbox], as required by the size of
the search radius, the size of the box,
and the position of the subvolume.
The buffered subvolume includes all points relevant to pair-counting
within rmax_xyz for points in the rectangular subvolume,
and so periodic boundary conditions can be ignored for xout, yout, zout.
indx : ndarray, shape (npts_buffered_subvol, )
Index of the corresponding point in the input xyz arrays.
xout[i] == x[indx[i]] except for cases where the point
has been wrapped around the periodic boundaries.
inside_subvol : ndarray, shape (npts_buffered_subvol, )
boolean array is True when the point is in the rectangular subvolume,
False when the point is in the +/-rmax_xyz buffering region.
"""
period_xyz = _get_3_element_sequence(period)
xyz_mins = np.array(xyz_mins)
xyz_maxs = np.array(xyz_maxs)
rmax_xyz = np.array(rmax_xyz)
x = np.mod(x, period_xyz[0])
y = np.mod(y, period_xyz[1])
z = np.mod(z, period_xyz[2])
x_collector = []
y_collector = []
z_collector = []
indx_collector = []
in_subvol_collector = []
for subregion in _buffering_rectangular_subregions(xyz_mins, xyz_maxs, rmax_xyz):
subregion_ix_iy_iz, subregion_xyz_mins, subregion_xyz_maxs = subregion
_points = points_in_rectangle(
x, y, z, subregion_xyz_mins, subregion_xyz_maxs, period_xyz
)
subregion_x, subregion_y, subregion_z, subregion_indx = _points
_npts = len(subregion_x)
if _npts > 0:
x_collector.append(subregion_x)
y_collector.append(subregion_y)
z_collector.append(subregion_z)
indx_collector.append(subregion_indx)
in_subvol = np.zeros_like(subregion_x).astype(bool) + (
subregion_ix_iy_iz == (0, 0, 0)
)
in_subvol_collector.append(in_subvol)
if len(x_collector) == 0:
xout = np.zeros(0, dtype="f4")
yout = np.zeros(0, dtype="f4")
zout = np.zeros(0, dtype="f4")
indx = np.zeros(0, dtype="i8")
inside_subvol =
|
np.zeros(0, dtype=bool)
|
numpy.zeros
|
# future
from __future__ import annotations
# stdlib
from functools import partial
import os
from pathlib import Path
import time
from typing import Any
from typing import Callable
from typing import Final
from typing import Optional
from typing import TYPE_CHECKING
from typing import Tuple
# relative
from ...logger import info
if TYPE_CHECKING:
# stdlib
from dataclasses import dataclass
else:
from flax.struct import dataclass
# third party
import jax
from jax import numpy as jnp
from nacl.signing import VerifyKey
import numpy as np
from scipy.optimize import minimize_scalar
# relative
from ...core.node.common.node_manager.user_manager import RefreshBudgetException
from ...lib.numpy.array import capnp_deserialize
from ...lib.numpy.array import capnp_serialize
from ..common.serde.capnp import CapnpModule
from ..common.serde.capnp import get_capnp_schema
from ..common.serde.capnp import serde_magic_header
from ..common.serde.serializable import serializable
from .abstract_ledger_store import AbstractDataSubjectLedger
from .abstract_ledger_store import AbstractLedgerStore
def get_cache_path(cache_filename: str) -> str:
here = os.path.dirname(__file__)
root_dir = Path(here) / ".." / ".." / "cache"
return os.path.abspath(root_dir / cache_filename)
def load_cache(filename: str) -> np.ndarray:
CACHE_PATH = get_cache_path(filename)
if not os.path.exists(CACHE_PATH):
raise Exception(f"Cannot load {CACHE_PATH}")
cache_array = np.load(CACHE_PATH)
info(f"Loaded constant2epsilon cache of size: {cache_array.shape}")
return cache_array
@dataclass
class RDPParams:
sigmas: jnp.array
l2_norms: jnp.array
l2_norm_bounds: jnp.array
Ls: jnp.array
coeffs: jnp.array
@partial(jax.jit, static_argnums=3, donate_argnums=(1, 2))
def first_try_branch(
constant: jax.numpy.DeviceArray,
rdp_constants: np.ndarray,
entity_ids_query: np.ndarray,
max_entity: int,
) -> jax.numpy.DeviceArray:
summed_constant = constant.take(entity_ids_query) + rdp_constants.take(
entity_ids_query
)
if max_entity < len(rdp_constants):
return rdp_constants.at[entity_ids_query].set(summed_constant)
else:
pad_length = max_entity - len(rdp_constants) + 1
rdp_constants = jnp.concatenate([rdp_constants, jnp.zeros(shape=pad_length)])
summed_constant = constant + rdp_constants.take(entity_ids_query)
return rdp_constants.at[entity_ids_query].set(summed_constant)
@partial(jax.jit, static_argnums=1)
def compute_rdp_constant(rdp_params: RDPParams, private: bool) -> jax.numpy.DeviceArray:
squared_Ls = rdp_params.Ls**2
squared_sigma = rdp_params.sigmas**2
if private:
# this is calculated on the private true values
squared_l2 = rdp_params.l2_norms**2
else:
# bounds is computed on the metadata
squared_l2 = rdp_params.l2_norm_bounds**2
return (squared_Ls * squared_l2 / (2 * squared_sigma)) * rdp_params.coeffs
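# Illustrative arithmetic for the formula above (not from the source): with
# Ls=1, l2_norm_bound=1, sigma=0.5 and coeffs=2 the constant is
# (1 * 1 / (2 * 0.25)) * 2 = 4.0.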
@jax.jit
def get_budgets_and_mask(
epsilon_spend: jnp.array, user_budget: jnp.float64
) -> Tuple[float, float, jax.numpy.DeviceArray]:
# Function to vectorize the result of the budget computation.
mask = jnp.ones_like(epsilon_spend) * user_budget < epsilon_spend
# get the highest value which was under budget and represented by False in the mask
highest_possible_spend = jnp.max(epsilon_spend * (1 - mask))
return (highest_possible_spend, user_budget, mask)
@serializable(capnp_bytes=True)
class DataSubjectLedger(AbstractDataSubjectLedger):
"""for a particular data subject, this is the list
of all mechanisms releasing information about this
particular subject, stored in a vectorized form"""
CONSTANT2EPSILSON_CACHE_FILENAME = "constant2epsilon_300k.npy"
_cache_constant2epsilon = load_cache(filename=CONSTANT2EPSILSON_CACHE_FILENAME)
def __init__(
self,
constants: Optional[np.ndarray] = None,
update_number: int = 0,
timestamp_of_last_update: Optional[float] = None,
) -> None:
self._rdp_constants = (
constants if constants is not None else np.array([], dtype=np.float64)
)
self._update_number = update_number
self._timestamp_of_last_update = (
timestamp_of_last_update
if timestamp_of_last_update is not None
else time.time()
)
self._pending_save = False
def __eq__(self, other: Any) -> bool:
if not isinstance(other, DataSubjectLedger):
return False  # a non-DataSubjectLedger never compares equal (avoids infinite recursion)
return (
self._update_number == other._update_number
and self._timestamp_of_last_update == other._timestamp_of_last_update
and all(self._rdp_constants == other._rdp_constants)
)
@property
def delta(self) -> float:
FIXED_DELTA: Final = 1e-6
return FIXED_DELTA # WARNING: CHANGING DELTA INVALIDATES THE CACHE
def bind_to_store_with_key(
self, store: AbstractLedgerStore, user_key: VerifyKey
) -> None:
self.store = store
self.user_key = user_key
@staticmethod
def get_or_create(
store: AbstractLedgerStore, user_key: VerifyKey
) -> Optional[AbstractDataSubjectLedger]:
ledger: Optional[AbstractDataSubjectLedger] = None
try:
# todo change user_key or uid?
ledger = store.get(key=user_key)
ledger.bind_to_store_with_key(store=store, user_key=user_key)
except KeyError:
print("Creating new Ledger")
ledger = DataSubjectLedger()
ledger.bind_to_store_with_key(store=store, user_key=user_key)
except Exception as e:
print(f"Failed to read ledger from ledger store. {e}")
return ledger
def get_entity_overbudget_mask_for_epsilon_and_append(
self,
unique_entity_ids_query: np.ndarray,
rdp_params: RDPParams,
get_budget_for_user: Callable,
deduct_epsilon_for_user: Callable,
private: bool = True,
) -> np.ndarray:
# coerce to np.int64
entity_ids_query: np.ndarray = unique_entity_ids_query.astype(np.int64)
# calculate constants
rdp_constants = self._get_batch_rdp_constants(
entity_ids_query=entity_ids_query, rdp_params=rdp_params, private=private
)
# here we iteratively attempt to calculate the overbudget mask and save
# changes to the database
mask = self._get_overbudgeted_entities(
get_budget_for_user=get_budget_for_user,
deduct_epsilon_for_user=deduct_epsilon_for_user,
rdp_constants=rdp_constants,
)
# at this point we are confident that the database budget field has been updated
# so now we should flush the _rdp_constants that we have calculated to storage
if self._write_ledger():
return mask
def _write_ledger(self) -> bool:
self._update_number += 1
try:
self._pending_save = False
self.store.set(key=self.user_key, value=self)
return True
except Exception as e:
self._pending_save = True
print(f"Failed to write ledger to ledger store. {e}")
raise e
def _increase_max_cache(self, new_size: int) -> None:
new_entries = []
current_size = len(self._cache_constant2epsilon)
new_alphas = []
for i in range(new_size - current_size):
alph, eps = self._get_optimal_alpha_for_constant(
constant=i + 1 + current_size
)
new_entries.append(eps)
new_alphas.append(alph)
self._cache_constant2epsilon = np.concatenate(
[self._cache_constant2epsilon, np.array(new_entries)]
)
def _get_fake_rdp_func(self, constant: int) -> Callable:
def func(alpha: float) -> float:
return alpha * constant
return func
def _get_alpha_search_function(self, rdp_compose_func: Callable) -> Callable:
log_delta = np.log(self.delta)
def fun(alpha: float) -> float: # the input is the RDP's \alpha
if alpha <= 1:
return np.inf
else:
alpha_minus_1 = alpha - 1
return np.maximum(
rdp_compose_func(alpha)
+
|
np.log(alpha_minus_1 / alpha)
|
numpy.log
|
import warnings
import numpy as np
# Scipy
try:
import scipy.linalg as spa
from scipy.spatial import ConvexHull
from scipy.linalg import block_diag
except:
warnings.warn("You don't have scipy package installed. You may get error while using some feautures.")
# Pydrake
try:
import pydrake.solvers.mathematicalprogram as MP
import pydrake.solvers.gurobi as Gurobi_drake
import pydrake.solvers.osqp as OSQP_drake
# use Gurobi solver
global gurobi_solver,OSQP_solver, license
gurobi_solver=Gurobi_drake.GurobiSolver()
license = gurobi_solver.AcquireLicense()
OSQP_solver=OSQP_drake.OsqpSolver()
except:
warnings.warn("You don't have pydrake installed properly. Methods that rely on optimization may fail.")
# Pypolycontain
try:
import pypolycontain as pp
except:
warnings.warn("You don't have pypolycontain properly installed. Can not import objects")
"""
Optimization-based Operations:
"""
def affine_map( T, P, t=None , get_inverse=True):
"""
Returns the affine map of a polytope.
"""
if t is None:
t=np.zeros((T.shape[0],1))
if P.type=='AH_polytope':
return pp.AH_polytope(t=t+np.dot(T,P.t),T=np.dot(T,P.T),P=P.P)
elif P.type=='zonotope':
return pp.zonotope(x=t+np.dot(T,P.x),G=np.dot(T,P.G))
elif P.type=="H_polytope":
if T.shape[0]>=T.shape[1] and get_inverse:
Tinv=np.linalg.pinv(T)
H=np.dot(P.H,Tinv)
# print("inverse error=",np.linalg.norm(np.dot(Tinv,T)-np.eye(T.shape[1])))
assert np.linalg.norm(np.dot(Tinv,T)-np.eye(T.shape[1]))<=1e-2*P.n
return pp.H_polytope(H=H,h=P.h+np.dot(H,t))
else:
Q=pp.to_AH_polytope(P)
return affine_map( T, Q, t )
else:
return ValueError('Polytope type: ',P.type," Not recognized")
def translate( t, P ):
"""
Shifts the polytope by t vector
"""
assert t.shape[0]==P.n # Dimension match
if P.type=='AH_polytope':
return pp.AH_polytope(t=t+P.t,T=P.T,P=P.P)
elif P.type=='zonotope':
return pp.zonotope(x=t+P.x,G=P.G)
elif P.type=="H_polytope":
return pp.H_polytope(H=P.H,h=P.h+np.dot(P.H,t))
else:
return ValueError('Polytope type: ',P.type," Not recognized")
def point_membership(Q,x,tol=10**-5,solver="gurobi"):
if type(Q).__name__=="H_polytope":
return Q.if_inside(x,tol)
else:
Q=pp.to_AH_polytope(Q)
prog=MP.MathematicalProgram()
zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h+tol,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=zeta)
prog.AddLinearEqualityConstraint(Q.T,x-Q.t,zeta)
if solver=="gurobi":
result=gurobi_solver.Solve(prog,None,None)
elif solver=="osqp":
prog.AddQuadraticCost(np.eye(zeta.shape[0]),np.zeros(zeta.shape),zeta)
result=OSQP_solver.Solve(prog,None,None)
else:
result=MP.Solve(prog)
return result.is_success()
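# Note (added for clarity, not in the original): the feasibility program above
# encodes x in {t + T*zeta : H*zeta <= h + tol}; the point is a member exactly
# when such a zeta exists, which is what result.is_success() reports.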
def check_subset(P1,P2,k=-1):
"""
Checks if :math:`P1 \subseteq P2`
Inputs:
"""
def point_membership_fuzzy(Q,x,tol=10**-5,solver="gurobi"):
"""
Fuzzy membership check. If x contains NaN, the entry is unconstrained
@param Q: Polytope in R^n
@param x: n*1 numpy array, may contain NaNs
@param tol:
@param solver: solver to use
@return: boolean of whether x is in Q
"""
Q=pp.to_AH_polytope(Q)
prog=MP.MathematicalProgram()
zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h+tol,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=zeta)
assert(x.shape[1]==1)
for i, xi in enumerate(x):
if not np.isnan(xi):
prog.AddLinearEqualityConstraint(np.atleast_2d(Q.T[i,:]),(x[i]-Q.t[i]).reshape([-1,1]),zeta)
if solver=="gurobi":
result=gurobi_solver.Solve(prog,None,None)
elif solver=="osqp":
prog.AddQuadraticCost(np.eye(zeta.shape[0]),np.zeros(zeta.shape),zeta)
result=OSQP_solver.Solve(prog,None,None)
else:
result=MP.Solve(prog)
return result.is_success()
def check_non_empty(Q,tol=10**-5,solver="gurobi"):
Q=pp.to_AH_polytope(Q)
prog=MP.MathematicalProgram()
zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h+tol,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=zeta)
if solver=="gurobi":
result=gurobi_solver.Solve(prog,None,None)
elif solver=="osqp":
prog.AddQuadraticCost(np.eye(zeta.shape[0]),np.zeros(zeta.shape),zeta)
result=OSQP_solver.Solve(prog,None,None)
else:
result=MP.Solve(prog)
return result.is_success()
#def directed_Hausdorff_distance(Q1,Q2,ball="infinty_norm",solver="gurobi"):
# r"""
# Computes the directed Hausdorff distance of Q_1 and Q_2 (AH_polytopes)
# ***************************************************************************
# The optimization problem is:
# Minimize epsilon
# such that Q1 \subset Q2+epsilon(Ball)
#
# It is zero if and only if Q1 subset Q2. The method is based on
#
# Sadraddini&Tedrake, 2019, CDC (available on ArXiv)
#
# We solve the following problem:
# D*ball+Q1 subset Q2
# We solve the following linear program:
# ..math::
# \min D
# s.t. Lambda_1 H_1=H_2 Gamma_1
# Lambda_2 H_1=H_ball Gamma_2
# Lambda_1 h_1<=h_2 + H_2 beta_1
# Lambda_2 h_2<=D h_ball + H_ball beta_2
# x_2 - X_2 beta_1 - beta_2 = x_1
# X_2 Gamma_1 + Gamma_2 = X_1
# ***************************************************************************
# """
# Q1,Q2=pp.to_AH_polytope(Q1),pp.to_AH_polytope(Q2)
# n=Q1.t.shape[0]
# if ball=="infinty_norm":
# HB=np.vstack((np.eye(n),-np.eye(n)))
# hB=np.vstack((np.ones((n,1)),np.ones((n,1))))
# elif ball=="l1":
# HB,hb=make_ball(ball)
# prog=MP.MathematicalProgram()
# # Variables
# D=prog.NewContinuousVariables(1,1,"D")
# Lambda_1=prog.NewContinuousVariables(Q2.P.H.shape[0],Q1.P.H.shape[0],"Lambda_1")
# Lambda_2=prog.NewContinuousVariables(HB.shape[0],Q1.P.H.shape[0],"Lambda2")
# Gamma_1=prog.NewContinuousVariables(Q2.P.H.shape[1],Q1.P.H.shape[1],"Gamma1")
# Gamma_2=prog.NewContinuousVariables(HB.shape[1],Q1.P.H.shape[1],"Gamma1")
# beta_1=prog.NewContinuousVariables(Q2.P.H.shape[1],1,"beta1")
# beta_2=prog.NewContinuousVariables(HB.shape[1],1,"beta1")
# # Constraints
# # Lambda_1 and Lambda_2 positive
# prog.AddBoundingBoxConstraint(0,np.inf,Lambda_1)
# prog.AddBoundingBoxConstraint(0,np.inf,Lambda_2)
# # Lambda_1 H_1
# Lambda_H_Gamma(prog,Lambda_1,Q1.P.H,Q2.P.H,Gamma_1)
# # Lambda_2 H_1
# Lambda_H_Gamma(prog,Lambda_2,Q1.P.H,HB,Gamma_2)
# # Lambda_1 h_1
# Lambda_h_Inequality(prog,Lambda_1,beta_1,Q2.P.H,Q1.P.h,Q2.P.h)
# # Lambda_2 h_1
# Lambda_h_Inequality_D(prog,Lambda_2,beta_2,HB,Q1.P.h,hB,D)
# # X2 beta_1
# prog.AddLinearEqualityConstraint(-np.hstack((Q2.T,np.eye(n))),Q1.t-Q2.t,np.vstack((beta_1,beta_2)))
# # X2 Gamma_1
# Aeq=np.hstack((Q2.T,np.eye(Q2.T.shape[0])))
# for i in range(Gamma_1.shape[1]):
# beq=Q1.T[:,i]
# var=np.hstack((Gamma_1[:,i],Gamma_2[:,i]))
# prog.AddLinearEqualityConstraint(Aeq,beq,var)
# # Cost
# # Optimize
# if solver=="gurobi":
# prog.AddLinearCost(D[0,0])
# result=gurobi_solver.Solve(prog,None,None)
# elif solver=="osqp":
# prog.AddQuadraticCost(D[0,0]*D[0,0])
# result=OSQP_solver.Solve(prog,None,None)
# else:
# result=MP.Solve(prog)
# if result.is_success():
# return np.asscalar(result.GetSolution(D))
# else:
# print("Optimization Failed")
#
#def old_Hausdorff_distance(Q1,Q2,directed=False,ball="infinty_norm",solver="gurobi"):
# return max(directed_Hausdorff_distance(Q1,Q2,ball,solver),directed_Hausdorff_distance(Q2,Q1,ball,solver))
def Hausdorff_distance(Q1,Q2,directed=False,ball="infinty_norm",solver="gurobi",k=-1):
X,Y=pp.to_AH_polytope(Q1),pp.to_AH_polytope(Q2)
prog=MP.MathematicalProgram()
# Variables
n=Q1.n
D1=prog.NewContinuousVariables(1,"D1")
D2=prog.NewContinuousVariables(1,"D2")
if ball=="infinty_norm":
P_ball=pp.unitbox(n).H_polytope
elif ball in ["L1",1,"1","l1"]:
P_ball=pp.unitball(n,1)
else:
print("I don't recognize the ball norm")
raise NotImplementedError
if P_ball.type=='H_polytope':
Dball1=pp.H_polytope(P_ball.H,P_ball.h*D1)
if not directed:
Dball2=pp.H_polytope(P_ball.H,P_ball.h*D2)
if P_ball.type=='AH_polytope':
Dball1=pp.AH_polytope(t=P_ball.t*D1,T=P_ball.T,\
P=pp.H_polytope(P_ball.P.H,P_ball.P.h*D1))
if not directed:
Dball2=pp.AH_polytope(t=P_ball.t*D2,T=P_ball.T,\
P=pp.H_polytope(P_ball.P.H,P_ball.P.h*D2))
X_plus=pp.minkowski_sum(X,Dball1)
pp.subset(prog,Y,X_plus,k=k)
prog.AddLinearCost(np.array([1]),np.array([0]),D1)
if not directed:
Y_plus=pp.minkowski_sum(Y,Dball2)
pp.subset(prog,X,Y_plus,k=k)
prog.AddLinearCost(np.array([1]),np.array([0]),D2)
if solver=="gurobi":
result=gurobi_solver.Solve(prog,None,None)
if result.is_success():
dXY=np.asscalar(result.GetSolution(D1))
if not directed:
dYX=np.asscalar(result.GetSolution(D2))
return max(dXY,dYX),dXY,dYX
else:
return dXY
def distance_polytopes(Q1,Q2,ball="infinity",solver="gurobi"):
"""
Finds the closest two points in two polytopes and their distance.
It is zero if polytopes have non-empty intersection
"""
Q1,Q2=pp.to_AH_polytope(Q1),pp.to_AH_polytope(Q2)
n=Q1.n
prog=MP.MathematicalProgram()
zeta1=prog.NewContinuousVariables(Q1.P.H.shape[1],1,"zeta1")
zeta2=prog.NewContinuousVariables(Q2.P.H.shape[1],1,"zeta2")
delta=prog.NewContinuousVariables(n,1,"delta")
prog.AddLinearConstraint(A=Q1.P.H,ub=Q1.P.h,lb=-np.inf*np.ones((Q1.P.h.shape[0],1)),vars=zeta1)
prog.AddLinearConstraint(A=Q2.P.H,ub=Q2.P.h,lb=-np.inf*np.ones((Q2.P.h.shape[0],1)),vars=zeta2)
prog.AddLinearEqualityConstraint( np.hstack((Q1.T,-Q2.T,np.eye(n))),Q2.t-Q1.t,np.vstack((zeta1,zeta2,delta)) )
if ball=="infinity":
delta_abs=prog.NewContinuousVariables(1,1,"delta_abs")
prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)
prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),delta,dtype='object' ))
prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),-delta,dtype='object' ))
cost=delta_abs
elif ball=="l1":
delta_abs=prog.NewContinuousVariables(n,1,"delta_abs")
prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)
prog.AddLinearConstraint(np.greater_equal( delta_abs,delta,dtype='object' ))
prog.AddLinearConstraint(np.greater_equal( delta_abs,-delta,dtype='object' ))
cost=np.dot(np.ones((1,n)),delta_abs)
else:
raise NotImplementedError
if solver=="gurobi":
prog.AddLinearCost(cost[0,0])
result=gurobi_solver.Solve(prog,None,None)
elif solver=="osqp":
prog.AddQuadraticCost(cost[0,0]*cost[0,0])
result=OSQP_solver.Solve(prog,None,None)
else:
prog.AddLinearCost(cost[0,0])
result=MP.Solve(prog)
if result.is_success():
return np.sum(result.GetSolution(delta_abs)),\
np.dot(Q1.T,result.GetSolution(zeta1).reshape(zeta1.shape[0],1))+Q1.t,\
np.dot(Q2.T,result.GetSolution(zeta2).reshape(zeta2.shape[0],1))+Q2.t
def _setup_program_distance_point(P,ball="infinity",solver="Gurobi"):
"""
Initialize the mathematical program
Choice of balls:
infinity: L-infinity norm
l1: l1 norm (Manhattan Distance)
l2: l2 norm (Euclidean Distance)
"""
if P.distance_program is None:
prog=MP.MathematicalProgram()
Q=pp.to_AH_polytope(P)
n=Q.n
x=np.zeros((n,1))
P.zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
delta=prog.NewContinuousVariables(n,1,"delta")
prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=P.zeta)
P.distance_constraint=prog.AddLinearEqualityConstraint( np.hstack((Q.T,-np.eye(n))),x-Q.t,np.vstack((P.zeta,delta)) )
if ball=="infinity":
delta_abs=prog.NewContinuousVariables(1,1,"delta_abs")
prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)
prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),delta,dtype='object' ))
prog.AddLinearConstraint(np.greater_equal( np.dot(np.ones((n,1)),delta_abs),-delta,dtype='object' ))
prog.AddLinearCost(delta_abs[0,0])
elif ball=="l1":
delta_abs=prog.NewContinuousVariables(n,1,"delta_abs")
prog.AddBoundingBoxConstraint(0,np.inf,delta_abs)
prog.AddLinearConstraint(np.greater_equal( delta_abs,delta,dtype='object' ))
prog.AddLinearConstraint(np.greater_equal( delta_abs,-delta,dtype='object' ))
cost=np.dot(np.ones((1,n)),delta_abs)
prog.AddLinearCost(cost[0,0])
elif ball=="l2":
prog.AddQuadraticCost(np.eye(n),np.zeros(n),delta)
else:
print(("Not a valid choice of norm",str(ball)))
raise NotImplementedError
P.distance_program=prog
return
else:
return
def distance_point_polytope(P, x, ball="infinity", solver="Gurobi"):
"""
Computes the distance of point x from AH-polytope Q
"""
x_vector = np.atleast_2d(x) #in case x is not n*1 vector
P = pp.to_AH_polytope(P)
_setup_program_distance_point(P,ball,solver)
prog=P.distance_program
Q=pp.to_AH_polytope(P)
a=P.distance_constraint.evaluator()
x_vector=x_vector.reshape(max(x_vector.shape),1)
a.UpdateCoefficients(np.hstack((Q.T,-np.eye(Q.n))), x_vector - Q.t)
if solver=="Gurobi":
result=gurobi_solver.Solve(prog,None,None)
elif solver=="osqp":
result=OSQP_solver.Solve(prog,None,None)
else:
result=MP.Solve(prog)
if result.is_success():
zeta_num=result.GetSolution(P.zeta).reshape(P.zeta.shape[0],1)
x_nearest=np.dot(Q.T,zeta_num)+Q.t
delta=(x_vector - x_nearest).reshape(Q.n)
if ball=="infinity":
d=np.linalg.norm(delta,ord=np.inf)
elif ball=="l1":
d=np.linalg.norm(delta,ord=1)
elif ball=="l2":
d=np.linalg.norm(delta,ord=2)
return d,x_nearest
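# --- Hedged usage sketch (illustrative only) ---------------------------------
# d, x_star = distance_point_polytope(P, np.array([1.0, 2.0]), ball="infinity")
# `d` is the chosen-norm distance from the query point to P and `x_star` a
# (not necessarily unique) nearest point of P; only the equality constraint of
# the cached P.distance_program is updated, so repeated queries on the same P
# stay cheap.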
def bounding_box(Q,solver="Gurobi"):
r"""
Computes the bounding box of a polytope by solving :math:`2n` linear programs.
Each linear program is in the form:
.. math::
\begin{array}{lll}
l_i= & \min & e_i^T x \\
& \text{subject to} & x \in \mathbb{P}
\end{array}
and
.. math::
\begin{array}{lll}
u_i= & \max & e_i^T x \\
& \text{subject to} & x \in \mathbb{P}
\end{array}
where :math:`l,u` define the lower and upper corners of the bounding box.
"""
Q=pp.to_AH_polytope(Q)
prog=MP.MathematicalProgram()
zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
x=prog.NewContinuousVariables(Q.n,1,"x")
prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=zeta)
prog.AddLinearEqualityConstraint(np.hstack((-Q.T,np.eye(Q.n))),Q.t,np.vstack((zeta,x)))
lower_corner=np.zeros((Q.n,1))
upper_corner=np.zeros((Q.n,1))
c=prog.AddLinearCost(np.ones(Q.n),0,x)
if solver=="Gurobi":
solver=gurobi_solver
else:
raise NotImplementedError
a=
|
np.zeros((Q.n,1))
|
numpy.zeros
|
import warnings
import dask.array as dsa
import numpy as np
import xarray as xr
import xarrayutils as xru
from xarrayutils.utils import linear_trend
from cmip6_preprocessing.postprocessing import _match_datasets, exact_attrs
from cmip6_preprocessing.utils import cmip6_dataset_id
def _maybe_unpack_date(date):
"""`Unpacks` cftime from xr.Dataarray if provided"""
# I should probably not do this here but instead in the higher level functions...
if isinstance(date, xr.DataArray):
date = date.data.tolist()
if isinstance(date, list):
if len(date) != 1:
raise RuntimeError(
"The passed date has the wrong format. Got [{date}] after conversion to list."
)
else:
date = date[0]
return date
def _construct_cfdate(data, units, calendar):
# This seems clunky. I feel there must be a more elegant way of doing this?
date = xr.DataArray(data, attrs={"units": units, "calendar": calendar})
return xr.decode_cf(date.to_dataset(name="time"), use_cftime=True).time
def _datestr_to_cftime(date_str, calendar):
# Again I feel this should be more elegant? For now I guess it works
return _construct_cfdate([0], f"days since {date_str}", calendar)
def replace_time(
ds, ref_date=None, ref_idx=0, freq="1MS", calendar=None, time_dim="time"
):
"""This function replaces the time encoding of a dataset acoording to `ref_date`.
The ref date can be any index of ds.time (default is 0; meaning the first timestep of ds will be replaced with `ref_date`).
"""
#! I might be able to achieve some of this with time.shift
# !
if calendar is None:
calendar = ds.time.encoding.get("calendar", "standard")
if ref_date is None:
ref_date = _maybe_unpack_date(ds.time[0])
if isinstance(ref_date, str):
ref_date = _maybe_unpack_date(_datestr_to_cftime(ref_date, calendar))
# TODO: Check the frequency. Currently the logic only works on monthly intervals
if freq != "1MS":
raise ValueError("`replace_time` currently only works with monthly data.")
# determine the start date
    # propagate the date back (this assumes strictly monthly data)
year = _maybe_unpack_date(ref_date).year - (ref_idx // 12)
month = _maybe_unpack_date(ref_date).month - (ref_idx % 12)
if month <= 0:
# move the year one more back
year -= 1
month = 12 + month
attrs = ds.time.attrs
start = f"{int(year):04d}-{int(month):02d}"
ds = ds.assign_coords(
time=xr.cftime_range(start, periods=len(ds.time), freq=freq, calendar=calendar)
)
ds.time.attrs = attrs
return ds
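# --- Hedged worked example (illustrative only) --------------------------------
# The year/month arithmetic above walks `ref_idx` months back from `ref_date`
# (strictly monthly data). A tiny standalone check of that back-propagation:
def _months_back_example(year, month, ref_idx):
    # mirrors the logic used in replace_time
    year = year - (ref_idx // 12)
    month = month - (ref_idx % 12)
    if month <= 0:
        year -= 1
        month = 12 + month
    return year, month
# ref_date 2000-03 sitting at index 14 means the series starts 14 months
# earlier, i.e. 1999-01:
assert _months_back_example(2000, 3, 14) == (1999, 1)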
def find_date_idx(time, date):
"""Finds the index of `date` within an array of cftime dates. This strictly requires monthly data.
Might result in undesired behavior for other time frequencies.
"""
# ! seems like I can refactor this with http://xarray.pydata.org/en/stable/generated/xarray.CFTimeIndex.get_loc.html#xarray.CFTimeIndex.get_loc
date = _maybe_unpack_date(date)
# easier approach: Find the difference in years and months
year_diff = date.year - _maybe_unpack_date(time[0]).year
month_diff = date.month - _maybe_unpack_date(time[0]).month
return (year_diff * 12) + month_diff
def unify_time(parent, child, adjust_to="child"):
"""Uses the CMIP6 specific metadata (augmented by cmip6_preprocessing....time_preprocessing!!!) to adjust parent time encoding to child experiment.
Similar to `switch_to_child_time`, but sets the time parameters (e.g. calendar) explicitly to the child conventions
"""
branch_time_in_parent = child.attrs.get("branch_time_in_parent")
parent_time_units = child.attrs.get("parent_time_units")
# if branch time is not in attrs do nothing
if branch_time_in_parent is None:
child_source_id = child.attrs.get("source_id", "not found")
parent_source_id = parent.attrs.get("source_id", "not found")
msg = (
f"Could not unify time for [child:{child_source_id}|parent:{parent_source_id}]."
"`branch_time_in_parent` not found in attributes."
)
warnings.warn(msg, UserWarning)
return parent, child
else:
parent_calendar = parent.time.to_index().calendar
child_calendar = child.time.to_index().calendar
branch_time_parent = _construct_cfdate(
child.attrs.get("branch_time_in_parent"),
child.attrs.get("parent_time_units"),
parent_calendar,
)
branch_time_child = _construct_cfdate(
child.attrs.get("branch_time_in_child"),
child.time.encoding.get("units"),
child_calendar,
)
if adjust_to == "child":
branch_idx_parent = find_date_idx(parent.time, branch_time_parent)
return (
replace_time(
parent,
branch_time_child,
ref_idx=branch_idx_parent,
calendar=child_calendar,
),
child,
)
elif adjust_to == "parent":
branch_idx_child = find_date_idx(child.time, branch_time_child)
return parent, replace_time(
child,
branch_time_parent,
ref_idx=branch_idx_child,
calendar=parent_calendar,
)
else:
raise ValueError(
f"Input for `adjust_to` not valid. Got {adjust_to}. Expected either `child` or `parent`."
)
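# --- Hedged usage sketch (illustrative only) ----------------------------------
# parent_aligned, child = unify_time(picontrol_ds, historical_ds, adjust_to="child")
# re-encodes the piControl (parent) time axis so that its branch point lines up
# with `branch_time_in_child` on the child's calendar; `adjust_to="parent"`
# shifts the child instead. `picontrol_ds`/`historical_ds` are placeholder names.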
def calculate_drift(
reference, ds, variable, trend_years=250, compute_short_trends=False
):
"""Calculate the linear trend at every grid position for the given time (`trend_years`)
    starting from the date when `ds` was branched off from `reference`.
CMIP6 metadata must be present.
Parameters
----------
    reference : xr.Dataset
The dataset from which the drift (trend) is calculated. Usually the preindustrial control run
ds : xr.Dataset
The dataset for which the drift is matched. This is usually the historical experiment.
!For many models, each historical member is branched
trend_years : int, optional
        The duration of the trend to compute in years, by default 250 (This is the length of
historical+standard scenario, e.g. 1850-2100)
"""
for attr in [
"parent_variant_label",
"parent_source_id",
"branch_time_in_parent",
"parent_time_units",
"source_id",
"variant_label",
]:
if not attr in ds.attrs:
raise ValueError(f"Could not find {attr} in attributes of `ds`.")
# Check if the parent member id matches
match_attrs = ["source_id", "variant_label"]
for ma in match_attrs:
if not ds.attrs[f"parent_{ma}"] in reference.attrs[ma]:
raise ValueError(
f'`ds_parent` {ma} ({reference.attrs[ma]}) not compatible with `ds` parent_{ma} ({ds.attrs[f"parent_{ma}"]})'
)
# find the branch date in the control run
branch_time_reference = _construct_cfdate(
ds.attrs["branch_time_in_parent"],
ds.attrs["parent_time_units"],
reference.time.to_index().calendar,
)
branch_idx_reference = find_date_idx(reference.time, branch_time_reference)
# there might be some cases where this is not true. Figure out what to do when it happens.
assert branch_idx_reference >= 0
    # cut the reference to the appropriate time frame
reference_cut = reference.isel(
time=slice(branch_idx_reference, branch_idx_reference + (12 * trend_years))
)
if len(reference_cut.time) == 0:
raise RuntimeError(
f"Selecting from `reference` according to the branch time resulted in empty dataset. Check the metadata."
)
return None
else:
if len(reference_cut.time) < trend_years * 12:
if compute_short_trends:
warnings.warn(
f"reference dataset does not have the full {trend_years} years to calculate trend. Using {int(len(reference_cut.time)/12)} years only"
)
else:
raise RuntimeError(
f"Reference dataset does not have the full {trend_years} years to calculate trend. Set `calculate_short_trend=True` to compute from a shorter timeseries"
)
time_range = xr.concat(
[
reference_cut.time[0].squeeze().drop_vars("time"),
reference_cut.time[-1].squeeze().drop_vars("time"),
],
dim="bnds",
).reset_coords(drop=True)
        # there is some problem when encoding very large years. for now I'll preserve these only as
# strings
time_range = time_range.astype(str)
# # The polyfit implementation actually respects the units.
# # For now my implementation requires the slope to be in units .../month
# # I might be able to change this later and accomodate other time frequencies?
# get rid of all the additional coords, which resets the time to an integer index
reference_cut = reference_cut[variable]
# TODO: This has pretty poor performance...need to find out why.
# Reset time dimension to integer index.
# reference_cut = reference_cut.drop_vars("time")
# linear regression slope is all we need here.
# reg = reference_cut.polyfit("time", 1).sel(degree=1).polyfit_coefficients
reg_raw = linear_trend(
reference_cut,
"time",
)
#! quite possibly the shittiest fix ever.
# I changed the API over at xarrayutils and now I have to pay the price over here.
        # TODO: Might want to eliminate this once the new xarrayutils version has matured.
if xru.__version__ > "v0.1.3":
reg = reg_raw.slope
else:
reg = reg_raw.sel(parameter="slope").drop_vars("parameter").squeeze()
# again drop all the coordinates
reg = reg.reset_coords(drop=True)
reg = reg.to_dataset(name=variable)
# add metadata about regression
reg = reg.assign_coords(trend_time_range=time_range)
reg.coords["trend_time_range"].attrs.update(
{
"standard_name": "regression_time_bounds",
"long_name": "regression_time_in_reference_run",
}
)
# reg should carry the attributes of `ds`
# ? Maybe I should convert to a dataset?
reg.attrs.update(ds.attrs)
return reg
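# --- Hedged usage sketch (illustrative only) ----------------------------------
# drift = calculate_drift(picontrol_ds, historical_ds, "thetao")
# gives, per grid cell, the piControl linear trend of `thetao` (slope per month)
# over the `trend_years` following the historical branch point; that trend can
# then be removed from the child run (see the detrending helper below).
# `picontrol_ds`/`historical_ds` are placeholder names.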
# TODO: I need a more generalized detrending? Based on indices --> xarrayutils
# Then refactor this one here just for cmip6
def detrend_basic(da, da_slope, start_idx=0, dim="time", keep_attrs=True):
"""Basic detrending just based on time index, not date"""
# now create a trend timeseries at each point
    # and shift the time indices by the ref index. This way the trend is correctly calculated from the reference year.
## this adapts the chunk structure from the input if its a dask array
attrs = {k: v for k, v in da.attrs.items()}
idx_start = -start_idx
idx_stop = len(da.time) - start_idx
if isinstance(da.data, dsa.Array):
ref_time = da.isel({di: 0 for di in da.dims if di != dim})
chunks = ref_time.chunks
trend_time_idx_data = dsa.arange(
idx_start, idx_stop, chunks=chunks, dtype=da.dtype
)
else:
trend_time_idx_data =
|
np.arange(idx_start, idx_stop, dtype=da.dtype)
|
numpy.arange
|
'''
Tetris Battle
originally created by <NAME>, <NAME>
modified by <NAME>
this is a similar version to the ever popular Tetris Battle
game, with not many changes
'''
#basic modules needed for game to run
import os
import pygame
import random
import numpy as np
from .settings import *
from copy import deepcopy
import time as t
from collections import Counter
def put_block_in_grid(grid, block, px, py):
feasibles = block.return_pos_color(px, py)
for x, y, c in feasibles:
'''
TODO: y boundary
'''
if -1 < x < GRID_WIDTH and -1 < y < len(grid[0]):
grid[x][y] = c
def collide(grid, block, px, py):
feasibles = block.get_feasible()
# print(px)
# print(block)
# excess = len(grid[0]) - GRID_DEPTH
for pos in feasibles:
# print(px + pos[0], py + pos[1])
if px + pos[0] > GRID_WIDTH - 1: # right
return True
if px + pos[0] < 0: # left
return True
if py + pos[1] > len(grid[0]) - 1: # down
return True
if py + pos[1] < 0: # up
continue
if grid[px + pos[0]][py + pos[1]] > 0:
# print(px, py)
# print(px + pos[0], py + pos[1])
# print("Touch")
return True
return False
# collidedown function
# for i in range 4(y position)
# if px+y=20 then collidedown =true
# used for move down and rotation collisions
def collideDown(grid, block, px, py):
return collide(grid, block, px, py + 1)
# collideleft function
# for i in range 4(x positions)
# if blockx +x =0 then collide left = True
# used for moving block and rotation collision
def collideLeft(grid, block, px, py):
return collide(grid, block, px - 1, py)
# collideright function
# for i in range 4(x positions)
# if blockx +x +1>9 then collide left = True
# plus 1 is there because px is on the left of the piece
# used for moving block and rotation collision
def collideRight(grid, block, px, py):
return collide(grid, block, px + 1, py)
# rotatecollision function
# when respective rotate buttons are pressed
# this function checks if collide(left right or down has occured)
# if it hasnt then rotation occurs
def rotateCollide(grid, block, px, py):
feasibles = block.get_feasible()
left_most = 100
right_most = 0
up_most = 100
down_most = 0
for pos in feasibles:
right_most = max(right_most, pos[0])
left_most = min(left_most, pos[0])
down_most = max(down_most, pos[1])
up_most = min(up_most, pos[1])
c = Counter()
# print(px)
# print(block)
excess = len(grid[0]) - GRID_DEPTH
for pos in feasibles:
# print(px + pos[0], py + pos[1])
if px + pos[0] > 9: # right
c.update({"right": 1})
if px + pos[0] < 0: # left
c.update({"left": 1})
if py + pos[1] > len(grid[0]) - 1: # down
c.update({"down": 1})
# if py + pos[1] < excess: # up
# c.update({"up": 1})
if 0 <= px + pos[0] <= 9 and excess <= py + pos[1] <= len(grid[0]) - 1:
if grid[px + pos[0]][py + pos[1]] > 0:
if pos[0] == left_most:
c.update({"left": 1})
elif pos[0] == right_most:
c.update({"right": 1})
elif pos[1] == down_most:
c.update({"down": 1})
# elif pos[1] == up_most:
# c.update({"up": 1})
# print(c)
if len(c) == 0:
return False
else:
return c.most_common()[0][0]
# this function checks if a t-spin has occurred
#checks all possible tspin positions
#then spins the t piece into the spot
def tspinCheck(grid, block, px, py):
if collideDown(grid, block, px, py) == True:
if block.block_type() == 'T':
if px + 2 < GRID_WIDTH and py + 3 < len(grid[0]):
if grid[px][py + 1] > 0 and grid[px][py + 3] > 0 and grid[px + 2][py + 3] > 0:
return True
elif grid[px][py + 3] > 0 and grid[px + 2][py + 3] > 0 and grid[px + 2][py + 1] > 0:
return True
return False
# this function rotates the piece
# when rotation button is hit the next grid in the piece list becomes the piece
def rotate(grid, block, px, py, _dir=1):
# print(grid)
block.rotate(_dir)
# b = block.now_block()
collision = rotateCollide(grid, block, px, py) # checks for collisions
# print(collision)
find = 0
if collision == "left":
y_list = [0, 1, -1]
for s_x in range(0, 3):
for s_y in y_list:
if not find and not collide(grid, block, px + s_x, py + s_y):
px += s_x
py += s_y
find = 1
elif collision == "right":
y_list = [0, 1, -1]
for s_x in reversed(range(-2, 0)):
for s_y in y_list:
if not find and not collide(grid, block, px + s_x, py + s_y):
px += s_x
py += s_y
find = 1
elif collision == "down":
# y_list = [-1, -2]
x_list = [0, -1, 1, -2, 2]
for s_y in reversed(range(-1, 0)):
for s_x in x_list:
if not find and not collide(grid, block, px + s_x, py + s_y):
px += s_x
py += s_y
find = 1
elif collision == "up":
x_list = [0, -1, 1, -2, 2]
for s_y in range(1, 2):
for s_x in x_list:
if not find and not collide(grid, block, px + s_x, py + s_y):
px += s_x
py += s_y
find = 1
if collision != False and not find:
block.rotate(- _dir)
# print(collision)
tspin = 0
if tspinCheck(grid, block, px, py) == True:
tspin = 1
print("Tspin rotate")
# return [block, px, py, tspin]
return block, px, py, tspin
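# Note (added for clarity): the offset search above acts as a simple "wall kick":
# when the rotated piece collides on the left/right/bottom, nearby x/y shifts are
# tried and the first collision-free one is applied; if none exists the rotation
# is undone. This is a simplified scheme, not the guideline SRS kick tables.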
# this function drops the piece as far as it can go until
# it collides with a piece below it
def hardDrop(grid, block, px, py):
y = 0
x = 0
if collideDown(grid, block, px, py) == False:
x = 1
if x == 1:
while True:
py += 1
y += 1
if collideDown(grid, block, px, py) == True:
break
return y
# this function enables you to hold a piece
def hold(block, held, _buffer):
    # when a piece is held, the block at pos[0]
    # in the next list becomes the new piece
if held == None:
held = block
block = _buffer.new_block()
# the piece switches with the held piece
else:
block, held = held, block
return [block, held]
def freeze(last_time):
start = t.time()
while t.time() - start < last_time:
pass
def get_infos(board):
# board is equal to grid
    # borrowed from https://github.com/scuriosity/machine-learning-tetris/blob/master/tetris.py
# This function will calculate different parameters of the current board
# Initialize some stuff
heights = [0] * len(board)
diffs = [0] * (len(board) - 1)
holes = 0
diff_sum = 0
# Calculate the maximum height of each column
for i in range(0, len(board)): # Select a column
for j in range(0, len(board[0])): # Search down starting from the top of the board
if int(board[i][j]) > 0: # Is the cell occupied?
heights[i] = len(board[0]) - j # Store the height value
break
# Calculate the difference in heights
for i in range(0, len(diffs)):
diffs[i] = heights[i + 1] - heights[i]
# Calculate the maximum height
max_height = max(heights)
# Count the number of holes
for i in range(0, len(board)):
occupied = 0 # Set the 'Occupied' flag to 0 for each new column
for j in range(0, len(board[0])): # Scan from top to bottom
if int(board[i][j]) > 0:
occupied = 1 # If a block is found, set the 'Occupied' flag to 1
if int(board[i][j]) == 0 and occupied == 1:
holes += 1 # If a hole is found, add one to the count
height_sum = sum(heights)
for i in diffs:
diff_sum += abs(i)
return height_sum, diff_sum, max_height, holes
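# --- Hedged usage sketch (illustrative only) -----------------------------------
# `get_infos` reads the board column-major: board[i][j] is column i, row j, with
# j increasing downwards. Tiny 3-column, 4-row sanity check:
#   column 0: one block at the bottom              -> height 1
#   column 1: a block with an empty cell below it  -> height 3, one hole
#   column 2: empty                                -> height 0
_tiny_board_example = [
    [0, 0, 0, 1],
    [0, 1, 0, 1],
    [0, 0, 0, 0],
]
# height_sum = 1+3+0 = 4, diff_sum = |3-1| + |0-3| = 5, max_height = 3, holes = 1
assert get_infos(_tiny_board_example) == (4, 5, 3, 1)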
class Piece(object):
def __init__(self, _type, possible_shapes):
self._type = _type
self.possible_shapes = possible_shapes
self.current_shape_id = 0
def block_type(self):
return self._type
def reset(self):
self.current_shape_id = 0
def return_pos_color(self, px, py):
feasibles = []
block = self.now_block()
for x in range(BLOCK_WIDTH):
for y in range(BLOCK_LENGTH):
if block[x][y] > 0:
feasibles.append([px + x, py + y, block[x][y]])
return feasibles
def return_pos(self, px, py):
feasibles = []
block = self.now_block()
for x in range(BLOCK_WIDTH):
for y in range(BLOCK_LENGTH):
if block[x][y] > 0:
feasibles.append([px + x, py + y])
return feasibles
def get_feasible(self):
feasibles = []
b = self.now_block()
for x in range(BLOCK_WIDTH):
for y in range(BLOCK_LENGTH):
if b[x][y] > 0:
feasibles.append([x, y])
return feasibles
def now_block(self):
return self.possible_shapes[self.current_shape_id]
# def move_right(self, unit=1):
# self.px += unit
# def move_left(self, unit=1):
# self.px -= unit
# def move_up(self, unit=1):
# self.py -= unit
# def move_down(self, unit=1):
# self.py += unit
def rotate(self, _dir=1):
self.current_shape_id += _dir
self.current_shape_id %= len(self.possible_shapes)
class Buffer(object):
'''
Stores the coming pieces, every 7 pieces in a group.
'''
def __init__(self):
self.now_list = []
self.next_list = []
self.fill(self.now_list)
self.fill(self.next_list)
'''
    make sure the "now list" is filled
now list next list
next piece <- [ ] <- [ ]
'''
def new_block(self):
out = self.now_list.pop(0)
self.now_list.append(self.next_list.pop(0))
if len(self.next_list) == 0:
self.fill(self.next_list)
return out
def fill(self, _list):
pieces_keys = deepcopy(POSSIBLE_KEYS)
random.shuffle(pieces_keys)
for key in pieces_keys:
_list.append(Piece(key, PIECES_DICT[key]))
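    # Note (added for clarity): each refill appends one shuffled permutation of
    # POSSIBLE_KEYS, i.e. the classic "7-bag" randomizer -- every piece type
    # appears exactly once in each consecutive bag of 7 dealt pieces.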
'''
class for player
'''
class Player(object):
def __init__(self, info_dict):
self._id = info_dict.get("id")
self._drop = info_dict.get("drop")
self._hold = info_dict.get("hold")
self._rotate_right = info_dict.get("rotate_right")
self._rotate_left = info_dict.get("rotate_left")
self._down = info_dict.get("down")
self._left = info_dict.get("left")
self._right = info_dict.get("right")
@property
def id(self):
return self._id
@property
def drop(self):
return self._drop
@property
def hold(self):
return self._hold
@property
def rotate_right(self):
return self._rotate_right
@property
def rotate_left(self):
return self._rotate_left
@property
def down(self):
return self._down
@property
def left(self):
return self._left
@property
def right(self):
return self._right
'''
class Judge
'''
class Judge(object):
@staticmethod
def check_ko_win(tetris, max_ko):
if tetris.KO >= max_ko:
return 1
return 0
@staticmethod
def who_win(tetris_1, tetris_2):
if tetris_2.KO > tetris_1.KO: # Checks who is the winner of the game
            return tetris_2.get_id()  # player 2 wins
if tetris_1.KO > tetris_2.KO:
            return tetris_1.get_id()  # player 1 wins
if tetris_1.KO == tetris_2.KO:
if tetris_2.sent > tetris_1.sent:
                return tetris_2.get_id()  # player 2 wins
elif tetris_1.sent > tetris_2.sent:
                return tetris_1.get_id()  # player 1 wins
elif tetris_1.get_maximum_height() > tetris_2.get_maximum_height():
return tetris_2.get_id()
elif tetris_2.get_maximum_height() > tetris_1.get_maximum_height():
return tetris_1.get_id()
else:
return tetris_1.get_id() # no UI of draw
class Tetris(object):
def __init__(self, player, gridchoice):
if gridchoice == "none":
self.o_grid = [[0] * GRID_DEPTH for i in range(GRID_WIDTH)]
if gridchoice == "classic":
self.o_grid = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
if gridchoice == "comboking":
self.o_grid = [[0, 0, 0, 0, 0, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[0, 0, 0, 0, 0, 6, 6, 6, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5],
[0, 0, 0, 0, 0, 6, 6, 6, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 4, 5],
[0, 0, 0, 0, 0, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]]
if gridchoice == "lunchbox":
self.o_grid = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 5, 5, 5, 5, 5, 5, 5, 5, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 5, 2, 2, 2, 2, 2, 2, 5, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 5, 2, 4, 4, 4, 4, 2, 5, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 2, 4, 4, 4, 4, 2, 5, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 2, 2, 2, 2, 2, 2, 5, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 5, 5, 5, 5, 5, 5, 5, 6],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]]
self.player = player
self.reset()
def reset(self):
self.grid = deepcopy(self.o_grid)
self.oldko = 0 # these two used to keep track of ko's
self._n_used_block = 1
self.buffer = Buffer()
# list of the held piece
self.held = None
self.block = self.buffer.new_block()
# amount of lines sent for p1 and p2
self.sent = 0
self.tempsend = 0 # tempsending for p1 and p2
        self.oldcombo = self.combo = -1  # used for checking combos
self.tspin = 0 # for t spin
self.now_back2back = 0
self.pre_back2back = 0
self.tetris = 0
#for "KO"
self._KO = 0
self._attacked = 0
self._is_fallen = 0
self.px = 4
self.py = -2
# DEFINING VARIABLES
self.cleared = 0
self.kocounter = 0
self.stopcounter = 0
self.isholded = 0
self.pressedRight = False
self.pressedLeft = False
self.pressedDown = False
self.LAST_ROTATE_TIME = 0
self.LAST_MOVE_SHIFT_TIME = 0
self.LAST_MOVE_DOWN_TIME = 0
self.LAST_COMBO_DRAW_TIME = 0
self.LAST_TETRIS_DRAW_TIME = 0
self.LAST_TSPIN_DRAW_TIME = 0
self.LAST_BACK2BACK_DRAW_TIME = 0
self.LAST_NATRUAL_FALL_TIME = 0
self.LAST_FALL_DOWN_TIME = 0
self.tetris_drawing = 0
self.tspin_drawing = 0
self.back2back_drawing = 0
self.combo_counter = 0
self.natural_down_counter = 0
def increment_timer(self):
self.LAST_ROTATE_TIME += 1
self.LAST_MOVE_SHIFT_TIME += 1
self.LAST_MOVE_DOWN_TIME += 1
self.LAST_COMBO_DRAW_TIME += 1
self.LAST_TETRIS_DRAW_TIME += 1
self.LAST_TSPIN_DRAW_TIME += 1
self.LAST_BACK2BACK_DRAW_TIME += 1
self.LAST_NATRUAL_FALL_TIME += 1
self.LAST_FALL_DOWN_TIME += 1
@property
def is_fallen(self):
return self._is_fallen
@property
def n_used_block(self):
return self._n_used_block
@property
def KO(self):
return self._KO
@property
def attacked(self):
return self._attacked
def get_grid(self):
excess = len(self.grid[0]) - GRID_DEPTH
return_grids = np.zeros(shape=(GRID_WIDTH, GRID_DEPTH), dtype=np.float32)
block, px, py = self.block, self.px, self.py
excess = len(self.grid[0]) - GRID_DEPTH
b = block.now_block()
for i in range(len(self.grid)):
return_grids[i] =
|
np.array(self.grid[i][excess:GRID_DEPTH], dtype=np.float32)
|
numpy.array
|
from __future__ import print_function
# Import smorgasbord
import sys
import os
import pdb
current_module = sys.modules[__name__]
import numpy as np
import scipy.stats
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import astropy.wcs
import astropy.convolution
import ChrisFuncs
# Function to sum all elements in an ellipse centred on the middle of a given array
# Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
# Returns: Numpy array containing the sum of the pixel values in the ellipse, total number of pixels counted, and an array containing the pixel values
def EllipseSum(array, rad, axial_ratio, angle, i_centre, j_centre):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-rad])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+rad])))
j_cutout_min = int(np.floor(max([0, j_centre-rad])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+rad])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
        if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: EllipseSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Calculate flux & pixels in aperture, and store pixel values
ellipse_where = np.where( (ellipse_check<=1) & (
|
np.isnan(array)
|
numpy.isnan
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test numpy functions and ufuncs on Masked arrays and quantities.
The tests here are fairly detailed but do not aim for complete
coverage. Complete coverage of all numpy functions is done
with less detailed tests in test_function_helpers.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.masked.core import Masked
from .test_masked import (MaskedArraySetup, QuantitySetup, LongitudeSetup,
assert_masked_equal)
class MaskedUfuncTests(MaskedArraySetup):
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_2op_ufunc(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_ufunc_inplace(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
out = Masked(np.zeros_like(ma_mb.unmasked))
result = ufunc(self.ma, self.mb, out=out)
assert result is out
assert_masked_equal(result, ma_mb)
def test_ufunc_inplace_no_masked_input(self):
a_b = np.add(self.a, self.b)
out = Masked(np.zeros_like(a_b))
result = np.add(self.a, self.b, out=out)
assert result is out
assert_array_equal(result.unmasked, a_b)
assert_array_equal(result.mask, np.zeros(a_b.shape, bool))
def test_ufunc_inplace_error(self):
out = np.zeros(self.ma.shape)
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
@pytest.mark.parametrize('ufunc', (np.add.outer, np.minimum.outer))
def test_2op_ufunc_outer(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
def test_3op_ufunc(self):
ma_mb = np.clip(self.ma, self.b, self.c)
expected_data = np.clip(self.a, self.b, self.c)
expected_mask = self.mask_a
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_add_reduce(self, axis):
ma_reduce = np.add.reduce(self.ma, axis=axis)
expected_data = np.add.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
out = Masked(np.zeros_like(ma_reduce.unmasked),
np.ones_like(ma_reduce.mask))
ma_reduce2 = np.add.reduce(self.ma, axis=axis, out=out)
assert ma_reduce2 is out
assert_masked_equal(ma_reduce2, ma_reduce)
def test_add_reduce_no_masked_input(self):
a_reduce = np.add.reduce(self.a, axis=0)
out = Masked(np.zeros_like(a_reduce), np.ones(a_reduce.shape, bool))
result = np.add.reduce(self.a, axis=0, out=out)
assert result is out
assert_array_equal(out.unmasked, a_reduce)
assert_array_equal(out.mask, np.zeros(a_reduce.shape, bool))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_minimum_reduce(self, axis):
ma_reduce = np.minimum.reduce(self.ma, axis=axis)
expected_data = np.minimum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_maximum_reduce(self, axis):
ma_reduce = np.maximum.reduce(self.ma, axis=axis)
expected_data = np.maximum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
class TestMaskedArrayUfuncs(MaskedUfuncTests):
# multiply.reduce does not work with units, so test only for plain array.
@pytest.mark.parametrize('axis', (0, 1, None))
def test_multiply_reduce(self, axis):
ma_reduce = np.multiply.reduce(self.ma, axis=axis)
expected_data = np.multiply.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
def test_ufunc_not_implemented_for_other(self):
"""
If the unmasked operation returns NotImplemented, this
should lead to a TypeError also for the masked version.
"""
a = np.array([1, 2])
b = 3 * u.m
with pytest.raises(TypeError):
a & b
ma = Masked(a)
with pytest.raises(TypeError):
ma & b
class TestMaskedQuantityUfuncs(MaskedUfuncTests, QuantitySetup):
def test_ufunc_inplace_error2(self):
out = Masked(np.zeros(self.ma.shape))
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
class TestMaskedLongitudeUfuncs(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result = np.add(self.ma, self.mb, out=out)
assert result is out
expected = np.add(self.ma, self.mb).view(Quantity)
assert_masked_equal(result, expected)
class TestMaskedArrayConcatenation(MaskedArraySetup):
def test_concatenate(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.ma, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((self.mask_a, self.mask_b[np.newaxis]),
axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
def test_concatenate_not_all_masked(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.a, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((np.zeros(self.a.shape, bool),
self.mask_b[np.newaxis]), axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
@pytest.mark.parametrize('obj', (1, slice(2, 3)))
def test_insert(self, obj):
mc_in_a = np.insert(self.ma, obj, self.mc, axis=-1)
expected = Masked(np.insert(self.a, obj, self.c, axis=-1),
np.insert(self.mask_a, obj, self.mask_c, axis=-1))
assert_masked_equal(mc_in_a, expected)
def test_insert_masked_obj(self):
with pytest.raises(TypeError):
np.insert(self.ma, Masked(1, mask=False), self.mc, axis=-1)
def test_append(self):
mc_to_a = np.append(self.ma, self.mc, axis=-1)
expected = Masked(np.append(self.a, self.c, axis=-1),
np.append(self.mask_a, self.mask_c, axis=-1))
assert_masked_equal(mc_to_a, expected)
class TestMaskedQuantityConcatenation(TestMaskedArrayConcatenation,
QuantitySetup):
pass
class TestMaskedLongitudeConcatenation(TestMaskedArrayConcatenation,
LongitudeSetup):
pass
class TestMaskedArrayBroadcast(MaskedArraySetup):
def test_broadcast_to(self):
shape = self.ma.shape
ba = np.broadcast_to(self.mb, shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True))
assert_masked_equal(ba, expected)
def test_broadcast_to_using_apply(self):
# Partially just to ensure we cover the relevant part of _apply.
shape = self.ma.shape
ba = self.mb._apply(np.broadcast_to, shape=shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True))
assert_masked_equal(ba, expected)
def test_broadcast_arrays(self):
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=True)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=True)
bm = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c)
for mb_, b_, bm_ in zip(mb, b, bm):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, bm_)
def test_broadcast_arrays_not_all_masked(self):
mb = np.broadcast_arrays(self.a, self.mb, self.c, subok=True)
assert_array_equal(mb[0], self.a)
expected1 = np.broadcast_to(self.mb, self.a.shape, subok=True)
assert_masked_equal(mb[1], expected1)
expected2 = np.broadcast_to(self.c, self.a.shape, subok=True)
assert_array_equal(mb[2], expected2)
def test_broadcast_arrays_subok_false(self):
# subok affects ndarray subclasses but not masking itself.
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=False)
assert all(type(mb_.unmasked) is np.ndarray for mb_ in mb)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=False)
mask_b = np.broadcast_arrays(self.mask_a, self.mask_b,
self.mask_c, subok=False)
for mb_, b_, mask_ in zip(mb, b, mask_b):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, mask_)
class TestMaskedQuantityBroadcast(TestMaskedArrayBroadcast, QuantitySetup):
pass
class TestMaskedLongitudeBroadcast(TestMaskedArrayBroadcast, LongitudeSetup):
pass
class TestMaskedArrayCalculation(MaskedArraySetup):
@pytest.mark.parametrize('n,axis', [(1, -1), (2, -1), (1, 0)])
def test_diff(self, n, axis):
mda = np.diff(self.ma, n=n, axis=axis)
expected_data = np.diff(self.a, n, axis)
nan_mask = np.zeros_like(self.a)
nan_mask[self.ma.mask] = np.nan
expected_mask = np.isnan(np.diff(nan_mask, n=n, axis=axis))
assert_array_equal(mda.unmasked, expected_data)
assert_array_equal(mda.mask, expected_mask)
def test_diff_explicit(self):
ma = Masked(np.arange(8.),
[True, False, False, False, False, True, False, False])
mda = np.diff(ma)
assert np.all(mda.unmasked == 1.)
assert np.all(mda.mask ==
[True, False, False, False, True, True, False])
mda = np.diff(ma, n=2)
assert np.all(mda.unmasked == 0.)
assert np.all(mda.mask == [True, False, False, True, True, True])
class TestMaskedQuantityCalculation(TestMaskedArrayCalculation, QuantitySetup):
pass
class TestMaskedLongitudeCalculation(TestMaskedArrayCalculation,
LongitudeSetup):
pass
class TestMaskedArraySorting(MaskedArraySetup):
@pytest.mark.parametrize('axis', [-1, 0])
def test_lexsort1(self, axis):
ma_lexsort =
|
np.lexsort((self.ma,), axis=axis)
|
numpy.lexsort
|
#
import numpy as np
import matplotlib.pyplot as plt
class Chp003C002(object):
def __init__(self):
self.name = ''
def run(self):
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('牛顿法求解示意')
self.draw_curve()
self.draw_x0()
self.draw_x1()
plt.show()
def draw_curve(self):
        # draw the x-axis
xx = np.array([0.0, 5.0])
xy = np.array([0.0, 0.0])
plt.plot(xx, xy, '-g')
        # draw the curve
x = np.linspace(0, 5, 100)
y = x*x - 2.25
plt.plot(x, y, '-b')
plt.annotate(s=r'目标(1.5)',xy=(1.5, 0.0),\
xytext=(1.0,2.0),weight='bold',color='black',\
arrowprops=dict(arrowstyle='-|>',\
connectionstyle='arc3',color='blue'),\
bbox=dict(boxstyle='round,pad=0.5', fc='white', \
ec='k',lw=1 ,alpha=0.4))
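    # --- Hedged worked example (illustrative only) -------------------------
    # One Newton step for f(x) = x*x - 2.25 (root at 1.5), starting at x0 = 4.5:
    #   x1 = x0 - f(x0)/f'(x0) = 4.5 - (4.5**2 - 2.25) / (2 * 4.5) = 2.5
    # which is the tangent-line construction that draw_x0/draw_x1 visualize.
    def newton_step(self, x0):
        # illustrative helper (an assumption, not part of the original file)
        return x0 - (x0 * x0 - 2.25) / (2.0 * x0)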
def draw_x0(self):
        # vertical line at x0
x00x = np.array([4.5, 4.5])
x00y =
|
np.array([0.0, 4.5*4.5-2.25])
|
numpy.array
|
import unittest
import tensorflow as tf
import numpy as np
from .segment import segment_topk
class TestSegmentTopK(unittest.TestCase):
def test_top1(self):
sess = tf.InteractiveSession()
try:
preds = tf.constant(np.array([
.3,
.7,
.33,
.33,
.66,
1.,
]))
sample_ids = tf.constant(np.array([
0,
0,
1,
1,
1,
2,
]))
new_preds, idxs, new_ids = segment_topk(preds, sample_ids, 1, 'top1')
fetches = [new_preds, idxs, new_ids]
fetches = sess.run(fetches=fetches)
actual_preds = fetches[0]
actual_idxs = fetches[1]
actual_new_ids = fetches[2]
expected_preds = np.array([.7, .66, 1.])
self.assertTrue(np.allclose(actual_preds, expected_preds),
'expected {} got {}'.format(expected_preds, actual_preds))
expected_idxs = np.array([1, 4, 5])
self.assertTrue(np.all(actual_idxs == expected_idxs),
'expected {} got {}'.format(expected_idxs, actual_idxs))
expected_ids = np.array([0, 1, 2])
self.assertTrue(np.all(expected_ids == actual_new_ids),
'expected {} got {}'.format(expected_ids, actual_new_ids))
finally:
sess.close()
def test_top2(self):
sess = tf.InteractiveSession()
try:
preds = tf.constant(np.array([
.3,
.7,
.33,
.33,
.66,
1.,
]))
sample_ids = tf.constant(np.array([
0,
0,
1,
1,
1,
2,
]))
new_preds, idxs, new_ids = segment_topk(preds, sample_ids, 2, 'top2')
fetches = [new_preds, idxs, new_ids]
fetches = sess.run(fetches=fetches)
actual_preds = fetches[0]
actual_idxs = fetches[1]
actual_new_ids = fetches[2]
expected_preds = np.array([.7, .3, .66, .33, 1.])
self.assertTrue(np.allclose(actual_preds, expected_preds),
'expected {} got {}'.format(expected_preds, actual_preds))
expected_idxs = np.array([1, 0, 4, 2, 5])
self.assertTrue(np.all(actual_idxs == expected_idxs),
'expected {} got {}'.format(expected_idxs, actual_idxs))
expected_ids = np.array([0, 0, 1, 1, 2])
self.assertTrue(np.all(expected_ids == actual_new_ids),
'expected {} got {}'.format(expected_ids, actual_new_ids))
finally:
sess.close()
def test_topEmpty(self):
sess = tf.InteractiveSession()
try:
preds = tf.placeholder_with_default(
np.empty((0,), dtype=np.float64), shape=[None],
)
sample_ids = tf.placeholder_with_default(
|
np.empty((0,), dtype=np.int64)
|
numpy.empty
|
import numpy as np
import util
from linear_model import LinearModel
def main(train_path, eval_path, pred_path):
"""Problem 1(e): Gaussian discriminant analysis (GDA)
Args:
train_path: Path to CSV file containing dataset for training.
eval_path: Path to CSV file containing dataset for evaluation.
pred_path: Path to save predictions.
"""
# Load dataset
x_train, y_train = util.load_dataset(train_path, add_intercept=False)
# *** START CODE HERE ***
# Train GDA
model = GDA()
model.fit(x_train, y_train)
# Plot data and decision boundary
util.plot(x_train, y_train, model.theta, 'output/p01e_{}.png'.format(pred_path[-5]))
# Save predictions
x_eval, y_eval = util.load_dataset(eval_path, add_intercept=True)
y_pred = model.predict(x_eval)
|
np.savetxt(pred_path, y_pred > 0.5, fmt='%d')
|
numpy.savetxt
|
import imutils
import numpy as np
import cv2
lower =
|
np.array([0,20,30], dtype="uint8")
|
numpy.array
|
import sys
import datetime as dt
import pytest
import numpy as np
# Construction
class D:
def __index__(self) -> int:
return 0
class C:
def __complex__(self) -> complex:
return 3j
class B:
def __int__(self) -> int:
return 4
class A:
def __float__(self) -> float:
return 4.0
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")
if sys.version_info >= (3, 8):
np.uint64(D())
np.float32(D())
np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')
# Array-ish semantics
np.int8().real
np.int16().imag
np.int32().data
np.int64().flags
np.uint8().itemsize * 2
np.uint16().ndim + 1
np.uint32().strides
np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
np.datetime64(0, b"D")
np.datetime64(0, ('ms', 3))
np.datetime64("2019")
np.datetime64(b"2019")
np.datetime64("2019", "D")
np.datetime64(np.datetime64())
np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(dt.date(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
np.timedelta64(0, ('ms', 3))
np.timedelta64(0, b"D")
np.timedelta64("3")
np.timedelta64(b"5")
np.timedelta64(np.timedelta64(2))
np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
np.void(1)
np.void(np.int64(1))
np.void(True)
np.void(np.bool_(True))
np.void(b"test")
np.void(np.bytes_("test"))
# Protocols
i8 = np.int64()
u8 = np.uint64()
f8 = np.float64()
c16 = np.complex128()
b_ = np.bool_()
td = np.timedelta64()
U = np.str_("1")
S = np.bytes_("1")
AR = np.array(1, dtype=np.float64)
int(i8)
int(u8)
int(f8)
int(b_)
int(td)
int(U)
int(S)
int(AR)
with pytest.warns(np.ComplexWarning):
int(c16)
float(i8)
float(u8)
float(f8)
float(b_)
float(td)
float(U)
float(S)
float(AR)
with pytest.warns(np.ComplexWarning):
float(c16)
complex(i8)
complex(u8)
complex(f8)
complex(c16)
complex(b_)
complex(td)
complex(U)
complex(AR)
# Misc
c16.dtype
c16.real
c16.imag
c16.real.real
c16.real.imag
c16.ndim
c16.size
c16.itemsize
c16.shape
c16.strides
c16.squeeze()
c16.byteswap()
c16.transpose()
# Aliases
np.str0()
np.bool8()
np.bytes0()
np.string_()
np.object0()
np.void0(0)
np.byte()
np.short()
|
np.intc()
|
numpy.intc
|
# tools for calculating/comparing the area under the receiver operating characteristic curve
import numpy as np
from scipy.stats import norm
import scipy as sp
# used to calculate exact AUROC (factoring in ties)
from sklearn import metrics
def calc_auc(pred, target, with_ci=False, alpha=0.05):
# calculate the AUROC given one prediction or a set of predictions
# returns a float if only one set of predictions given
# returns a tuple if multiple predictions are given
    if isinstance(pred, list):
        pred = np.asarray(pred,dtype=float)
    if isinstance(target, list):
        target = np.asarray(target,dtype=float)
if len(pred) == len(target):
# we are calculating AUROC for a single prediction
# encase it in a tuple for compatibility .. unwrap it later !
pred = [pred]
onePred = True
else:
        if not isinstance(pred[0], np.ndarray):
            print('Input sizes may not match!')
if len(pred[0]) != len(target):
print('Input sizes may not match!')
onePred = False
P = len(pred)
N = len(target)
W = list()
for p in range(P):
W.append(metrics.roc_auc_score(target, pred[p]))
W = np.asarray(W,dtype=float)
# collapse W down from array if only one prediction given
if onePred == True:
W = W[0]
if with_ci == False:
return W
# calculate confidence interval and also return that
S = calc_auc_cov(pred, target)
if onePred == True:
# collapse S into a single value
# this allows auc_ci to be a (2,) sized array, rather than (1,2)
S = S[0,0]
auc_ci = norm.ppf([alpha/2.0, 1-(alpha/2.0)], loc=W, scale=np.sqrt(S))
return W, auc_ci
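# --- Hedged usage sketch (illustrative only) ------------------------------------
#   pred   = np.array([0.1, 0.4, 0.35, 0.8])
#   target = np.array([0, 0, 1, 1])
#   calc_auc(pred, target)                # -> 0.75
#   calc_auc(pred, target, with_ci=True)  # -> (0.75, array([lo, hi])), a 95% CI
# The CI comes from a normal approximation with the DeLong variance computed below.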
def calc_auc_no_ties(pred, target):
# calculate the AUROC given one prediction or a set of predictions
# returns a float if only one set of predictions given
# returns a tuple if multiple predictions are given
if len(pred) == len(target):
# we are calculating AUROC for a single prediction
# encase it in a tuple for compatibility .. unwrap it later !
pred = [pred]
P = len(pred)
N = len(target)
W = list()
for p in range(P):
idx = np.argsort(pred[p])
tar = target[idx]==0
# calculate the number of negative cases below the current case
pos = np.cumsum(tar)
# index to only positive cases - i.e. number of negative cases below each positive case
pos = pos[~tar]
# sum the number of negative cases below each positive case
W.append(np.sum(pos))
W = np.asarray(W,dtype=float)
N0 = np.sum(target==0)
N1 = N-N0
W = W / (N0*N1)
if len(W)==1:
W = W[0]
return W
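# --- Hedged worked example (illustrative only) -----------------------------------
# Without ties, the AUROC is the fraction of (positive, negative) pairs that are
# ranked correctly. With pred = [0.1, 0.4, 0.35, 0.8] and target = [0, 0, 1, 1]
# the pairs (0.35,0.1), (0.8,0.1), (0.8,0.4) are correct and (0.35,0.4) is not,
# so the AUROC is 3/4:
assert abs(calc_auc_no_ties(np.array([0.1, 0.4, 0.35, 0.8]),
                            np.array([0, 0, 1, 1])) - 0.75) < 1e-12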
def calc_auc_cov(pred, target):
P = len(pred) # number of predictors
N = len(target) # number of observations
# convert from tuple of predictions to matrix of X/Y
idx = target==1
# DeLong and DeLong define X as the group with *positive* target
# Y as the group with *negative* target
N_X = sum( idx) # number of positive cases
N_Y = sum(~idx) # number of negative cases
X = np.zeros([N_X, P])
Y = np.zeros([N_Y, P])
for p in range(P):
X[:,p] = pred[p][ idx]
Y[:,p] = pred[p][~idx]
theta=np.zeros([P,1],dtype=float);
V10=np.zeros([N_X,P],dtype=float);
V01=np.zeros([N_Y,P],dtype=float);
for p in range(P): # For each X/Y column pair
# compare 0s to 1s
for i in range(N_X):
phi1=np.sum( X[i,p] > Y[:,p] ); # Xi>Y
phi2=np.sum( X[i,p] == Y[:,p] ); # Xi=Y
V10[i,p]=(phi1+phi2*0.5);
theta[p]=theta[p]+phi1+phi2*0.5;
theta[p] = theta[p]/(N_X*N_Y);
for j in range(N_Y):
phi1=np.sum( X[:,p] > Y[j,p] ); # X>Yj
phi2=np.sum( X[:,p] == Y[j,p] ); # X=Yj
V01[j,p] = (phi1+phi2*0.5);
V10 = V10/N_Y
V01 = V01/N_X
# Calculate S01 and S10, covariance matrices of V01 and V10
theta_svd = np.dot(theta,np.transpose(theta))
S01 = (1.0/(N_Y-1))*(np.dot(np.transpose(V01),V01) - N_Y*theta_svd);
S10 = (1.0/(N_X-1))*(np.dot(np.transpose(V10),V10) - N_X*theta_svd);
# Combine for S, covariance matrix of theta
S = (1.0/N_Y)*S01 + (1.0/N_X)*S10;
return S
def test_auroc(pred1, pred2, target, alpha=0.05):
# compare if two predictions have AUROCs which are statistically significantly different
S = calc_auc_cov(pred=(pred1, pred2), target=target);
theta = calc_auc(pred=(pred1, pred2), target=target)
S_sz = S.shape;
theta_sz = theta.shape;
L = np.reshape(np.asarray([1, -1]),[1,2]) # the default contrast - compare pred1 to pred2
LSL = np.dot(np.dot(L, S), np.transpose(L))
# Compute p-value using normal distribution
mu=np.dot(L,theta);
sigma=np.sqrt(LSL);
pval = sp.stats.distributions.norm.cdf(0,loc=mu,scale=sigma);
pval = pval[0][0]
# 2-sided test, double the tails -> double the p-value
if mu<0:
pval=2*(1-pval);
else:
pval=2*pval;
# also output 95% confidence interval
ci = sp.stats.distributions.norm.ppf([alpha/2,1-alpha/2],loc=theta[0],scale=sigma);
return pval, ci
#TODO: also allow for comparing using contrast matrix / chi2 test
# bootstrap AUROC
def bootstrap_auc(pred, target, B=100):
auc = np.zeros(B,dtype=float)
N = len(target)
for b in range(B):
idx = np.random.randint(0, high=N, size=N)
auc[b] = calc_auc(pred[idx], target[idx])
# get confidence intervals using percentiles of AUC
ci = np.percentile(auc, [5,95])
auc = calc_auc(pred, target)
return auc, ci
# === binormal AUROC is a parametric estimate of the ROC curve
# can be useful if you have low sample sizes
# assumes that the predictor is normally distributed, so sometimes it helps to
# transform the predictor to be more normally distributed, e.g. apply log, etc.
def binormal_auroc(X, Y):
# calculates the AUROC assuming X and Y are normally distributed
# this is frequently called the "Binormal AUROC"
# X should contain predictions for observations with an outcome of 1
# Y should contain predictions for observations with an outcome of 0
x_mu = np.mean(X)
x_s = np.std(X)
y_mu = np.mean(Y)
y_s = np.std(Y)
a = (x_mu - y_mu) / x_s
b = y_s / x_s
return norm.cdf( a / (np.sqrt(1+(b**2))) )
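# --- Hedged worked example (illustrative only) -----------------------------------
# With X ~ N(1, 1) and Y ~ N(0, 1): a = 1, b = 1, so the binormal AUROC is
# norm.cdf(1 / np.sqrt(2)) ~= 0.7602. Large samples drawn from those
# distributions should land close to that value, e.g. (not run here):
#   rng = np.random.RandomState(0)
#   X = rng.normal(1.0, 1.0, 100000); Y = rng.normal(0.0, 1.0, 100000)
#   binormal_auroc(X, Y)   # ~0.76, and the empirical AUROC agrees asymptotically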
def binormal_roc(X, Y, thr=None):
# calculates the ROC curve assuming X and Y are normally distributed
# uses evenly spaced points specified by thr
# this is frequently called the "Binormal AUROC"
# X should contain predictions for observations with an outcome of 1
# Y should contain predictions for observations with an outcome of 0
if thr is None:
# get all possible criterion values
c_vec = np.unique(np.concatenate([X, Y]))
# create a vector of thresholds
c_vec = np.linspace(np.min(c_vec), np.max(c_vec), 101)
x_mu = np.mean(X)
x_s =
|
np.std(X)
|
numpy.std
|
import operator
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting
from xarray.core.npcompat import IS_NEP18_ACTIVE
pint = pytest.importorskip("pint")
DimensionalityError = pint.errors.DimensionalityError
unit_registry = pint.UnitRegistry()
Quantity = unit_registry.Quantity
pytestmark = [
pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NUMPY_EXPERIMENTAL_ARRAY_FUNCTION is not enabled"
),
# TODO: remove this once pint has a released version with __array_function__
pytest.mark.skipif(
not hasattr(unit_registry.Quantity, "__array_function__"),
reason="pint does not implement __array_function__ yet",
),
# pytest.mark.filterwarnings("ignore:::pint[.*]"),
]
def array_extract_units(obj):
raw = obj.data if hasattr(obj, "data") else obj
try:
return raw.units
except AttributeError:
return None
def array_strip_units(array):
try:
return array.magnitude
except AttributeError:
return array
def array_attach_units(data, unit, convert_from=None):
try:
unit, convert_from = unit
except TypeError:
pass
if isinstance(data, Quantity):
if not convert_from:
raise ValueError(
"cannot attach unit {unit} to quantity ({data.units})".format(
unit=unit, data=data
)
)
elif isinstance(convert_from, unit_registry.Unit):
data = data.magnitude
elif convert_from is True: # intentionally accept exactly true
if data.check(unit):
convert_from = data.units
data = data.magnitude
else:
raise ValueError(
"cannot convert quantity ({data.units}) to {unit}".format(
unit=unit, data=data
)
)
else:
raise ValueError(
"cannot convert from invalid unit {convert_from}".format(
convert_from=convert_from
)
)
# to make sure we also encounter the case of "equal if converted"
if convert_from is not None:
quantity = (data * convert_from).to(
unit
if isinstance(unit, unit_registry.Unit)
else unit_registry.dimensionless
)
else:
try:
quantity = data * unit
except np.core._exceptions.UFuncTypeError:
if unit != 1:
raise
quantity = data
return quantity
def extract_units(obj):
if isinstance(obj, xr.Dataset):
vars_units = {
name: array_extract_units(value) for name, value in obj.data_vars.items()
}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, xr.DataArray):
vars_units = {obj.name: array_extract_units(obj)}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, Quantity):
vars_units = {"<array>": array_extract_units(obj)}
units = {**vars_units}
else:
units = {}
return units
def strip_units(obj):
if isinstance(obj, xr.Dataset):
data_vars = {name: strip_units(value) for name, value in obj.data_vars.items()}
coords = {name: strip_units(value) for name, value in obj.coords.items()}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords)
elif isinstance(obj, xr.DataArray):
data = array_strip_units(obj.data)
coords = {
name: (
(value.dims, array_strip_units(value.data))
if isinstance(value.data, Quantity)
else value # to preserve multiindexes
)
for name, value in obj.coords.items()
}
new_obj = xr.DataArray(name=obj.name, data=data, coords=coords, dims=obj.dims)
elif hasattr(obj, "magnitude"):
new_obj = obj.magnitude
else:
new_obj = obj
return new_obj
def attach_units(obj, units):
if not isinstance(obj, (xr.DataArray, xr.Dataset)):
return array_attach_units(obj, units.get("data", 1))
if isinstance(obj, xr.Dataset):
data_vars = {
name: attach_units(value, units) for name, value in obj.data_vars.items()
}
coords = {
name: attach_units(value, units) for name, value in obj.coords.items()
}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs)
else:
# try the array name, "data" and None, then fall back to dimensionless
data_units = (
units.get(obj.name, None)
or units.get("data", None)
or units.get(None, None)
or 1
)
data = array_attach_units(obj.data, data_units)
coords = {
name: (
(value.dims, array_attach_units(value.data, units.get(name) or 1))
if name in units
# to preserve multiindexes
else value
)
for name, value in obj.coords.items()
}
dims = obj.dims
attrs = obj.attrs
new_obj = xr.DataArray(
name=obj.name, data=data, coords=coords, attrs=attrs, dims=dims
)
return new_obj
def assert_equal_with_units(a, b):
# works like xr.testing.assert_equal, but also explicitly checks units
# so, it is more like assert_identical
__tracebackhide__ = True
if isinstance(a, xr.Dataset) or isinstance(b, xr.Dataset):
a_units = extract_units(a)
b_units = extract_units(b)
a_without_units = strip_units(a)
b_without_units = strip_units(b)
assert a_without_units.equals(b_without_units), formatting.diff_dataset_repr(
a, b, "equals"
)
assert a_units == b_units
else:
a = a if not isinstance(a, (xr.DataArray, xr.Variable)) else a.data
b = b if not isinstance(b, (xr.DataArray, xr.Variable)) else b.data
assert type(a) == type(b) or (
isinstance(a, Quantity) and isinstance(b, Quantity)
)
# workaround until pint implements allclose in __array_function__
if isinstance(a, Quantity) or isinstance(b, Quantity):
assert (
hasattr(a, "magnitude") and hasattr(b, "magnitude")
) and np.allclose(a.magnitude, b.magnitude, equal_nan=True)
assert (hasattr(a, "units") and hasattr(b, "units")) and a.units == b.units
else:
assert np.allclose(a, b, equal_nan=True)
@pytest.fixture(params=[float, int])
def dtype(request):
return request.param
class method:
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = args
self.kwargs = kwargs
def __call__(self, obj, *args, **kwargs):
from collections.abc import Callable
from functools import partial
all_args = list(self.args) + list(args)
all_kwargs = {**self.kwargs, **kwargs}
func = getattr(obj, self.name, None)
if func is None or not isinstance(func, Callable):
# fall back to module level numpy functions if not a xarray object
if not isinstance(obj, (xr.Variable, xr.DataArray, xr.Dataset)):
numpy_func = getattr(np, self.name)
func = partial(numpy_func, obj)
# remove typical xr args like "dim"
exclude_kwargs = ("dim", "dims")
all_kwargs = {
key: value
for key, value in all_kwargs.items()
if key not in exclude_kwargs
}
else:
raise AttributeError(f"{obj} has no method named '{self.name}'")
return func(*all_args, **all_kwargs)
def __repr__(self):
return f"method_{self.name}"
class function:
def __init__(self, name):
self.name = name
self.func = getattr(np, name)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self):
return f"function_{self.name}"
@pytest.mark.parametrize("func", (xr.zeros_like, xr.ones_like))
def test_replication(func, dtype):
array = np.linspace(0, 10, 20).astype(dtype) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
numpy_func = getattr(np, func.__name__)
expected = xr.DataArray(data=numpy_func(array), dims="x")
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(
reason="np.full_like on Variable strips the unit and pint does not allow mixed args"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.ms, None, id="compatible_unit"),
pytest.param(unit_registry.s, None, id="identical_unit"),
),
)
def test_replication_full_like(unit, error, dtype):
array = np.linspace(0, 5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
fill_value = -1 * unit
if error is not None:
with pytest.raises(error):
xr.full_like(data_array, fill_value=fill_value)
else:
result = xr.full_like(data_array, fill_value=fill_value)
expected = np.full_like(array, fill_value=fill_value)
assert_equal_with_units(expected, result)
class TestDataArray:
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
def test_init(self, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
assert isinstance(data_array.data, Quantity)
assert all(
{
name: isinstance(coord.data, Quantity)
for name, coord in data_array.coords.items()
}.values()
)
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"func", (pytest.param(str, id="str"), pytest.param(repr, id="repr"))
)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
def test_repr(self, func, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
# FIXME: this just checks that the repr does not raise
# warnings or errors, but does not check the result
func(data_array)
@pytest.mark.parametrize(
"func",
(
pytest.param(
function("all"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("any"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("argmax"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
pytest.param(
function("argmin"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
function("max"),
function("mean"),
pytest.param(
function("median"),
marks=pytest.mark.xfail(
reason="np.median on DataArray strips the units"
),
),
function("min"),
pytest.param(
function("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("sum"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
function("std"),
function("var"),
function("cumsum"),
pytest.param(
function("cumprod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("all"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("any"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("argmax"),
marks=pytest.mark.xfail(
reason="comparison of quantities with ndarrays in nanops not implemented"
),
),
pytest.param(
method("argmin"),
marks=pytest.mark.xfail(
reason="comparison of quantities with ndarrays in nanops not implemented"
),
),
method("max"),
method("mean"),
method("median"),
method("min"),
pytest.param(
method("prod"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
pytest.param(
method("sum"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
method("std"),
method("var"),
method("cumsum"),
pytest.param(
method("cumprod"),
marks=pytest.mark.xfail(reason="pint does not implement cumprod yet"),
),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(operator.neg, id="negate"),
pytest.param(abs, id="absolute"),
pytest.param(
np.round,
id="round",
marks=pytest.mark.xfail(reason="pint does not implement round"),
),
),
)
def test_unary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(lambda x: 2 * x, id="multiply"),
pytest.param(lambda x: x + x, id="add"),
pytest.param(lambda x: x[0] + x, id="add scalar"),
pytest.param(
lambda x: x.T @ x,
id="matrix multiply",
marks=pytest.mark.xfail(
reason="pint does not support matrix multiplication yet"
),
),
),
)
def test_binary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"comparison",
(
pytest.param(operator.lt, id="less_than"),
pytest.param(operator.ge, id="greater_equal"),
pytest.param(operator.eq, id="equal"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, ValueError, id="without_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incorrect_unit"),
pytest.param(unit_registry.m, None, id="correct_unit"),
),
)
def test_comparison_operations(self, comparison, unit, error, dtype):
array = (
np.array([10.1, 5.2, 6.5, 8.0, 21.3, 7.1, 1.3]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array)
value = 8
to_compare_with = value * unit
# incompatible units are all not equal
if error is not None and comparison is not operator.eq:
with pytest.raises(error):
comparison(array, to_compare_with)
with pytest.raises(error):
comparison(data_array, to_compare_with)
else:
result = comparison(data_array, to_compare_with)
            # pint evaluates comparisons of incompatible quantities as all-False,
            # so multiply by np.ones_like to broadcast scalar and array results alike
expected = xr.DataArray(
data=comparison(array, to_compare_with)
* np.ones_like(array, dtype=bool)
)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.m, DimensionalityError, id="incorrect unit"),
pytest.param(unit_registry.degree, None, id="correct unit"),
),
)
def test_univariate_ufunc(self, units, error, dtype):
array = np.arange(10).astype(dtype) * units
data_array = xr.DataArray(data=array)
if error is not None:
with pytest.raises(error):
np.sin(data_array)
else:
expected = xr.DataArray(data=np.sin(array))
result = np.sin(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="pint's implementation of `np.maximum` strips units")
def test_bivariate_ufunc(self, dtype):
unit = unit_registry.m
array = np.arange(10).astype(dtype) * unit
data_array = xr.DataArray(data=array)
expected = xr.DataArray(np.maximum(array, 0 * unit))
assert_equal_with_units(expected, np.maximum(data_array, 0 * unit))
assert_equal_with_units(expected, np.maximum(0 * unit, data_array))
@pytest.mark.parametrize("property", ("T", "imag", "real"))
def test_numpy_properties(self, property, dtype):
array = (
np.arange(5 * 10).astype(dtype)
+ 1j * np.linspace(-1, 0, 5 * 10).astype(dtype)
).reshape(5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims=("x", "y"))
expected = xr.DataArray(
data=getattr(array, property),
dims=("x", "y")[:: 1 if property != "T" else -1],
)
result = getattr(data_array, property)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
method("conj"),
method("argsort"),
method("conjugate"),
method("round"),
pytest.param(
method("rank", dim="x"),
marks=pytest.mark.xfail(reason="pint does not implement rank yet"),
),
),
ids=repr,
)
def test_numpy_methods(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array, dims="x")
expected = xr.DataArray(func(array), dims="x")
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func", (method("clip", min=3, max=8), method("searchsorted", v=5)), ids=repr
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods_with_args(self, func, unit, error, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
scalar_types = (int, float)
kwargs = {
key: (value * unit if isinstance(value, scalar_types) else value)
for key, value in func.kwargs.items()
}
if error is not None:
with pytest.raises(error):
func(data_array, **kwargs)
else:
expected = func(array, **kwargs)
if func.name not in ["searchsorted"]:
expected = xr.DataArray(data=expected)
result = func(data_array, **kwargs)
if func.name in ["searchsorted"]:
assert np.allclose(expected, result)
else:
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func, dtype):
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x, "y": y}, dims=("x", "y"))
expected = func(strip_units(data_array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="ffill and bfill lose units in data")
@pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
def test_missing_value_filling(self, func, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
result_without_units = func(strip_units(data_array), dim="x")
result = xr.DataArray(
data=result_without_units.data * unit_registry.degK,
coords={"x": x},
dims=["x"],
)
expected = attach_units(
func(strip_units(data_array), dim="x"), {"data": unit_registry.degK}
)
result = func(data_array, dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="fillna drops the unit")
@pytest.mark.parametrize(
"fill_value",
(
pytest.param(
-1,
id="python scalar",
marks=pytest.mark.xfail(
reason="python scalar cannot be converted using astype()"
),
),
pytest.param(np.array(-1), id="numpy scalar"),
pytest.param(np.array([-1]), id="numpy array"),
),
)
def test_fillna(self, fill_value, dtype):
unit = unit_registry.m
array = np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype) * unit
data_array = xr.DataArray(data=array)
expected = attach_units(
strip_units(data_array).fillna(value=fill_value), {"data": unit}
)
result = data_array.fillna(value=fill_value * unit)
assert_equal_with_units(expected, result)
def test_dropna(self, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
expected = attach_units(
strip_units(data_array).dropna(dim="x"), {"data": unit_registry.m}
)
result = data_array.dropna(dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="pint does not implement `numpy.isin`")
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="same_unit"),
),
)
def test_isin(self, unit, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array, dims="x")
raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
values = raw_values * unit
result_without_units = strip_units(data_array).isin(raw_values)
if unit != unit_registry.m:
result_without_units[:] = False
result_with_units = data_array.isin(values)
assert_equal_with_units(result_without_units, result_with_units)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"masking",
marks=pytest.mark.xfail(reason="nan not compatible with quantity"),
),
pytest.param(
"replacing_scalar",
marks=pytest.mark.xfail(reason="scalar not convertible using astype"),
),
pytest.param(
"replacing_array",
marks=pytest.mark.xfail(
reason="replacing using an array drops the units"
),
),
pytest.param(
"dropping",
marks=pytest.mark.xfail(reason="nan not compatible with quantity"),
),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="same_unit"),
),
)
def test_where(self, variant, unit, error, dtype):
def _strip_units(mapping):
return {key: array_strip_units(value) for key, value in mapping.items()}
original_unit = unit_registry.m
array = np.linspace(0, 1, 10).astype(dtype) * original_unit
data_array = xr.DataArray(data=array)
condition = data_array < 0.5 * original_unit
other = np.linspace(-2, -1, 10).astype(dtype) * unit
variant_kwargs = {
"masking": {"cond": condition},
"replacing_scalar": {"cond": condition, "other": -1 * unit},
"replacing_array": {"cond": condition, "other": other},
"dropping": {"cond": condition, "drop": True},
}
kwargs = variant_kwargs.get(variant)
kwargs_without_units = _strip_units(kwargs)
if variant not in ("masking", "dropping") and error is not None:
with pytest.raises(error):
data_array.where(**kwargs)
else:
expected = attach_units(
strip_units(array).where(**kwargs_without_units),
{"data": original_unit},
)
result = data_array.where(**kwargs)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="interpolate strips units")
def test_interpolate_na(self, dtype):
array = (
np.array([-1.03, 0.1, 1.4, np.nan, 2.3, np.nan, np.nan, 9.1])
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x").astype(dtype)
expected = attach_units(
strip_units(data_array).interpolate_na(dim="x"), {"data": unit_registry.m}
)
result = data_array.interpolate_na(dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="uses DataArray.where, which currently fails")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_combine_first(self, unit, error, dtype):
array = np.zeros(shape=(2, 2), dtype=dtype) * unit_registry.m
other_array = np.ones_like(array) * unit
data_array = xr.DataArray(
data=array, coords={"x": ["a", "b"], "y": [-1, 0]}, dims=["x", "y"]
)
other = xr.DataArray(
data=other_array, coords={"x": ["b", "c"], "y": [0, 1]}, dims=["x", "y"]
)
if error is not None:
with pytest.raises(error):
data_array.combine_first(other)
else:
expected = attach_units(
strip_units(data_array).combine_first(strip_units(other)),
{"data": unit_registry.m},
)
result = data_array.combine_first(other)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(
unit_registry.cm,
id="compatible_unit",
marks=pytest.mark.xfail(reason="identical does not check units yet"),
),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"variation",
(
"data",
pytest.param(
"dims", marks=pytest.mark.xfail(reason="units in indexes not supported")
),
"coords",
),
)
@pytest.mark.parametrize("func", (method("equals"), method("identical")), ids=repr)
def test_comparisons(self, func, variation, unit, dtype):
data = np.linspace(0, 5, 10).astype(dtype)
coord = np.arange(len(data)).astype(dtype)
base_unit = unit_registry.m
quantity = data * base_unit
x = coord * base_unit
y = coord * base_unit
units = {
"data": (unit, base_unit, base_unit),
"dims": (base_unit, unit, base_unit),
"coords": (base_unit, base_unit, unit),
}
data_unit, dim_unit, coord_unit = units.get(variation)
data_array = xr.DataArray(
data=quantity, coords={"x": x, "y": ("x", y)}, dims="x"
)
other = attach_units(
strip_units(data_array),
{
None: (data_unit, base_unit if quantity.check(data_unit) else None),
"x": (dim_unit, base_unit if x.check(dim_unit) else None),
"y": (coord_unit, base_unit if y.check(coord_unit) else None),
},
)
# TODO: test dim coord once indexes leave units intact
# also, express this in terms of calls on the raw data array
# and then check the units
equal_arrays = (
np.all(quantity == other.data)
and (np.all(x == other.x.data) or True) # dims can't be checked yet
and np.all(y == other.y.data)
)
equal_units = (
data_unit == unit_registry.m
and coord_unit == unit_registry.m
and dim_unit == unit_registry.m
)
expected = equal_arrays and (func.name != "identical" or equal_units)
result = func(data_array, other)
assert expected == result
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
left_array = np.ones(shape=(2, 2), dtype=dtype) * unit_registry.m
right_array = array_attach_units(
np.ones(shape=(2,), dtype=dtype),
unit,
convert_from=unit_registry.m if left_array.check(unit) else None,
)
left = xr.DataArray(data=left_array, dims=("x", "y"))
right = xr.DataArray(data=right_array, dims="x")
expected = np.all(left_array == right_array[:, None])
result = left.broadcast_equals(right)
assert expected == result
@pytest.mark.parametrize(
"func",
(
method("pipe", lambda da: da * 10),
method("assign_coords", y2=("y", np.arange(10) * unit_registry.mm)),
method("assign_attrs", attr1="value"),
method("rename", x2="x_mm"),
method("swap_dims", {"x": "x2"}),
method(
"expand_dims",
dim={"z": np.linspace(10, 20, 12) * unit_registry.s},
axis=1,
),
method("drop", labels="x"),
method("reset_coords", names="x2"),
method("copy"),
pytest.param(
method("astype", np.float32),
marks=pytest.mark.xfail(reason="units get stripped"),
),
pytest.param(
method("item", 1), marks=pytest.mark.xfail(reason="units get stripped")
),
),
ids=repr,
)
def test_content_manipulation(self, func, dtype):
quantity = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.pascal
)
x = np.arange(quantity.shape[0]) * unit_registry.m
y = np.arange(quantity.shape[1]) * unit_registry.m
x2 = x.to(unit_registry.mm)
data_array = xr.DataArray(
name="data",
data=quantity,
coords={"x": x, "x2": ("x", x2), "y": y},
dims=("x", "y"),
)
stripped_kwargs = {
key: array_strip_units(value) for key, value in func.kwargs.items()
}
expected = attach_units(
func(strip_units(data_array), **stripped_kwargs),
{
"data": quantity.units,
"x": x.units,
"x_mm": x2.units,
"x2": x2.units,
"y": y.units,
},
)
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(
method("drop", labels=np.array([1, 5]), dim="x"),
marks=pytest.mark.xfail(
reason="selecting using incompatible units does not raise"
),
),
pytest.param(method("copy", data=np.arange(20))),
),
ids=repr,
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_content_manipulation_with_units(self, func, unit, error, dtype):
quantity = np.linspace(0, 10, 20, dtype=dtype) * unit_registry.pascal
x = np.arange(len(quantity)) * unit_registry.m
data_array = xr.DataArray(name="data", data=quantity, coords={"x": x}, dims="x")
kwargs = {
key: (value * unit if isinstance(value, np.ndarray) else value)
for key, value in func.kwargs.items()
}
stripped_kwargs = func.kwargs
expected = attach_units(
func(strip_units(data_array), **stripped_kwargs),
{"data": quantity.units if func.name == "drop" else unit, "x": x.units},
)
if error is not None and func.name == "drop":
with pytest.raises(error):
func(data_array, **kwargs)
else:
result = func(data_array, **kwargs)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"indices",
(
pytest.param(4, id="single index"),
pytest.param([5, 2, 9, 1], id="multiple indices"),
),
)
def test_isel(self, indices, dtype):
array = np.arange(10).astype(dtype) * unit_registry.s
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
expected = attach_units(
strip_units(data_array).isel(x=indices),
{"data": unit_registry.s, "x": unit_registry.m},
)
result = data_array.isel(x=indices)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(
reason="xarray does not support duck arrays in dimension coordinates"
)
@pytest.mark.parametrize(
"values",
(
pytest.param(12, id="single value"),
pytest.param([10, 5, 13], id="list of multiple values"),
pytest.param(np.array([9, 3, 7, 12]), id="array of multiple values"),
),
)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(1, KeyError, id="no units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incorrect unit"),
pytest.param(unit_registry.s, None, id="correct unit"),
),
)
def test_sel(self, values, units, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
values_with_units = values * units
if error is not None:
with pytest.raises(error):
data_array.sel(x=values_with_units)
else:
result_array = array[values]
result_data_array = data_array.sel(x=values_with_units)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="xarray does not support duck arrays in dimension coordinates"
)
@pytest.mark.parametrize(
"values",
(
pytest.param(12, id="single value"),
pytest.param([10, 5, 13], id="list of multiple values"),
pytest.param(np.array([9, 3, 7, 12]), id="array of multiple values"),
),
)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(1, KeyError, id="no units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incorrect unit"),
pytest.param(unit_registry.s, None, id="correct unit"),
),
)
def test_loc(self, values, units, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
values_with_units = values * units
if error is not None:
with pytest.raises(error):
data_array.loc[values_with_units]
else:
result_array = array[values]
result_data_array = data_array.loc[values_with_units]
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(reason="tries to coerce using asarray")
@pytest.mark.parametrize(
"shape",
(
pytest.param((10, 20), id="nothing squeezable"),
pytest.param((10, 20, 1), id="last dimension squeezable"),
pytest.param((10, 1, 20), id="middle dimension squeezable"),
pytest.param((1, 10, 20), id="first dimension squeezable"),
pytest.param((1, 10, 1, 20), id="first and last dimension squeezable"),
),
)
def test_squeeze(self, shape, dtype):
names = "xyzt"
coords = {
name: np.arange(length).astype(dtype)
* (unit_registry.m if name != "t" else unit_registry.s)
for name, length in zip(names, shape)
}
array = np.arange(10 * 20).astype(dtype).reshape(shape) * unit_registry.J
data_array = xr.DataArray(
data=array, coords=coords, dims=tuple(names[: len(shape)])
)
result_array = array.squeeze()
result_data_array = data_array.squeeze()
assert_equal_with_units(result_array, result_data_array)
# try squeezing the dimensions separately
names = tuple(dim for dim, coord in coords.items() if len(coord) == 1)
for index, name in enumerate(names):
assert_equal_with_units(
np.squeeze(array, axis=index), data_array.squeeze(dim=name)
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
new_coords = (np.arange(10) + 0.5) * unit
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
if error is not None:
with pytest.raises(error):
data_array.interp(x=new_coords)
else:
new_coords_ = (
new_coords.magnitude if hasattr(new_coords, "magnitude") else new_coords
)
result_array = strip_units(data_array).interp(
x=new_coords_ * unit_registry.degK
)
result_data_array = data_array.interp(x=new_coords)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(reason="tries to coerce using asarray")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp_like(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
coords = {
"x": (np.arange(10) + 0.3) * unit_registry.m,
"y": (np.arange(5) + 0.3) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
new_data_array = xr.DataArray(
data=np.empty((20, 10)),
coords={"x": np.arange(20) * unit, "y": np.arange(10) * unit},
dims=("x", "y"),
)
if error is not None:
with pytest.raises(error):
data_array.interp_like(new_data_array)
else:
result_array = (
xr.DataArray(
data=array.magnitude,
coords={name: value.magnitude for name, value in coords.items()},
dims=("x", "y"),
).interp_like(strip_units(new_data_array))
* unit_registry.degK
)
result_data_array = data_array.interp_like(new_data_array)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="pint does not implement np.result_type in __array_function__ yet"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
new_coords = (np.arange(10) + 0.5) * unit
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
if error is not None:
with pytest.raises(error):
data_array.interp(x=new_coords)
else:
result_array = strip_units(data_array).reindex(
x=(
new_coords.magnitude
if hasattr(new_coords, "magnitude")
else new_coords
)
* unit_registry.degK
)
result_data_array = data_array.reindex(x=new_coords)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="pint does not implement np.result_type in __array_function__ yet"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex_like(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
coords = {
"x": (np.arange(10) + 0.3) * unit_registry.m,
"y": (np.arange(5) + 0.3) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
new_data_array = xr.DataArray(
data=np.empty((20, 10)),
coords={"x": np.arange(20) * unit, "y":
|
np.arange(10)
|
numpy.arange
|
'''This script is used to generate dummy CSD sources,
to test the various kCSD methods
'''
import numpy as np
from numpy import exp, isfinite
from functools import wraps
def repeatUntilValid(f):
"""
A decorator (wrapper).
    If the output of `f(..., seed)` contains NaN or infinite values, the
    calculation is repeated with another `seed` (derived pseudorandomly from
    the current one) until the result is valid.
:param f: function of two arguments (the latter is `seed`)
:return: wrapped function f
"""
@wraps(f)
def wrapper(arg, seed=0):
for seed in seedSequence(seed):
result = f(arg, seed)
if isfinite(result).all():
return result
    # Python 2.7 workaround, necessary for test purposes
if not hasattr(wrapper, '__wrapped__'):
setattr(wrapper, '__wrapped__', f)
return wrapper
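# Usage note (illustrative): a decorated profile such as gauss_2d_large below
# is retried with seeds drawn from seedSequence(seed) until
# np.isfinite(result).all() holds, e.g.
#   csd_at = np.mgrid[0.:1.:50j, 0.:1.:50j]   # assumed evaluation grid
#   csd = gauss_2d_large(csd_at, seed=7)      # silently skips degenerate seeds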
def seedSequence(seed):
"""
Yields a sequence of unique, pseudorandom, deterministic seeds.
:param seed: beginning of the sequence
:return: seed generator
"""
previous = set()
rstate = np.random.RandomState(seed)
while True:
yield seed
previous.add(seed)
while seed in previous:
seed = rstate.randint(2 ** 32)
def get_states_1D(seed, n=1):
"""
Used in the random seed generation
creates a matrix that will generate seeds, here for gaussians:
amplitude (-1., 1.), location (0,1)*ndim, sigma(0,1)
"""
ndim = 1
if seed == 0:
states = np.array([1., 0.5, 0.5], ndmin=2)
rstate = np.random.RandomState(seed)
states = rstate.random_sample(n * (ndim + 2)).reshape((n, (ndim + 2)))
states[:, 0] = (2 * states[:, 0]) - 1.
return states, rstate
def add_1d_gaussians(x, states):
'''Function used for adding multiple 1D gaussians'''
f = np.zeros(x.shape)
for i in range(states.shape[0]):
gauss = states[i, 0]*np.exp(-((x - states[i, 1])**2)/(2.*states[i, 2])
)*(2*np.pi*states[i, 2])**-0.5
f += gauss
return f
def gauss_1d_mono(x, seed=0):
'''Random monopole in 1D'''
states, rstate = get_states_1D(seed, n=1)
f = add_1d_gaussians(x, states)
return f
def gauss_1d_dipole(x, seed=0):
'''Random dipole source in 1D'''
states, rstate = get_states_1D(seed, n=1)
offset = rstate.random_sample(1) - 0.5
states = np.tile(states, (2, 1))
states[1, 0] *= -1. # A Sink
states[1, 1] += offset
f = add_1d_gaussians(x, states)
return f
def get_states_2D(seed):
"""
Used in the random seed generation for 2d sources
"""
rstate = np.random.RandomState(seed)
states = rstate.random_sample(24)
states[0:12] = 2*states[0:12] - 1.
return states
@repeatUntilValid
def gauss_2d_large(csd_at, seed=0):
    '''Random quadpolar 'large source' profile from the 2012 paper, in 2D'''
x, y = csd_at
states = get_states_2D(seed)
z = 0
zz = states[0:4]
zs = states[4:8]
mag = states[8:12]
loc = states[12:20]
scl = states[20:24]
    f1 = mag[0]*exp((-1*(x-loc[0])**2 - (y-loc[4])**2)/scl[0])*exp(-(z-zz[0])**2/zs[0])/exp(-(zz[0])**2/zs[0])
    f2 = mag[1]*exp((-2*(x-loc[1])**2 - (y-loc[5])**2)/scl[1])*exp(-(z-zz[1])**2/zs[1])/exp(-(zz[1])**2/zs[1])
    f3 = mag[2]*exp((-3*(x-loc[2])**2 - (y-loc[6])**2)/scl[2])*exp(-(z-zz[2])**2/zs[2])/exp(-(zz[2])**2/zs[2])
    f4 = mag[3]*exp((-4*(x-loc[3])**2 - (y-loc[7])**2)/scl[3])*exp(-(z-zz[3])**2/zs[3])/exp(-(zz[3])**2/zs[3])
f = f1+f2+f3+f4
return f
def gauss_2d_small(csd_at, seed=0):
'''random quadpolar small source in 2D'''
x, y = csd_at
def gauss2d(x, y, p):
"""
p: list of parameters of the Gauss-function
[XCEN,YCEN,SIGMAX,SIGMAY,AMP,ANGLE]
SIGMA = FWHM / (2*sqrt(2*log(2)))
ANGLE = rotation of the X,Y direction of the Gaussian in radians
Returns
-------
the value of the Gaussian described by the parameters p
at position (x,y)
"""
rcen_x = p[0] * np.cos(p[5]) - p[1] * np.sin(p[5])
rcen_y = p[0] * np.sin(p[5]) + p[1] * np.cos(p[5])
xp = x * np.cos(p[5]) - y * np.sin(p[5])
yp = x * np.sin(p[5]) + y * np.cos(p[5])
g = p[4]*np.exp(-(((rcen_x-xp)/p[2])**2 +
((rcen_y-yp)/p[3])**2)/2.)
return g
states = get_states_2D(seed)
angle = states[18]*180.
x_amp = 0.038
y_amp = 0.056
f1 = gauss2d(x, y, [states[12], states[14], x_amp, y_amp, 0.5, angle])
f2 = gauss2d(x, y, [states[12], states[15], x_amp, y_amp, -0.5, angle])
f3 = gauss2d(x, y, [states[13], states[14], x_amp, y_amp, 0.5, angle])
f4 = gauss2d(x, y, [states[13], states[15], x_amp, y_amp, -0.5, angle])
f = f1+f2+f3+f4
return f
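# Example call (grid values are an assumption, not fixed by this module):
#   csd_at = np.mgrid[0.:1.:100j, 0.:1.:100j]
#   csd = gauss_2d_small(csd_at, seed=5)   # one random small-source profile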
def gauss_2d_random(size_seed=0, n=100):
    '''random quadpolar source in 2D'''
np.random.seed(size_seed)
large_count = np.random.randint(0, n)
small_count = n - large_count
random_indices = np.array(([gauss_2d_large]*large_count +
[gauss_2d_small]*small_count))
np.random.shuffle(random_indices)
return random_indices
def get_states_3D(seed):
"""
Used in the random seed generation for 3D sources
"""
rstate = np.random.RandomState(seed) # seed here!
states = rstate.random_sample(24)
return states
def gauss_3d_small(csd_at, seed=0):
    '''A random quadpolar small source in 3D'''
x, y, z = csd_at
states = get_states_3D(seed)
x0, y0, z0 = states[0:3]
x1, y1, z1 = states[3:6]
if states[6] < 0.01:
states[6] *= 25
sig_2 = states[6] / 75.
p1, p2, p3 = (ii*0.5 for ii in states[8:11])
A = (2*np.pi*sig_2)**-1
f1 = A*np.exp((-(x-x0)**2 - (y-y0)**2 - (z-z0)**2) / (2*sig_2))
f2 = -1*A*np.exp((-(x-x1)**2 - (y-y1)**2 - (z-z1)**2) / (2*sig_2))
x2 = np.modf(x0+p1)[0]
y2 = np.modf(y0+p2)[0]
z2 = np.modf(z0+p3)[0]
f3 = A*np.exp((-(x-x2)**2 - (y-y2)**2 - (z-z2)**2) / (2*sig_2))
x3 = np.modf(x1+p1)[0]
y3 = np.modf(y1+p2)[0]
z3 = np.modf(z1+p3)[0]
f4 = -1*A*np.exp((-(x-x3)**2 - (y-y3)**2 - (z-z3)**2) / (2*sig_2))
f = f1+f2+f3+f4
return f
def gauss_3d_large(csd_at, seed=0):
    '''A random dipolar large source in 3D'''
x, y, z = csd_at
states = get_states_3D(seed)
x0, y0, z0 = states[7:10]
x1, y1, z1 = states[10:13]
if states[1] < 0.01:
states[1] *= 25
sig_2 = states[1] * 5
A = (2*np.pi*sig_2)**-1
f1 = A*np.exp((-(x-x0)**2 - (y-y0)**2 - (z-z0)**2) / (2*sig_2))
f2 = -1*A*np.exp((-(x-x1)**2 - (y-y1)**2 - (z-z1)**2) / (2*sig_2))
f = f1+f2
return f
def gauss_1d_dipole_f(x):
"""1D Gaussian dipole source is placed between 0 and 1
to be used to test the CSD
Parameters
----------
x : np.array
Spatial pts. at which the true csd is evaluated
Returns
-------
f : np.array
The value of the csd at the requested points
"""
src = 0.5*exp(-((x-0.7)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
snk = -0.5*exp(-((x-0.3)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
f = src+snk
return f
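# Example (sketch): evaluate the fixed dipole on the unit interval, e.g.
#   csd_x = np.linspace(0., 1., 100)
#   true_csd = gauss_1d_dipole_f(csd_x)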
def gauss_2d_small_f(csd_at):
'''Source from Jan 2012 kCSD paper'''
x, y = csd_at
def gauss2d(x, y, p):
"""
p: list of parameters of the Gauss-function
[XCEN,YCEN,SIGMAX,SIGMAY,AMP,ANGLE]
SIGMA = FWHM / (2*sqrt(2*log(2)))
ANGLE = rotation of the X,Y direction of the Gaussian in radians
Returns
-------
the value of the Gaussian described by the parameters p
at position (x,y)
"""
rcen_x = p[0] * np.cos(p[5]) - p[1] * np.sin(p[5])
rcen_y = p[0] * np.sin(p[5]) + p[1] * np.cos(p[5])
xp = x * np.cos(p[5]) - y * np.sin(p[5])
yp = x * np.sin(p[5]) + y * np.cos(p[5])
g = p[4]*np.exp(-(((rcen_x-xp)/p[2])**2 +
((rcen_y-yp)/p[3])**2)/2.)
return g
f1 = gauss2d(x, y, [0.3, 0.7, 0.038, 0.058, 0.5, 0.])
f2 = gauss2d(x, y, [0.3, 0.6, 0.038, 0.058, -0.5, 0.])
f3 = gauss2d(x, y, [0.45, 0.7, 0.038, 0.058, 0.5, 0.])
f4 = gauss2d(x, y, [0.45, 0.6, 0.038, 0.058, -0.5, 0.])
f = f1+f2+f3+f4
return f
def gauss_2d_large_f(csd_at):
'''Fixed 'large source' profile in 2012 paper'''
x, y = csd_at
z = 0
zz = [0.4, -0.3, -0.1, 0.6]
zs = [0.2, 0.3, 0.4, 0.2]
    f1 = 0.5965*exp((-1*(x-0.1350)**2 - (y-0.8628)**2) / 0.4464)*exp(-(z-zz[0])**2 / zs[0])
"""
This is the main execution environment for the LSHC procedure.
https://github.com/stefanvanberkum/LSHC
"""
import random
import re
import time
from math import comb
import numpy as np
import spacy
from sklearn.cluster import KMeans
from LSH import convert_binary, convert_binary_alt, convert_binary_old, minhash, lsh, common_count
from data_loader import load
def main():
"""
Runs the whole LSHC procedure, and stores results in a csv file.
:return:
"""
identify_common_count = False
run_lsh = True
write_result = True
thresholds = [x / 100 for x in range(5, 100, 5)]
bootstraps = 1
random.seed(0)
file_path = "data/TVs.json"
result_path = "results/"
start_time = time.time()
data_list, duplicates = load(file_path)
if identify_common_count:
common_count(data_list)
if run_lsh:
if write_result:
with open(result_path + "results.csv", 'w') as out:
out.write(
"t,comparisons,pq,pc,f1_star,f1,comparisons_alt,pq_alt,pc_alt,f1_star_alt,f1_alt,comparisons_old,"
"pq_old,pc_old,f1_star_old,f1_old\n")
for t in thresholds:
print("t = ", t)
# Initialize statistics, where results = [comparisons, pq, pc, f1_star, f1].
results = np.zeros(5)
results_alt = np.zeros(5)
results_old = np.zeros(5)
for run in range(bootstraps):
data_sample, duplicates_sample = bootstrap(data_list, duplicates)
comparisons_run, pq_run, pc_run, f1_star_run, f1_run = do_lshc(data_sample, duplicates_sample, t)
results += np.array([comparisons_run, pq_run, pc_run, f1_star_run, f1_run])
comparisons_alt_run, pq_alt_run, pc_alt_run, f1_star_alt_run, f1_alt_run = do_lshc_alt(data_sample,
duplicates_sample,
t)
results_alt += np.array([comparisons_alt_run, pq_alt_run, pc_alt_run, f1_star_alt_run, f1_alt_run])
comparisons_old_run, pq_old_run, pc_old_run, f1_star_old_run, f1_old_run = do_lshc_old(data_sample,
duplicates_sample,
t)
results_old += np.array([comparisons_old_run, pq_old_run, pc_old_run, f1_star_old_run, f1_old_run])
# Compute average statistics over all bootstraps.
statistics = results / bootstraps
statistics_alt = results_alt / bootstraps
statistics_old = results_old / bootstraps
if write_result:
with open(result_path + "results.csv", 'a') as out:
out.write(str(t))
for stat in statistics:
out.write("," + str(stat))
for stat in statistics_alt:
out.write("," + str(stat))
for stat in statistics_old:
out.write("," + str(stat))
out.write("\n")
end_time = time.time()
print("Elapsed time:", end_time - start_time, "seconds")
def do_lshc(data_list, duplicates, t):
"""
Bins items using MinHash and LSH, clusters using K-means, and computes and returns performance metrics based on
the matrix of true duplicates.
:param data_list: a list of items
:param duplicates: a binary matrix where item (i, j) is equal to one if items i and j are duplicates, and zero
otherwise
:param t: the threshold value
:return: the fraction of comparisons, pair quality, pair completeness, F_1^* measure, and F_1 measure.
"""
binary_vec = convert_binary(data_list)
n = round(round(0.5 * len(binary_vec)) / 100) * 100
signature = minhash(binary_vec, n)
candidates = lsh(signature, t)
# Compute number of comparisons.
comparisons = np.sum(candidates) / 2
comparison_frac = comparisons / comb(len(data_list), 2)
# Compute matrix of correctly binned duplicates, where element (i, j) is equal to one if item i and item j are
# duplicates, and correctly classified as such by LSH.
correct = np.where(duplicates + candidates == 2, 1, 0)
n_correct = np.sum(correct) / 2
# Compute Pair Quality (PQ)
pq = n_correct / comparisons
# Compute Pair Completeness (PC)
pc = n_correct / (np.sum(duplicates) / 2)
# Compute F_1^* measure.
f1_star = 2 * pq * pc / (pq + pc)
# Cluster and compute F_1 measure.
tp, precision = cluster(data_list, candidates, duplicates)
recall = tp / (np.sum(duplicates) / 2)
f1 = 2 * precision * recall / (precision + recall)
return comparison_frac, pq, pc, f1_star, f1
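# Standalone usage sketch (file path and threshold are example values):
#   data_list, duplicates = load("data/TVs.json")
#   frac, pq, pc, f1_star, f1 = do_lshc(data_list, duplicates, t=0.5)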
def do_lshc_alt(data_list, duplicates, t):
"""
Bins items using MinHash and LSH, clusters using K-means, and computes and returns performance metrics based on
the matrix of true duplicates.
:param data_list: a list of items
:param duplicates: a binary matrix where item (i, j) is equal to one if items i and j are duplicates, and zero
otherwise
:param t: the threshold value
:return: the fraction of comparisons, pair quality, pair completeness, F_1^* measure, and F_1 measure
"""
binary_vec = convert_binary_alt(data_list)
n = round(round(0.5 * len(binary_vec)) / 100) * 100
signature = minhash(binary_vec, n)
candidates = lsh(signature, t)
# Compute number of comparisons.
comparisons = np.sum(candidates) / 2
comparison_frac = comparisons / comb(len(data_list), 2)
# Compute matrix of correctly binned duplicates, where element (i, j) is equal to one if item i and item j are
# duplicates, and correctly classified as such by LSH.
correct = np.where(duplicates + candidates == 2, 1, 0)
n_correct = np.sum(correct) / 2
# Compute Pair Quality (PQ)
pq = n_correct / comparisons
# Compute Pair Completeness (PC)
pc = n_correct / (np.sum(duplicates) / 2)
# Compute F_1^* measure.
f1_star = 2 * pq * pc / (pq + pc)
# Cluster and compute F_1 measure.
tp, precision = cluster(data_list, candidates, duplicates)
recall = tp / (np.sum(duplicates) / 2)
f1 = 2 * precision * recall / (precision + recall)
return comparison_frac, pq, pc, f1_star, f1
def do_lshc_old(data_list, duplicates, t):
"""
Bins items using MinHash and LSH, clusters using K-means, and computes and returns performance metrics based on
the matrix of true duplicates.
NOTE. This is the old implementation by Hartveld et al. (2018), implemented for evaluation purposes.
:param data_list: a list of items
:param duplicates: a binary matrix where item (i, j) is equal to one if items i and j are duplicates, and zero
otherwise
:param t: the threshold value
:return: the fraction of comparisons, pair quality, pair completeness, F_1^* measure, and F_1 measure
"""
binary_vec = convert_binary_old(data_list)
n = round(round(0.5 * len(binary_vec)) / 100) * 100
signature = minhash(binary_vec, n)
candidates = lsh(signature, t)
# Compute number of comparisons.
comparisons = np.sum(candidates) / 2
comparison_frac = comparisons / comb(len(data_list), 2)
# Compute matrix of correctly binned duplicates, where element (i, j) is equal to one if item i and item j are
# duplicates, and correctly classified as such by LSH.
correct = np.where(duplicates + candidates == 2, 1, 0)
    n_correct = np.sum(correct) / 2
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 14:36:07 2021
@author: utric
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy import cos, sin, arctan2 as atan, sqrt, pi as π, sign, log
from scipy.integrate import quad, dblquad
from scipy.special import ellipk as ellK, ellipe as ellE
from scipy.special import ellipkinc as ellK_inc, ellipeinc as ellE_inc
from scipy.constants import mu_0 as μ0
_vec_0 = np.array([0.,0.,0.])
_vec_x = np.array([1.,0.,0.])
_vec_y = np.array([0.,1.,0.])
_vec_z = np.array([0.,0.,1.])
import pycoilib as pycoil
z0, r0 = 0., 1000e-3
φ1 = π/10
a0 = 0.5e-3
z, r = 0, r0-a0
def get_A_φ(φ):
m = (4*r0*r / ( (r0+r)**2 +(z-z0)**2 ) )
def f2(φ0):
ψ = (φ0-φ-π)/2
t1 = (r0**2 + r**2 +(z-z0)**2) /sqrt( (r0+r)**2 +(z-z0)**2)
        t2 = sqrt((r0+r)**2 + (z-z0)**2)
'''
This script provides helper functions for preprocessing.
Most of the code is converted from the official LayoutNet Matlab code.
All functions, naming rules, and data flow follow the official implementation
for easier conversion and comparison.
Code is not optimized for python or numpy yet.
Author: <NAME>
Email : <EMAIL>
'''
import sys
import numpy as np
from scipy.ndimage import map_coordinates
import cv2
from pylsd import lsd
import torch
def computeUVN(n, in_, planeID):
'''
compute v given u and normal.
'''
if planeID == 2:
n = np.array([n[1], n[2], n[0]])
elif planeID == 3:
n = np.array([n[2], n[0], n[1]])
bc = n[0] * np.sin(in_) + n[1] * np.cos(in_)
bs = n[2]
out = np.arctan(-bc / (bs + 1e-9))
return out
def computeUVN_vec(n, in_, planeID):
'''
vectorization version of computeUVN
@n N x 3
@in_ MN x 1
@planeID N
'''
n = n.copy()
if (planeID == 2).sum():
n[planeID == 2] = np.roll(n[planeID == 2], 2, axis=1)
if (planeID == 3).sum():
n[planeID == 3] = np.roll(n[planeID == 3], 1, axis=1)
n = np.repeat(n, in_.shape[0] // n.shape[0], axis=0)
assert n.shape[0] == in_.shape[0]
bc = n[:, [0]] * np.sin(in_) + n[:, [1]] * np.cos(in_)
bs = n[:, [2]]
out = np.arctan(-bc / (bs + 1e-9))
return out
def xyz2uvN(xyz, planeID=1):
ID1 = (int(planeID) - 1 + 0) % 3
ID2 = (int(planeID) - 1 + 1) % 3
ID3 = (int(planeID) - 1 + 2) % 3
normXY = np.sqrt(xyz[:, [ID1]] ** 2 + xyz[:, [ID2]] ** 2)
normXY[normXY < 0.000001] = 0.000001
normXYZ = np.sqrt(xyz[:, [ID1]] ** 2 + xyz[:, [ID2]] ** 2 + xyz[:, [ID3]] ** 2)
v = np.arcsin(xyz[:, [ID3]] / normXYZ)
u = np.arcsin(xyz[:, [ID1]] / normXY)
valid = (xyz[:, [ID2]] < 0) & (u >= 0)
u[valid] = np.pi - u[valid]
valid = (xyz[:, [ID2]] < 0) & (u <= 0)
u[valid] = -np.pi - u[valid]
uv = np.hstack([u, v])
uv[np.isnan(uv[:, 0]), 0] = 0
return uv
def uv2xyzN(uv, planeID=1):
ID1 = (int(planeID) - 1 + 0) % 3
ID2 = (int(planeID) - 1 + 1) % 3
ID3 = (int(planeID) - 1 + 2) % 3
xyz = np.zeros((uv.shape[0], 3))
xyz[:, ID1] = np.cos(uv[:, 1]) * np.sin(uv[:, 0])
xyz[:, ID2] = np.cos(uv[:, 1]) * np.cos(uv[:, 0])
xyz[:, ID3] = np.sin(uv[:, 1])
return xyz
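# Sanity-check sketch (illustrative): xyz2uvN and uv2xyzN are inverse maps on
# the unit sphere for the same planeID, e.g.
#   uv = np.array([[0.3, 0.1]])
#   np.allclose(xyz2uvN(uv2xyzN(uv, 1), 1), uv)   # -> True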
def uv2xyzN_vec(uv, planeID):
'''
vectorization version of uv2xyzN
@uv N x 2
@planeID N
'''
assert (planeID.astype(int) != planeID).sum() == 0
planeID = planeID.astype(int)
ID1 = (planeID - 1 + 0) % 3
ID2 = (planeID - 1 + 1) % 3
ID3 = (planeID - 1 + 2) % 3
ID = np.arange(len(uv))
xyz = np.zeros((len(uv), 3))
xyz[ID, ID1] = np.cos(uv[:, 1]) * np.sin(uv[:, 0])
xyz[ID, ID2] = np.cos(uv[:, 1]) * np.cos(uv[:, 0])
xyz[ID, ID3] = np.sin(uv[:, 1])
return xyz
def warpImageFast(im, XXdense, YYdense):
minX = max(1., np.floor(XXdense.min()) - 1)
minY = max(1., np.floor(YYdense.min()) - 1)
maxX = min(im.shape[1], np.ceil(XXdense.max()) + 1)
maxY = min(im.shape[0], np.ceil(YYdense.max()) + 1)
im = im[int(round(minY-1)):int(round(maxY)),
int(round(minX-1)):int(round(maxX))]
assert XXdense.shape == YYdense.shape
out_shape = XXdense.shape
coordinates = [
(YYdense - minY).reshape(-1),
(XXdense - minX).reshape(-1),
]
im_warp = np.stack([
map_coordinates(im[..., c], coordinates, order=1).reshape(out_shape)
for c in range(im.shape[-1])],
axis=-1)
return im_warp
def rotatePanorama(img, vp=None, R=None):
'''
Rotate panorama
if R is given, vp (vanishing point) will be overlooked
otherwise R is computed from vp
'''
sphereH, sphereW, C = img.shape
# new uv coordinates
TX, TY = np.meshgrid(range(1, sphereW + 1), range(1, sphereH + 1))
TX = TX.reshape(-1, 1, order='F')
TY = TY.reshape(-1, 1, order='F')
ANGx = (TX - sphereW/2 - 0.5) / sphereW * np.pi * 2
ANGy = -(TY - sphereH/2 - 0.5) / sphereH * np.pi
uvNew = np.hstack([ANGx, ANGy])
xyzNew = uv2xyzN(uvNew, 1)
# rotation matrix
if R is None:
R = np.linalg.inv(vp.T)
xyzOld = np.linalg.solve(R, xyzNew.T).T
uvOld = xyz2uvN(xyzOld, 1)
Px = (uvOld[:, 0] + np.pi) / (2*np.pi) * sphereW + 0.5
Py = (-uvOld[:, 1] + np.pi/2) / np.pi * sphereH + 0.5
Px = Px.reshape(sphereH, sphereW, order='F')
Py = Py.reshape(sphereH, sphereW, order='F')
# boundary
imgNew = np.zeros((sphereH+2, sphereW+2, C), np.float64)
imgNew[1:-1, 1:-1, :] = img
imgNew[1:-1, 0, :] = img[:, -1, :]
imgNew[1:-1, -1, :] = img[:, 0, :]
imgNew[0, 1:sphereW//2+1, :] = img[0, sphereW-1:sphereW//2-1:-1, :]
imgNew[0, sphereW//2+1:-1, :] = img[0, sphereW//2-1::-1, :]
imgNew[-1, 1:sphereW//2+1, :] = img[-1, sphereW-1:sphereW//2-1:-1, :]
    imgNew[-1, sphereW//2+1:-1, :] = img[-1, sphereW//2-1::-1, :]
imgNew[0, 0, :] = img[0, 0, :]
imgNew[-1, -1, :] = img[-1, -1, :]
imgNew[0, -1, :] = img[0, -1, :]
imgNew[-1, 0, :] = img[-1, 0, :]
rotImg = warpImageFast(imgNew, Px+1, Py+1)
return rotImg
def imgLookAt(im, CENTERx, CENTERy, new_imgH, fov):
sphereH = im.shape[0]
sphereW = im.shape[1]
warped_im = np.zeros((new_imgH, new_imgH, 3))
TX, TY = np.meshgrid(range(1, new_imgH + 1), range(1, new_imgH + 1))
TX = TX.reshape(-1, 1, order='F')
TY = TY.reshape(-1, 1, order='F')
TX = TX - 0.5 - new_imgH/2
TY = TY - 0.5 - new_imgH/2
r = new_imgH / 2 / np.tan(fov/2)
# convert to 3D
R = np.sqrt(TY ** 2 + r ** 2)
ANGy = np.arctan(- TY / r)
ANGy = ANGy + CENTERy
X = np.sin(ANGy) * R
Y = -np.cos(ANGy) * R
Z = TX
INDn = np.nonzero(np.abs(ANGy) > np.pi/2)
# project back to sphere
ANGx = np.arctan(Z / -Y)
RZY = np.sqrt(Z ** 2 + Y ** 2)
ANGy = np.arctan(X / RZY)
ANGx[INDn] = ANGx[INDn] + np.pi
ANGx = ANGx + CENTERx
INDy = np.nonzero(ANGy < -np.pi/2)
ANGy[INDy] = -np.pi - ANGy[INDy]
ANGx[INDy] = ANGx[INDy] + np.pi
INDx = np.nonzero(ANGx <= -np.pi); ANGx[INDx] = ANGx[INDx] + 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
Px = (ANGx + np.pi) / (2*np.pi) * sphereW + 0.5
Py = ((-ANGy) + np.pi/2) / np.pi * sphereH + 0.5
INDxx = np.nonzero(Px < 1)
Px[INDxx] = Px[INDxx] + sphereW
im = np.concatenate([im, im[:, :2]], 1)
Px = Px.reshape(new_imgH, new_imgH, order='F')
Py = Py.reshape(new_imgH, new_imgH, order='F')
warped_im = warpImageFast(im, Px, Py)
return warped_im
def separatePano(panoImg, fov, x, y, imgSize=320):
'''cut a panorama image into several separate views'''
assert x.shape == y.shape
if not isinstance(fov, np.ndarray):
fov = fov * np.ones_like(x)
sepScene = [
{
'img': imgLookAt(panoImg.copy(), xi, yi, imgSize, fovi),
'vx': xi,
'vy': yi,
'fov': fovi,
'sz': imgSize,
}
for xi, yi, fovi in zip(x, y, fov)
]
return sepScene
def lsdWrap(img):
'''
Opencv implementation of
<NAME>, <NAME>, <NAME>, and <NAME>,
LSD: a Line Segment Detector, Image Processing On Line, vol. 2012.
[Rafael12] http://www.ipol.im/pub/art/2012/gjmr-lsd/?utm_source=doi
@img
input image
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
lines = lsd(img, quant=0.7)
if lines is None:
return np.zeros_like(img), np.array([])
edgeMap = np.zeros_like(img)
for i in range(lines.shape[0]):
pt1 = (int(lines[i, 0]), int(lines[i, 1]))
pt2 = (int(lines[i, 2]), int(lines[i, 3]))
width = lines[i, 4]
cv2.line(edgeMap, pt1, pt2, 255, int(np.ceil(width / 2)))
edgeList = np.concatenate([lines, np.ones_like(lines[:, :2])], 1)
return edgeMap, edgeList
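# Typical call (the image path is an assumption):
#   img = cv2.imread("pano.png")                 # BGR
#   edgeMap, edgeList = lsdWrap(img[..., ::-1])  # convert to RGB first
# edgeList rows are [x1, y1, x2, y2, width, 1, 1] in pixel coordinates.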
def edgeFromImg2Pano(edge):
edgeList = edge['edgeLst']
if len(edgeList) == 0:
return np.array([])
vx = edge['vx']
vy = edge['vy']
fov = edge['fov']
imH, imW = edge['img'].shape
R = (imW/2) / np.tan(fov/2)
# im is the tangent plane, contacting with ball at [x0 y0 z0]
x0 = R * np.cos(vy) * np.sin(vx)
y0 = R * np.cos(vy) * np.cos(vx)
z0 = R * np.sin(vy)
vecposX = np.array([np.cos(vx), -np.sin(vx), 0])
vecposY = np.cross(np.array([x0, y0, z0]), vecposX)
vecposY = vecposY / np.sqrt(vecposY @ vecposY.T)
vecposX = vecposX.reshape(1, -1)
vecposY = vecposY.reshape(1, -1)
Xc = (0 + imW-1) / 2
Yc = (0 + imH-1) / 2
vecx1 = edgeList[:, [0]] - Xc
vecy1 = edgeList[:, [1]] - Yc
vecx2 = edgeList[:, [2]] - Xc
vecy2 = edgeList[:, [3]] - Yc
vec1 = np.tile(vecx1, [1, 3]) * vecposX + np.tile(vecy1, [1, 3]) * vecposY
vec2 = np.tile(vecx2, [1, 3]) * vecposX + np.tile(vecy2, [1, 3]) * vecposY
coord1 = [[x0, y0, z0]] + vec1
coord2 = [[x0, y0, z0]] + vec2
normal = np.cross(coord1, coord2, axis=1)
normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)
panoList = np.hstack([normal, coord1, coord2, edgeList[:, [-1]]])
return panoList
def _intersection(range1, range2):
if range1[1] < range1[0]:
range11 = [range1[0], 1]
range12 = [0, range1[1]]
else:
range11 = range1
range12 = [0, 0]
if range2[1] < range2[0]:
range21 = [range2[0], 1]
range22 = [0, range2[1]]
else:
range21 = range2
range22 = [0, 0]
b = max(range11[0], range21[0]) < min(range11[1], range21[1])
if b:
return b
b2 = max(range12[0], range22[0]) < min(range12[1], range22[1])
b = b or b2
return b
def _insideRange(pt, range):
if range[1] > range[0]:
b = pt >= range[0] and pt <= range[1]
else:
b1 = pt >= range[0] and pt <= 1
b2 = pt >= 0 and pt <= range[1]
b = b1 or b2
return b
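# Both helpers above treat u-ranges as wrapping on [0, 1): for example the
# range (0.9, 0.1) crosses the seam, so _insideRange(0.95, (0.9, 0.1)) and
# _insideRange(0.05, (0.9, 0.1)) are both True (illustrative values).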
def combineEdgesN(edges):
'''
Combine some small line segments, should be very conservative
OUTPUT
lines: combined line segments
ori_lines: original line segments
line format [nx ny nz projectPlaneID umin umax LSfov score]
'''
arcList = []
for edge in edges:
panoLst = edge['panoLst']
if len(panoLst) == 0:
continue
arcList.append(panoLst)
arcList = np.vstack(arcList)
# ori lines
numLine = len(arcList)
ori_lines = np.zeros((numLine, 8))
areaXY = np.abs(arcList[:, 2])
areaYZ = np.abs(arcList[:, 0])
areaZX = np.abs(arcList[:, 1])
planeIDs = np.argmax(np.stack([areaXY, areaYZ, areaZX], -1), 1) + 1 # XY YZ ZX
for i in range(numLine):
ori_lines[i, :3] = arcList[i, :3]
ori_lines[i, 3] = planeIDs[i]
coord1 = arcList[i, 3:6]
coord2 = arcList[i, 6:9]
uv = xyz2uvN(np.stack([coord1, coord2]), planeIDs[i])
umax = uv[:, 0].max() + np.pi
umin = uv[:, 0].min() + np.pi
if umax - umin > np.pi:
ori_lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
else:
ori_lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi
ori_lines[i, 6] = np.arccos((
np.dot(coord1, coord2) / (np.linalg.norm(coord1) * np.linalg.norm(coord2))
).clip(-1, 1))
ori_lines[i, 7] = arcList[i, 9]
# additive combination
lines = ori_lines.copy()
for _ in range(3):
numLine = len(lines)
valid_line = np.ones(numLine, bool)
for i in range(numLine):
if not valid_line[i]:
continue
dotProd = (lines[:, :3] * lines[[i], :3]).sum(1)
valid_curr = np.logical_and((np.abs(dotProd) > np.cos(np.pi / 180)), valid_line)
valid_curr[i] = False
for j in np.nonzero(valid_curr)[0]:
range1 = lines[i, 4:6]
range2 = lines[j, 4:6]
valid_rag = _intersection(range1, range2)
if not valid_rag:
continue
# combine
I = np.argmax(np.abs(lines[i, :3]))
if lines[i, I] * lines[j, I] > 0:
nc = lines[i, :3] * lines[i, 6] + lines[j, :3] * lines[j, 6]
else:
nc = lines[i, :3] * lines[i, 6] - lines[j, :3] * lines[j, 6]
nc = nc / np.linalg.norm(nc)
if _insideRange(range1[0], range2):
nrmin = range2[0]
else:
nrmin = range1[0]
if _insideRange(range1[1], range2):
nrmax = range2[1]
else:
nrmax = range1[1]
u = np.array([[nrmin], [nrmax]]) * 2 * np.pi - np.pi
v = computeUVN(nc, u, lines[i, 3])
xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
l = np.arccos(np.dot(xyz[0, :], xyz[1, :]).clip(-1, 1))
scr = (lines[i,6]*lines[i,7] + lines[j,6]*lines[j,7]) / (lines[i,6]+lines[j,6])
lines[i] = [*nc, lines[i, 3], nrmin, nrmax, l, scr]
valid_line[j] = False
lines = lines[valid_line]
return lines, ori_lines
def combineEdgesN_v2(edges):
'''
Combine some small line segments, should be very conservative
OUTPUT
lines: combined line segments
line format [nx ny nz projectPlaneID umin umax LSfov score]
coordN_lines: combined line segments with normal, start coordinate, and end coordinate
'''
arcList = []
for edge in edges:
panoLst = edge['panoLst']
if len(panoLst) == 0:
continue
arcList.append(panoLst)
arcList = np.vstack(arcList)
# ori lines
numLine = len(arcList)
ori_lines = np.zeros((numLine, 8))
ori_coordN_lines = np.zeros((numLine, 9)) # Line containing coordinate and normals
areaXY = np.abs(arcList[:, 2])
areaYZ = np.abs(arcList[:, 0])
areaZX = np.abs(arcList[:, 1])
planeIDs = np.argmax(np.stack([areaXY, areaYZ, areaZX], -1), 1) + 1 # XY YZ ZX
for i in range(numLine):
ori_lines[i, :3] = arcList[i, :3]
ori_lines[i, 3] = planeIDs[i]
coord1 = arcList[i, 3:6]
coord2 = arcList[i, 6:9]
uv = xyz2uvN(np.stack([coord1, coord2]), planeIDs[i])
umax = uv[:, 0].max() + np.pi
umin = uv[:, 0].min() + np.pi
if umax - umin > np.pi:
ori_lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
else:
ori_lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi
ori_lines[i, 6] = np.arccos((
np.dot(coord1, coord2) / (
|
np.linalg.norm(coord1)
|
numpy.linalg.norm
|
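A minimal standalone sketch of numpy.linalg.norm, the API named in the row above; the vectors below are made-up values, not data from the row.

import numpy as np

vecs = np.array([[3.0, 4.0, 0.0],
                 [0.0, 0.0, 2.0]])
# Per-row Euclidean length; keepdims=True keeps a column shape so the division broadcasts.
lengths = np.linalg.norm(vecs, axis=1, keepdims=True)   # [[5.], [2.]]
unit_vecs = vecs / lengths                              # rows rescaled to unit length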
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 by the authors listed in the LICENSE file
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
import pandas as pd
import numpy as np
import scipy
from scipy import stats
from sklearn import metrics
from statsmodels.stats import multitest
from statistics import median, mean
import matplotlib.pyplot as plt
from math import log, exp, log10, isnan, sqrt
from random import random
import os
import sys
import json
import urllib.parse
import urllib.request
from random import randint
import multiprocessing
import threading
import traceback
normalized_suffix = ", normalized"
pval_adjust_methods = {"bonferroni": "Bonferoni, one-step correction",
"sidak": "Sidak, one-step correction",
"holm-sidak": "Holm-Sidak, step down method using Sidak adjustments",
"holm": "Holm, step-down method using Bonferroni adjustments",
"simes-hochberg": "Simes-Hochberg, step-up method (independent)",
"hommel": "Hommel closed method based on Simes tests (non-negative)",
"fdr_bh": "Benjamini/Hochberg (non-negative)",
"fdr_by": "Benjamini/Yekutieli (negative)",
"fdr_tsbh": "two stage Benjamini/Hochberg fdr correction (non-negative)",
"fdr_tsbky": "two stage fdr correction (non-negative)"
}
ttest_types = {"two-sided": "two-sided",
"less": "one-sided, less",
"greater": "one-sided, greater"}
def get_relative_path(file_path):
script_path = os.getcwd()
if os.path.isabs(file_path):
script_tokens = os.path.normpath(script_path).split(os.sep)
file_tokens = os.path.normpath(file_path).split(os.sep)
if script_tokens[0] == file_tokens[0]:
while len(script_tokens) > 0 and len(file_tokens) > 0 and script_tokens[0] == file_tokens[0]:
script_tokens = script_tokens[1:]
file_tokens = file_tokens[1:]
file_path = os.path.join(*[".."] * len(script_tokens), *file_tokens) if len(file_tokens) > 0 else "."
return file_path
class LNTT(multiprocessing.Process):
COLOR_REGULATION_TREATED = "#998ec3"
COLOR_REGULATION_TREATED_LABEL = "#6a6388"
COLOR_REGULATION_WT = "#f1a340"
COLOR_REGULATION_WT_LABEL = "#b67b30"
COLOR_UNREGULATED = "grey"
COLOR_FC_BOUNDARIES = "darkred"
COLOR_DISTRIBUTION_BAR = "#67a9cf"
def __init__(self, lntt_queue, external_queue):
super(LNTT, self).__init__()
self.logging = external_queue
self.lntt_queue = lntt_queue
self.internal_queue = multiprocessing.Queue()
self.interrupt = False
def run(self):
while True:
task = self.lntt_queue.get()
if type(task) == dict:
self.interrupt = False
multiprocessing.Process(target = self.process, args=(task, self.internal_queue)).start()
elif type(task) == int:
self.internal_queue.put(task)
else:
self.internal_queue.put(1)
break
def data_frame_imputation(self, data_frame, column_names, parameters, internal_queue, report):
data_frame_imputation = parameters["data_imputation_value"]
self.logging.put("Imputing the data containing at most %i missing values per entitiy" % data_frame_imputation)
num_pre = len(data_frame)
if data_frame_imputation > 0:
if report != None: report.append("Values were imputed when an entity contained up to %i missing values." % data_frame_imputation)
rows_to_drop = list(data_frame[column_names].isnull().sum(axis = 1) >= data_frame_imputation)
rows_to_drop = np.array([idx for idx, v in enumerate(rows_to_drop) if v])
data_frame.drop(rows_to_drop, inplace = True)
data_frame.reset_index(inplace = True, drop = True)
set_col_names = set(column_names)
for ci, col in enumerate(data_frame):
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
if col not in set_col_names: continue
l = sorted(list(v for v in data_frame[col] if not isnan(v)))
max_val = l[int(len(l) * 0.05)]
for i, v in enumerate(data_frame[col].isnull()):
if v:
data_frame.iloc[i, ci] = max_val * random()
else:
# delete all rows that are not fully filled
if report != None: report.append("All entities at least one with missing value were discarded.")
rows_to_drop = list(data_frame[column_names].isnull().any(axis = 1))
rows_to_drop = np.array([idx for idx, v in enumerate(rows_to_drop) if v])
data_frame.drop(rows_to_drop, inplace = True)
data_frame.reset_index(inplace = True, drop = True)
num_post = len(data_frame)
self.logging.put("After data imputation, %s of %s entities remain" % (num_post, num_pre))
if report != None: report.append("After this filtering step, %i entities remained." % len(data_frame))
def cv_filtering(self, data_frame, column_names, parameters, internal_queue, report):
cv_threshold = parameters["cv_threshold"]
num_pre = len(data_frame)
self.logging.put("Filtering out entities having a CV <= %0.3f" % cv_threshold)
if report != None: report.append("The entities were filtered by the covariance of variation (CV): when the entities had a CV < %0.1f %% over all measurement (regardless of their conditions), they were discarded." % (cv_threshold * 100))
cv = data_frame[column_names].std(axis = 1, skipna = True) / data_frame[column_names].mean(axis = 1, skipna = True)
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
rows_to_drop = np.array([i for i, v in enumerate(list(cv < cv_threshold)) if v])
data_frame.drop(rows_to_drop, inplace = True)
data_frame.reset_index(inplace = True, drop = True)
num_post = len(data_frame)
self.logging.put("After CV filtering, %s of %s entities remain" % (num_post, num_pre))
if report != None: report.append("After CV filtering, %i entities remained." % len(data_frame))
def normalization(self, original_data_frame, output_folder, column_names, conditions, parameters, internal_queue, report):
global normalized_suffix
reference_titles = conditions[parameters["norm_ref_condition"]]
with_plotting = parameters["with_normalization_plotting"]
self.logging.put("Normalizing columns applying rank invariant set normalization")
if report != None: report.append("Normalization was performed by applying 'global rank-invariant set normalization' (DOI: 10.1186/1471-2105-9-520).")
col_cnt = len([c for c in original_data_frame])
data_frame = pd.DataFrame(original_data_frame[column_names])
n_rows, n_cols = len(data_frame), len(data_frame.keys())
# save data_frame before normalization
violin_data_frame, violin_data_frame_2, violin_col_names = [], [], []
violin_distribution, violin_distribution_2, vd_col_names = [], [], []
reference, max_val = 0, 0
for col in reference_titles:
l = np.sum(~np.isnan(data_frame[col]))
if max_val < l:
reference = col
max_val = l
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
# computing standard deviation of reference columns for median window estimation
if len(reference_titles) > 1:
std = data_frame[reference_titles].std(axis = 1, skipna = True)
# sorting all columns according to reference
masked_ref = np.ma.masked_where(np.isnan(data_frame[reference]), data_frame[reference])
sorting = np.argsort(masked_ref)
unsorting = np.zeros(len(sorting), dtype = "int64")
for i, v in enumerate(sorting): unsorting[v] = i
# sort all columns according to the reference column
for t in data_frame:
data_frame[t] = np.array([data_frame[t][j] for j in sorting], dtype=np.float64)
# set the reference columns and its masked version
RefX = np.array(data_frame[reference])
masked_X = np.ma.masked_where(np.isnan(RefX), RefX)
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
# linear regression for estimating appropriate boundaries for
# consecutive moving average window, in equation: y = m * x + n
if len(reference_titles) > 1:
std = np.array([std[j] for j in sorting], dtype=np.float64)
std_not_nan = ~np.isnan(std)
ref_not_nan = ~np.isnan(RefX)
m, n = np.polyfit(masked_X[ref_not_nan & std_not_nan], std[ref_not_nan & std_not_nan], 1)
else:
m, n = 1. / 10., 0
# compute boundaries of the median window for all values
L = np.searchsorted(RefX, masked_X - 3 * m * masked_X)
R = np.searchsorted(masked_X, masked_X + 3 * m * masked_X)
reference_col_num = 0
min_points_in_window = 10
num_nan = max(np.sum(np.isnan(RefX)), 1)
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
for cc, col_name in enumerate(data_frame):
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
self.logging.put(" - Normalizing column '%s'" % col_name)
Y = data_frame[col_name]
masked_Y = np.ma.masked_where(np.isnan(Y), Y)
violin_data = np.log10(Y)
violin_distribution.append(np.array(violin_data[~np.isnan(violin_data)]).tolist())
if col_name == reference:
reference_col_num = cc
violin_data = np.log10(Y)
violin_distribution_2.append(np.array(violin_data[~np.isnan(violin_data)]).tolist())
Y_no_nans = ~np.isnan(Y)
vd_col_names.append("%s (%i)" % (col_name, np.sum(Y_no_nans)))
original_data_frame.insert(col_cnt, "%s%s" % (col_name, normalized_suffix), np.array([Y[j] for j in unsorting]), True)
col_cnt += 1
continue
violin_data_col = np.log10(data_frame[col_name] / RefX)
violin_data_frame.append(np.array(violin_data_col[~np.isnan(violin_data_col)]).tolist())
Y_l = np.array(Y / RefX)
prevL, last = np.zeros(len(Y), dtype = "int64"), 0
Y_no_nans = ~np.isnan(Y_l)
for i in range(1, len(Y)):
prevL[i] = last
if Y_no_nans[i]: last = i
nextR, last = np.zeros(len(Y), dtype = "int64"), len(Y) - 1
for i in range(len(Y) - 2, -1, -1):
nextR[i] = last
if Y_no_nans[i]: last = i
min_points = min(min_points_in_window, log(np.sum(Y_no_nans)) + 1)
YY = np.array([np.nan] * n_rows, dtype = "float64")
nan_start = -1
for z, (l, r, y) in enumerate(zip(L, R, Y)):
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
if np.isnan(RefX[z]):
nan_start = z
break
if np.ma.is_masked(y): continue
Y_sub = Y_l[l : r + 1]
n = list(Y_sub[~np.isnan(Y_sub)])
while len(n) < min_points:
if l > 0:
l = prevL[l]
if ~np.isnan(Y_l[l]): n.append(Y_l[l])
if r < n_rows - num_nan:
r = nextR[r]
if ~np.isnan(Y_l[r]): n.append(Y_l[r])
if l == 0 and r >= n_rows - num_nan: break
YY[z] = np.median(n)
normalized_column_data = Y / YY
violin_data_col = np.log10(normalized_column_data / RefX)
violin_data_frame_2.append(np.array(violin_data_col[~np.isnan(violin_data_col)]).tolist())
Y_no_nans = ~np.isnan(normalized_column_data / RefX)
violin_col_names.append("%s (%i)" % (col_name, np.sum(Y_no_nans)))
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
# infer normalization factor for intensities without corresponding reference value
# by considering normalized neighbors
if np.isnan(RefX[-1]):
Y_sorted_index = np.argsort(np.argsort(masked_Y))
for i in range(nan_start, n_rows):
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
if np.isnan(Y[i]): continue
n, l, r = [], i, i
if not np.isnan(YY[Y_sorted_index[i]]): n.append(YY[Y_sorted_index[i]])
while len(n) < 5:
if l > 0:
l -= 1
if not np.isnan(YY[Y_sorted_index[l]]): n.append(YY[Y_sorted_index[l]])
if r < n_rows - 1:
r += 1
if not np.isnan(YY[Y_sorted_index[r]]): n.append(YY[Y_sorted_index[r]])
YY[i] = np.median(n)
normalized_column_data = Y / YY
violin_data = np.log10(normalized_column_data)
violin_distribution_2.append(np.array(violin_data[~np.isnan(violin_data)]).tolist())
Y_no_nans = ~np.isnan(normalized_column_data)
vd_col_names.append("%s (%i)" % (col_name, np.sum(Y_no_nans)))
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
# plot a normalization figure to check the normalization quality
if with_plotting:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), sharey = True) #, tight_layout=True)
plot_X = RefX[~np.isnan(RefX) & ~np.isnan(Y)]
plot_Y = Y[~np.isnan(RefX) & ~np.isnan(Y)]
plot_Y_corr = normalized_column_data[~np.isnan(RefX) & ~np.isnan(Y)]
plot_YY = YY[~np.isnan(RefX) & ~np.isnan(Y)]
ax2.axhline(linewidth=2, color='r')
ax1.scatter(np.log10(plot_X), np.log2(plot_Y / plot_X))
ax1.plot(np.log10(plot_X), np.log2(plot_YY), '-', color = "r")
ax2.scatter(np.log10(plot_X), np.log2(plot_Y_corr / plot_X))
ax1.set_ylabel("log${}_2$ (protein abundance ratio)")
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("log${}_{10}$ protein abundances of '%s'" % reference)
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
clean_col_name = col_name.replace("/", "").replace("\\", "")
clean_col_name = clean_col_name.replace(":", "").replace("*", "")
clean_col_name = clean_col_name.replace("?", "").replace("!", "")
clean_col_name = clean_col_name.replace("<", "").replace(">", "")
clean_col_name = clean_col_name.replace("\"", "").replace("'", "").replace("|", "")
fig.suptitle("Condition '%s' against reference\n'%s' during normalization" % (col_name, reference))
#plt.tight_layout()
fig_filename = os.path.join(output_folder, "%s-normalization.pdf" % clean_col_name)
plt.savefig(fig_filename, dpi = 600)
try:
result = internal_queue.get_nowait()
if result != None:
self.interrupt = True
return
except:
pass
fig_filename = os.path.join(output_folder, "%s-normalization.png" % clean_col_name)
plt.savefig(fig_filename, dpi = 600)
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
original_data_frame.insert(col_cnt, "%s%s" % (col_name, normalized_suffix),
|
np.array([normalized_column_data[j] for j in unsorting])
|
numpy.array
|
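A small sketch of the numpy.array pattern used in the completion above (rebuilding a float array from a list comprehension over a permutation); the values and the permutation are illustrative assumptions.

import numpy as np

y = np.array([5.0, 2.0, 9.0, 1.0])
sorting = np.argsort(y)                              # indices that sort y ascending
unsorting = np.zeros(len(sorting), dtype="int64")
for i, v in enumerate(sorting):
    unsorting[v] = i
y_sorted = np.array([y[j] for j in sorting])             # [1., 2., 5., 9.]
y_restored = np.array([y_sorted[j] for j in unsorting])  # equals y again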
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import FixedLocator, FixedFormatter
def log_histogram(data, name):
"""
Create ridgeline plots, where each individual plot is a
log-y scaled histogram.
data: 2D Numpy array
Each row contains all the values to include in a single histogram.
0th row will be in the lowest ridgeline plot.
Every Nth row will be a separate histogram. N is selected according
to n_target_rows, below to avoid overwhelming the viewer.
name: str
A name stem for the results image.
"""
dpi = 300
border = .125
eps = 1e-6
# n_bins = 50
n_bins = 100
n_target_rows = 16
fig = plt.figure()
ax = fig.gca()
n_rows, n_cols = data.shape
d_keep = np.maximum(1, np.floor(n_rows / n_target_rows))
i_keep = np.arange(0, n_rows, d_keep, dtype=int)
data = data[i_keep, :]
n_rows, n_cols = data.shape
x_min = np.min(data)
x_max =
|
np.max(data)
|
numpy.max
|
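A brief sketch of numpy.max (and its counterpart numpy.min) as used for histogram limits in the row above; the random data is a placeholder.

import numpy as np

data = np.random.default_rng(0).normal(size=(4, 256))
x_min = np.min(data)
x_max = np.max(data)
bin_edges = np.linspace(x_min, x_max, 101)        # 100 bins spanning the full data range
counts, _ = np.histogram(data[0], bins=bin_edges)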
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sg
from astropy.io import fits
from scipy.interpolate import CubicSpline
from scipy.ndimage.filters import percentile_filter
from scipy.signal import convolve2d
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.neighbors import KernelDensity
from collections import Counter
__author__ = 'nate'
def cantor(a, b):
"""Cantor pairing function, used to give unique int name to each observation"""
a = int(a)
b = int(b)
return (a + b) * (a + b + 1) / 2 + b
def decantor(z):
"""Inverse Cantor"""
w = math.floor(math.sqrt(8 * z + 1) / 2 - 0.5)
t = ((w + 1) * w) / 2
y = z - t
x = w - y
return int(x), int(y)
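# Hedged round-trip example (illustrative only, not part of the original module):
# encoding a pair and decoding the result returns the same pair.
def _cantor_roundtrip_demo():
    z = cantor(3, 5)                  # 41.0 under true division
    assert decantor(int(z)) == (3, 5)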
class APFRunString(str):
def __add__(self, x):
# If we're trying to add anything but an int, do normal string
# addition.
if type(x) is not int:
return str.__add__(self, x)
res = ''
i = len(self)-1
while x > 0:
# Get the ASCII code of the i-th letter and "normalize" it
# so that a is 0, b is 1, etc.
# If we are at the end of the string, make it -1, so that if we
# need to "add" 1, we get a.
if i >= 0:
c = ord(self[i]) - 97
else:
c = -1
# Calculate the number of positions by which the letter is to be
# "rotated".
pos = x % 26
# Calculate x for the next letter, add a "carry" if needed.
x //= 26
if c + pos >= 26:
x += 1
# Do the "rotation".
c = (c + pos) % 26
# Add the letter at the beginning of the string.
res = chr(c + 97) + res
i -= 1
# If we didn't reach the end of the string, add the rest of the string back.
if i >= 0:
res = self[:i+1] + res
return APFRunString(res)
def filterbypercentile(arr, top, bottom):
topp = np.percentile(arr, top)
bottomp = np.percentile(arr, bottom)
shortened = [bottomp < x < topp for x in arr]
return shortened
def reject_outliers(data, m=2.): # this must be modified so that it does a biased above outlier rejection
p = 50 # PERCENTILE TO USE for split
perc = np.percentile(data, p)
upper_half = data[data > perc]
d = np.abs(data - np.median(upper_half))
d2 = np.abs(upper_half - np.median(upper_half))
mdev = np.median(d2)
s = d / mdev if mdev else 0.
return s < m
def gauss(x, *p):
"""
Simple Gaussian function
:param x: ind var
:param p: coefficients A, mu, sigma
:return: numpy array gauss(x)
"""
A, mu, sigma = p
return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))
def csq_red(model, data, dof=3.):
"""
    Computes a reduced chi square fit of data to model
Assumes model sums to one
:param model: expectation value of model
:param data: observed data
:param dof: number of degrees of freedom
:return: Reduced chi square (float)
"""
total = np.sum(data)
csq = np.power(np.array(model) * total - np.array(data), 2)
error = np.array(data)
error[np.where(error < 9)] = 9
csq = csq / error
csq = np.sum(csq)
csq /= len(data) - dof
return csq
def minimal_csq(coeffs, data, dof=3., n_it=20, min_thresh=0.005):
"""
Does a binary search to find a minimal chi square.
:param coeffs: gaussian fit coefficients
:param data: column to compute fit
:param dof: number of DOF for reduced chi square computation
:param n_it: number of iterations of Binary Search
:param min_thresh: difference in BS iterations deemed "sufficiently close"
:return: minimal value of chi square given n_it and min_thresh
"""
# TODO - anonymize from specific function type
indices = np.arange(len(data))
ub = coeffs[1] + 0.9
lb = coeffs[1] - 1
ctr = n_it
csq_past = 100
csq_now = 0
quick_csq = lambda x: csq_red(gauss(indices, *[coeffs[0], x, coeffs[2]]), data, dof=dof)
while ctr and (csq_past - csq_now > min_thresh or ctr == n_it - 1):
csq_past = csq_now
midpoint = (ub + lb) / 2.
l_midpoint = (lb + midpoint) / 2.
r_midpoint = (ub + midpoint) / 2.
csq_l = quick_csq(l_midpoint)
csq_r = quick_csq(r_midpoint)
if csq_r < csq_l:
lb = midpoint
csq_now = csq_r
else:
ub = midpoint
csq_now = csq_l
ctr -= 1
midpoint = (ub + lb) / 2.
return csq_red(gauss(indices, *[coeffs[0], midpoint, coeffs[2]]), data, dof=dof)
def makegaussian(size, fwhm=3, center=None):
"""
Adapted from <NAME> on GitHub
Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
def find_nearest(array, value):
"""Finds nearest value in array"""
idx = (np.abs(array - value)).argmin()
return array[idx]
def pythag_dist_kernel(size=3):
s = size + size + 1
offsetx = np.repeat(np.reshape(range(s), (s, 1)), s, axis=1)
offsety = np.transpose(offsetx)
return np.square(offsetx - size) + np.square(offsety - size)
def get_header_info(rolist, info=['RA'], loc='/mir3/iodfitsdb'):
"""
Get header info for HIRES
:param rolist:
:param info:
:param loc:
:return:
"""
targ = 'Target'
info.insert(0, targ)
out = []
print (info)
for i in rolist:
t = fits.open(loc + '/rj' + str(i[0]) + '.' + str(i[1]) + '.fits')[0]
prt = [t.header['TARGNAME'].strip(' ')]
for j in info[1:]:
prt.append(t.header[j])
print (prt)
out.append(prt)
return out
def conv(arr, kernel=[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]):
return convolve2d(arr, kernel)
# ------------------------------------------------------------------------------
# Utilities for fitting and interpolation
# ------------------------------------------------------------------------------
def spline_interpolate(arr, nseg=20, percentile=50):
"""
Returns the cubic spline describing data, split into nseg segments.
:param arr: input array
:param nseg: number of same-size segments
:param percentile: what percentile of each segment to use as midpt y value?
:return: scipy.interpolate.CubicSpline
"""
l = len(arr)
midpts = [np.median(arr[:int(l // nseg // 2)])]
x = [0]
for i in range(nseg):
x += [int((0.5+i)*(l/nseg))]
midpts += [np.percentile(arr[i * (l // nseg):(i + 1) * (l // nseg)], percentile)]
x += [l-1]
midpts += [np.median(arr[-int(l // nseg / 2):])]
return CubicSpline(x,midpts)
def poly_interpolator(arr,degree=4, nseg=5,percentile=95):
"""
Returns the function describing a polynomial fitting the data, split into nseg segments
:param arr: input array
    :param degree: degree of polynomial fit
:param nseg: number of segments of equal length to use.
:param percentile:
:return:
"""
l = len(arr)
midpts = [np.median(arr[:int(l // nseg // 2)])]
x = [0]
for i in range(nseg):
x += [int((0.5 + i) * (l // nseg))]
midpts += [np.percentile(arr[i * (l // nseg):(i + 1) * (l // nseg)], percentile)]
x += [l - 1]
midpts += [np.median(arr[-int(l // nseg // 2):])]
return np.poly1d(np.polyfit(x, midpts, deg=degree))
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
# This is the most effective continuum fit
def continuum_fit(arr, percentile_kernel = 501,savitzky_kernel = 2001, savitzky_degree=4, perc=50):
# This fixes the singularities
# Value of 500 chosen arbitrarily - should not have too much of an effect
fixval = np.max([np.abs(np.min(arr) * 2),500.])
fix = arr + fixval
pcf = percentile_filter(fix, perc, size=percentile_kernel)
sav = savitzky_golay(pcf, savitzky_kernel, savitzky_degree)
return fix/(sav/np.max(sav)) - fixval
def deblaze(arr, method = 'savitzky', percentile_kernel = 101, savitzky_kernel=2001, savitzky_degree=4, perc=50):
if method == 'savitzky':
return continuum_fit(arr, percentile_kernel=percentile_kernel, savitzky_kernel=savitzky_kernel,
savitzky_degree=savitzky_degree, perc=perc)
elif method == 'meanshift':
median_of_array = np.median((arr) + 1000.)
bandwidth = estimate_bandwidth(arr[:, np.newaxis], quantile=0.1)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(arr[:, np.newaxis])
# xvals = np.arange(4608)
test = np.array(arr)
labels = ms.labels_
# Replace the missing values (not at maximal cluster) with median of array values in cluster in original array
test[labels != 0] = np.median(test[labels == 0])
test[test == 0] = 1
med_test = sg.medfilt(test, kernel_size=101)
return arr / med_test * median_of_array
elif method == 'percentile':
        # This code has been adapted - the percentile kernel specified is used as a lower bound only
fixval = np.max([np.abs(np.min(arr) * 2), 500.])
fix = arr + fixval
pcf = np.max(np.array([percentile_filter(fix, perc, size=percentile_kernel),
percentile_filter(fix, perc, size=101)]), axis=0)
return fix / (pcf / np.mean(pcf)) - fixval
else:
        raise KeyError('The deblaze method you have passed is not implemented. Please pick from savitzky, percentile, and meanshift')
# ------------------------------------------------------------------------------
# Utilities for laser search
# ------------------------------------------------------------------------------
def getpercentile(order, perc, method='meanshift', kernel_bandwidth=100, kernel='epanechnikov'):
"""
Returns value of 'perc'th percentile
(usually 75th) count value in 'order'
:param order: Spectral order to compute percentile on
:param perc: What(th) %ile to compute.
"""
#TODO - add support for piecewise thresholds
if method == 'percentile':
nsplits = 1 # Compute percentile piecewise - I have not been
maximum_thresh = 0
l=len(order)
inc = l / nsplits
for i in range(nsplits):
sub = order[i * inc:(i + 1) * inc]
percentile = np.percentile(sub, perc)
if maximum_thresh < percentile:
maximum_thresh = percentile
return maximum_thresh
elif method == 'kde':
kde = KernelDensity(kernel=kernel, bandwidth=kernel_bandwidth).fit(order)
elif method == 'meanshift':
order_subset = order[::10,np.newaxis]
try:
bandwidth = estimate_bandwidth(order_subset, quantile=0.1)
except ValueError:
            # Replace the NaN with the nearest value
order_to_estimate = order_subset
ind = np.where(~np.isinf(order_to_estimate))[0]
first, last = ind[0], ind[-1]
order_to_estimate[:first] = order_to_estimate[first]
order_to_estimate[last + 1:] = order_to_estimate[last]
bandwidth = estimate_bandwidth(order_to_estimate, quantile=0.1)
# print ('Bandwidth is {0}'.format(bandwidth))
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(order_subset)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
#for k in range(n_clusters_):
# my_members = labels == k
# print "cluster {0}: {1}".format(k, order[:, np.newaxis][my_members, 0])
#return cluster_centers[0][0]
# print(cluster_centers)
        # TODO Determine why there is an index error occurring here - should there be more than 3 clusters
# or is this normal behavior?
label_counter = Counter(labels)
top_labels = filter(lambda x: x[1] > 100, label_counter.most_common())
if top_labels.__len__() == 0:
top_labels = filter(lambda x: x[1] > 50, label_counter.most_common())
return np.max(map(lambda x: cluster_centers[x[0]][0],top_labels))
else:
raise KeyError
def contiguous_regions(condition):
"""
Borrowed from <NAME> on StackOverflow
Finds contiguous True regions of the boolean array 'condition'. Returns
a 2D array where the first column is the start index of the region and the
second column is the end index.
"""
# Find the indices of changes in 'condition'
d = np.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in 'condition'. Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
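# Illustrative sketch (assumed, not from the original source): contiguous_regions
# returns half-open [start, stop) index pairs, one row per run of True values.
def _contiguous_regions_demo():
    cond = np.array([False, True, True, False, True])
    runs = contiguous_regions(cond)
    assert runs.tolist() == [[1, 3], [4, 5]]     # runs cover indices 1-2 and index 4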
def finddeviates(order, thresh, npix=3):
"""returns a list of deviation indices [start,stop]."""
out = []
#plt.plot(order)
#plt.show()
#print(order, thresh)
#plt.cla()
for start, stop in contiguous_regions(order > thresh):
diff = stop - start
if diff >= npix:
out.append([diff, start])
return out
def findthresh(order, npix=3.0, method='full_deviation'):
"""
Computes threshold using median absolute deviation (MAD) method
note here that 'order' passed to this function is generally:
spectral order - 75th percentile count value of spectral order
That is, the sum of 3 consecutive points in order being above 0
is the same as sum of 3 being above 75th %ile in spectral order.
Returns the Median Absolute (positive) Deviation of npix (usually 3)
pixel bins above the percentile set in findhigher.
"""
# Number of pixels to demand consecutively deviant. 3 is appropriate for HIRES.
# Convolution as a way of binning by 3 pixels to see groups that exceed
if method == '3pix':
binned_ord = np.convolve(np.ones(npix) / npix, order, 'valid')
deviations = finddeviates(binned_ord, 0, npix)
uppies = []
for j in deviations:
for i in range(j[0]):
uppies.append(binned_ord[j[1] + i])
elif method == 'full_deviation':
deviations = finddeviates(order, 0, npix)
uppies = []
for j in deviations:
uppies.append(np.median(order[j[1]:j[1] + j[0]]))
return np.median(uppies)
def has_singularity(order):
""" Tests order for a 'Singularity', or a common reduction error resulting in huge counts"""
order = order[4:-4]
order_c = np.convolve(np.abs(order), [1, 1])
big = np.where(order_c > 500)[0]
zero_crossings = np.where(np.diff(np.sign(order)))[0]
return True if np.intersect1d(big, zero_crossings).__len__() else False
def hires_ignored_wavelengths(wav):
""" Ignore results from these wavelength ranges."""
ignore_lb = np.array([6557.78, 4855.78, 4335.78, 3964.78, 3928.46, 3963.25,
5890.7, 5884.73, 7585.0, 7964.0, 6863.0])
ignore_ub = np.array([6568.22, 4866.22, 4346.22, 3975.22, 3938.9, 3973.69,
5901.14, 5895.17, 7660.0, 7965.2, 6920.0])
onesarr = np.ones(len(ignore_lb))
return np.any(np.logical_xor(
|
np.greater(ignore_lb, wav * onesarr)
|
numpy.greater
|
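A minimal sketch of the numpy.greater comparison targeted by the completion above: paired with numpy.logical_xor it flags a value lying inside any [lower, upper) interval; the bounds here are invented.

import numpy as np

lower = np.array([100.0, 200.0, 300.0])
upper = np.array([110.0, 210.0, 310.0])
wav = 205.0
ones = np.ones(len(lower))
# XOR is True only where lower <= wav < upper, i.e. exactly one comparison flips.
inside_any = np.any(np.logical_xor(np.greater(lower, wav * ones),
                                   np.greater(upper, wav * ones)))   # True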
import os
import numpy as np
import copy
from .ann import exportNet
class WannDataGatherer():
''' Data recorder for WANN algorithm'''
def __init__(self, filename, hyp):
"""
Args:
filename - (string) - path+prefix of file output destination
hyp - (dict) - algorithm hyperparameters
"""
self.filename = filename # File name path + prefix
self.p = hyp
# Initialize empty fields
self.elite = []
self.best = []
self.bestFitVec = []
self.spec_fit = []
self.field = ['x_scale','fit_med','fit_count', 'best_count','elite_count','fit_max','fit_top','fit_peak', 'fit_kl_stat', 'elite_kl_stat', 'best_kl_stat', \
'node_med','conn_med',\
'elite','best']
# self.field = ['x_scale','fit_med','fit_var','fit_novelty','best_var','elite_var', 'best_novelty','elite_novelty','fit_max','fit_top','fit_peak','kl_stat',\
# 'node_med','conn_med',\
# 'elite','best']
self.objVals = np.array([])
for f in self.field[:-2]:
exec('self.' + f + ' = np.array([])')
#e.g. self.fit_max = np.array([])
self.newBest = False
def gatherData(self, pop, species):
    # Readability
p = self.p
fitness = [ind.fitness for ind in pop]
# novelty = [ind.novelty for ind in pop]
kl_stat = [ind.kl_stat for ind in pop]
count = [ind.count for ind in pop]
# var = [ind.var for ind in pop]
peakfit = [ind.fitMax for ind in pop]
nodes = np.asarray([np.shape(ind.node)[1] for ind in pop])
conns = np.asarray([ind.nConn for ind in pop])
# --- Evaluation Scale ---------------------------------------------------
if len(self.x_scale) == 0:
self.x_scale = np.append(self.x_scale, len(pop))
else:
self.x_scale = np.append(self.x_scale, self.x_scale[-1]+len(pop))
# ------------------------------------------------------------------------
# --- Best Individual ----------------------------------------------------
if p['alg_selection'] == "mean":
self.elite.append(pop[np.argmax(fitness)])
if len(self.best) == 0:
self.best = copy.deepcopy(self.elite)
elif (self.elite[-1].fitness > self.best[-1].fitness):
self.best = np.append(self.best,copy.deepcopy(self.elite[-1]))
self.newBest = True
else:
self.best = np.append(self.best,copy.deepcopy(self.best[-1]))
self.newBest = False
# elif p['alg_selection'] == "novelty":
# self.elite.append(pop[np.argmax(novelty)])
# if len(self.best) == 0:
# self.best = copy.deepcopy(self.elite)
# elif (self.elite[-1].novelty > self.best[-1].novelty):
# self.best = np.append(self.best,copy.deepcopy(self.elite[-1]))
# self.newBest = True
# else:
# self.best = np.append(self.best,copy.deepcopy(self.best[-1]))
# self.newBest = False
elif p['alg_selection'] == "stats":
self.elite.append(pop[np.argmax(kl_stat)])
if len(self.best) == 0:
self.best = copy.deepcopy(self.elite)
elif (self.elite[-1].kl_stat > self.best[-1].kl_stat):
self.best = np.append(self.best,copy.deepcopy(self.elite[-1]))
self.newBest = True
else:
self.best = np.append(self.best,copy.deepcopy(self.best[-1]))
self.newBest = False
elif p['alg_selection'] == "count":
self.elite.append(pop[
|
np.argmax(count)
|
numpy.argmax
|
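A tiny sketch of numpy.argmax, the API named above, in the elite-selection style of the row; the population and fitness values are placeholders.

import numpy as np

population = ["ind_a", "ind_b", "ind_c"]    # stand-ins for individual objects
fitness = [0.12, 0.97, 0.45]
elite = population[np.argmax(fitness)]      # "ind_b"; ties resolve to the first maximum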
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import logging
import csv
import os
from skimage.feature import peak_local_max
from skimage.morphology import label, closing, disk, watershed
from skimage.measure import regionprops
from skimage.filters import gaussian
from skimage.restoration import denoise_tv_chambolle
from . import settings, tiles
log = logging.getLogger()
class Assignment:
"""
Representation of an assignment containing all the data (las, dom, dtm) to delineate the individual trees.
"""
__slots__ = ["tile_number", "tile_buffer", "extent", "params", "resolution", "csv_file", "timberline", "las", "dom",
"dtm"]
""" Using slots for memory performance boost. See: http://book.pythontips.com/en/latest/__slots__magic.html"""
def __init__(self, tile_number, tile_buffer, extent):
"""
Initialization of an assignment.
:param tile_number: The unique tile number
:type tile_number: Integer
:param tile_buffer: The buffer in meter to extend this assignment before processing
:type tile_buffer: Integer
:param extent: The extent of the tiles belonging to this assignment
:type extent: :class:`treedetection.tiles.Extent`
"""
self.tile_number = tile_number
self.tile_buffer = tile_buffer
self.extent = extent
self.params = WatershedParams()
self.resolution = self.params.resolution
self.csv_file = settings.get("data.tmp_trees").format(tile_number=self.tile_number)
self.timberline = settings.get("tree_restrictions.timberline")
las_path = settings.get("data.las")[tile_number % len(settings.get("data.las"))].format(tile_number=tile_number)
dom_path = settings.get("data.dom")[tile_number % len(settings.get("data.dom"))].format(tile_number=tile_number)
dtm_path = settings.get("data.dtm")[tile_number % len(settings.get("data.dtm"))].format(tile_number=tile_number)
self.las = tiles.LasTile(tile_number, "LAS", las_path, self.resolution, tiles.Neighborhood.CENTER, extent,
self.timberline)
self.dom = tiles.RasterTile(tile_number, "DOM", dom_path, self.resolution, tiles.Neighborhood.CENTER, extent,
self.timberline)
self.dtm = tiles.RasterTile(tile_number, "DTM", dtm_path, self.resolution, tiles.Neighborhood.CENTER, extent,
self.timberline)
def __repr__(self):
"""
String representation of this object
:return: The assignment as a string
"""
return "Assignment {}".format(self.tile_number)
def __eq__(self, other):
"""
Check equality of two assignments. Two assignments are equal when processing the same tile number.
:param other: The assignment to compare this object with
:type other: :class:`runner.Assignment`
:return: True, if the assignment represent the same tile number, false otherwise.
"""
return isinstance(other, self.__class__) and self.tile_number == other.tile_number
def set_neighbour(self, tile_number, extent, position):
"""
Append neighboring tiles to this assignment.
:param tile_number: The unique tile number
:type tile_number: Integer
:param extent: The extent of the tiles belonging to this assignment
:type extent: :class:`treedetection.tiles.Extent`
:param position: The position of this tile in the cartesian 8-neighborhood
:type position: :class:`treedetection.tiles.Neighborhood`
"""
las_path = settings.get("data.las")[tile_number % len(settings.get("data.las"))].format(tile_number=tile_number)
dom_path = settings.get("data.dom")[tile_number % len(settings.get("data.dom"))].format(tile_number=tile_number)
dtm_path = settings.get("data.dtm")[tile_number % len(settings.get("data.dtm"))].format(tile_number=tile_number)
self.las.append(tiles.LasTile(tile_number, "LAS", las_path, self.resolution, position, extent, self.timberline))
self.dom.append(
tiles.RasterTile(tile_number, "DOM", dom_path, self.resolution, position, extent, self.timberline))
self.dtm.append(
tiles.RasterTile(tile_number, "DTM", dtm_path, self.resolution, position, extent, self.timberline))
def ready(self):
"""
Check for existence of LAS, DOM and DTM files to run this assignment
:return: True, if LAS, DOM and DTM files are existing. False otherwise.
:rtype: Boolean
"""
return self.las.exists() and self.dom.exists() and self.dtm.exists()
def clear_data(self):
"""
Delete some data after delineation of trees to free some memory.
"""
del self.las
del self.dtm
del self.dom
del self.params
log.debug("Removed LAS, DTM and DOM from memory in {}".format(self))
def run(self):
"""
Main entry point to run this assignment. Checks for existence of required data before.
Prepare all the LAS-, DOM- and DTM-files, delineate the trees and export it to a CSV file.
"""
if not self.ready():
log.info("Skipping {}. Could not find all required data (LAS, DOM and DTM)".format(self))
elif os.path.exists(self.csv_file):
log.info("Skipping {}. Trees already delineated".format(self))
else:
log.info("Loading data for {}".format(self.tile_number))
self.las.prepare(self.tile_buffer)
self.dom.prepare(self.tile_buffer)
self.dtm.prepare(self.tile_buffer)
log.info("Delineating trees in {}".format(self.las.number))
trees = self._delineate_trees()
self.clear_data()
self.to_csv(trees)
def _delineate_trees(self):
"""
Private function to get the single trees by watershed segmentation.
Forest and open field areas are processed separately with distinct parameters.
:return: A list of trees
:rtype: Dictionary of :class:`treedetection.assignments.Tree`
"""
# Closing
closed = closing(self.las.image, disk(self.params.closing_radius))
log.debug("Morphologically closed {}".format(self.las.number))
# Create a mask for regions with trees
mask = numpy.copy(closed)
mask[mask != 0] = 1
del closed
veg_dom = numpy.ma.array(self.dom.image, mask=(1 - mask).astype(int), fill_value=0).filled()
# Separating field from forest regions
regions_field = label(mask)
regions_forest = numpy.copy(regions_field)
region_props = regionprops(regions_field, intensity_image=self.dtm.image)
forest_labels = [r.label for r in region_props if
r.filled_area / (
self.params.resolution * self.params.resolution) > self.params.forest_area_threshold or r.mean_intensity > self.params.conifer_height_threshold]
regions_forest[numpy.isin(regions_forest, forest_labels, invert=True)] = 0
regions_field[numpy.isin(regions_field, forest_labels)] = 0
field = numpy.ma.array(veg_dom, mask=regions_forest, fill_value=0).filled()
forest = numpy.ma.array(veg_dom, mask=regions_field, fill_value=0).filled()
log.debug("Separated forest and field areas for {}".format(self.las.number))
del veg_dom
trees_field = self._watershed(field, self.las.number, "field", self.params.field_denoising_weight,
self.params.field_sigma, self.params.field_truncate,
self.params.field_min_distance, self.params.field_compactness)
trees_forest = self._watershed(forest, self.las.number, "forest", self.params.forest_denoising_weight,
self.params.forest_sigma, self.params.forest_truncate,
self.params.forest_min_distance, self.params.forest_compactness)
trees = trees_field + (trees_forest * (numpy.max(trees_field) + 1))
del field
del forest
del trees_field
del trees_forest
log.info("Found {} trees in {}".format(len(regionprops(trees)), self.las.number))
return self._extract_tree_params(trees)
def _extract_tree_params(self, trees):
"""
Private method to extract tree params from delineated trees.
        To get the shape, the region properties of the trees have to be analyzed with the DOM.
        To get the height, an additional analyzing loop with the DTM is required.
        Unrealistic trees will be eliminated according to the settings.
:param trees: Regions of all trees found in the given area.
:type trees: numpy Array
:return: Dictionary object containing all trees
:rtype: Dictionary
"""
found_trees = {}
# Tree position (3D)
for tree in regionprops(trees, intensity_image=self.dom.image):
# Export with location of weighted_centroid
centroid = list(tree.weighted_centroid)
if self.params.tile_buffer * self.params.resolution < centroid[0] < self.dom.image.shape[0] - (
self.params.tile_buffer * self.params.resolution) and self.params.tile_buffer * self.params.resolution < \
centroid[1] < self.dom.image.shape[1] - (self.params.tile_buffer * self.params.resolution):
x = centroid[1] / self.params.resolution + self.las.extent.min_x
y = self.las.extent.max_y - centroid[0] / self.params.resolution
z = tree.max_intensity
loc_y, loc_x = numpy.where(tree.intensity_image == z)
x_height = loc_x[0]
y_height = loc_y[0]
chm = tree.intensity_image[numpy.nonzero(tree.intensity_image)]
minor_axis = max(1 / self.params.resolution, tree.minor_axis_length / self.params.resolution)
major_axis = max(1 / self.params.resolution, tree.major_axis_length / self.params.resolution)
found_trees[tree.label] = Tree(tree.label, x, y, z, x_height, y_height,
dom_max=tree.max_intensity,
dom_mean=numpy.mean(chm),
dom_median=numpy.median(chm),
dom_min=
|
numpy.min(chm)
|
numpy.min
|
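A short sketch of numpy.min alongside the other reductions used in the row above, applied to the nonzero pixels of a made-up intensity patch.

import numpy as np

intensity = np.array([[0.0, 3.2, 4.1],
                      [0.0, 5.0, 0.0]])
chm = intensity[np.nonzero(intensity)]      # nonzero canopy-height values: [3.2, 4.1, 5.0]
stats = {"min": np.min(chm), "max": np.max(chm),
         "mean": np.mean(chm), "median": np.median(chm)}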
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import matplotlib
from matplotlib.projections import register_projection
from matplotlib.ticker import (MultipleLocator, FixedLocator, AutoLocator,
ScalarFormatter)
from matplotlib.pyplot import rcParams, figure, show, draw
from numpy import (ma, array, linspace, logspace, log, cos, sin, pi, zeros,
exp, arange, trapz, where, concatenate, nan, isnan, argsort,
log10, meshgrid)
from datetime import datetime
import os
import sys
# Local thermodynamics stuff, see thermodynamics.py
try:
from .thermodynamics import (
VirtualTemp, Latentc, VaporPressure, MixRatio, GammaW,
VirtualTempFromMixR, MixR2VaporPress, DewPoint, Theta, TempK,
Density, DensHumid, ThetaE, ThetaV, barometric_equation_inv)
from .thermodynamics import Rs_da, Cp_da, Epsilon, degCtoK
except:
from thermodynamics import (
VirtualTemp, Latentc, VaporPressure, MixRatio, GammaW,
VirtualTempFromMixR, MixR2VaporPress, DewPoint, Theta, TempK,
Density, DensHumid, ThetaE, ThetaV, barometric_equation_inv)
from thermodynamics import Rs_da, Cp_da, Epsilon, degCtoK
# UserDict located in different places depending on Python 2 vs. 3
try:
from UserDict import UserDict
except ImportError:
from collections import UserDict
mpl_version = str(matplotlib.__version__)
mpl_version_digits = [int(ss) for ss in mpl_version.split('.')]
assert mpl_version_digits[0] >= 1, "Requires matplotlib version>1.0.0"
if mpl_version_digits[1] < 4:
# Thanks <NAME> for providing the original (and presumably, the current)
    # implementation of the SkewX projections
try:
from .skewx_projection_matplotlib_lt_1d4 import SkewXAxes
except:
from skewx_projection_matplotlib_lt_1d4 import SkewXAxes
else:
# See: http://matplotlib.org/mpl_examples/api/skewt.py
# The _only_ change to the code is on line 113, where I set
# rot=45 to suit my axes.
try:
from .skewx_projection import SkewXAxes
except:
from skewx_projection import SkewXAxes
# SkewT version
__version__ = "1.1.0"
class SkewXAxes(SkewXAxes):
# In the SkewT package, SkewXAxes is a subclass of the one provided
# by R<NAME>, either from the example on his webpage (circa 2011) or
# the example on the matplotlib page. I add the following methods.
def other_housekeeping(self, mixratio=array([])):
# Added by <NAME>
self.yaxis.grid(True, ls='-', color='y', lw=0.5)
majorLocatorDegC = MultipleLocator(10)
self.xaxis.set_major_locator(majorLocatorDegC)
self.xaxis.grid(True, color='y', lw=0.5, ls='-')
# self.set_ylabel('Pressure (hPa)')
self.set_xlabel('Temperature (C)')
yticks = linspace(100, 1000, 10)
if self.pmin < 100:
yticks = concatenate((array([50, 20, 10]), yticks))
self.set_yticks(yticks)
self.yaxis.set_major_formatter(ScalarFormatter())
self.set_xlim(self.tmin, self.tmax)
self.set_ylim(self.pmax, self.pmin)
self.spines['right'].set_visible(False)
self.get_yaxis().set_tick_params(which="both", size=0)
self.get_xaxis().set_tick_params(which="both", size=0)
def add_dry_adiabats(self, T0, P, do_labels=True, **kwargs):
# Added by <NAME>
P0 = 1000.
T = array([(st+degCtoK)*(P/P0)**(Rs_da/Cp_da)-degCtoK for st in T0])
labelt = [(st+degCtoK)*1**(Rs_da/Cp_da) for st in T0]
# gets a pressure level about 1/4 the way up the plot...
pp = 10**(log10(self.pmin**.2*self.pmax**.8))
xi = where(abs(P-pp) - abs(P-pp).min() < 1e-6)[0][0]
ndec = log10(self.pmax/pp)/log10(self.pmax/self.pmin)
tran = self.tmax - self.tmin
tminl = self.tmin - tran * ndec
tmaxl = self.tmax - tran * ndec
if 'color' in kwargs:
col = kwargs['color']
else:
col = 'k'
for tt, ll in zip(T, labelt):
self.plot(tt, P, **kwargs)
if do_labels:
if tt[xi] > tmaxl - 2:
continue
if tt[xi] < tminl + 2:
continue
self.text(tt[xi], P[xi]+10, '%d' % (ll), fontsize=8,
ha='center', va='bottom', rotation=-30, color=col,
bbox={'facecolor': 'w', 'edgecolor': 'w'})
return T
def add_moist_adiabats(self, T0, P0, do_labels=True, **kwargs):
# Added by <NAME>
moist_adiabats = array([moist_ascent(P0, st) for st in T0])
T = moist_adiabats[:, 1, :]
P = moist_adiabats[0, 0, :]
# gets a pressure level about 3/4 the way up the plot...
pp = 10**(log10(self.pmin**.75*self.pmax**.25))
xi = where(abs(P-pp) - abs(P-pp).min() < 1e-6)[0][0]
ndec = log10(self.pmax/pp) / log10(self.pmax/self.pmin)
tran = self.tmax - self.tmin
tminl = self.tmin - tran * ndec
tmaxl = self.tmax - tran * ndec
if 'color' in kwargs:
col = kwargs['color']
else:
col = 'k'
for tt in T:
self.plot(tt, P, **kwargs)
# if (tt[-1]>-60) and (tt[-1]<-10):
if do_labels:
if tt[xi] > tmaxl - 2:
continue
if tt[xi] < tminl + 2:
continue
self.text(
tt[xi], P[xi], '%d' % tt[0], ha='center', va='bottom',
fontsize=8,
bbox={'facecolor': 'w', 'edgecolor': 'w'}, color=col)
def add_mixratio_isopleths(self, w, P, do_labels=True, **kwargs):
# Added by <NAME>
e = array([P*ww/(.622+ww) for ww in w])
T = 243.5 / (17.67 / log(e/6.112) - 1)
if 'color' in kwargs:
col = kwargs['color']
else:
col = 'k'
pp = 700.
xi = where(abs(P-pp) - abs(P-pp).min() < 1e-6)[0][0]
ndec = log10(self.pmax/pp) / log10(self.pmax/self.pmin)
tran = self.tmax - self.tmin
tminl = self.tmin - tran * ndec
tmaxl = self.tmax - tran * ndec
for tt, mr in zip(T, w):
self.plot(tt, P.flatten(), **kwargs)
if do_labels:
if tt[xi] > tmaxl - 2:
continue
if tt[xi] < tminl + 2:
continue
if mr * 1000 < 0.1:
fmt = "%4.2f"
elif mr * 1000 <= 1.:
fmt = "%4.1f"
else:
fmt = "%d"
self.text(
tt[-1], P[-1], fmt % (mr * 1000), color=col, fontsize=8,
ha='center', va='bottom',
bbox={'facecolor': 'w', 'edgecolor': 'w'})
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
class Sounding(UserDict):
# Copyright (c) 2013 <NAME>
"""Utilities to read, write and plot sounding data quickly and without fuss
INPUTS:
filename: If creating a sounding from a file, the full file name. The
format of this file is quite pedantic and needs to conform
to the format given by the University of Wyoming soundings
(see weather.uwyo.edu/upperair/sounding.html)
data: Soundings can be made from atmospheric data. This should be
in the form of a python dict with (at minimum) the following
fields:
TEMP: dry-bulb temperature (Deg C)
DWPT: dew point temperature (Deg C)
PRES: pressure (hPa)
SKNT: wind speed (knots)
WDIR: wind direction (deg)
The following fields are also used, but not required by the
plot_skewt routine:
HGHT (m)
RELH (%)
MIXR (g/kg)
THTA (K)
THTE (K)
THTV (K)
"""
_AllowedKeys = ['pres', 'hght', 'temp', 'dwpt', 'relh', 'mixr',
'drct', 'sknt', 'thta', 'thte', 'thtv']
def __init__(self, filename=None, soundingdata=None):
UserDict.__init__(self)
self.soundingdata = {}
if soundingdata is None:
self.uwyofile(filename)
else:
for kk in soundingdata.keys():
if kk.lower() not in Sounding._AllowedKeys:
self[kk] = soundingdata.pop(kk)
else:
dd = soundingdata[kk]
if hasattr(dd, 'mask'):
ddm = dd
else:
ddm = ma.masked_invalid(dd)
ddm = ma.masked_values(ddm, -999)
ddm = ma.masked_array(ddm, mask=False).harden_mask()
self.soundingdata[kk] = ddm
if 'StationNumber' not in self:
self['StationNumber'] = '(No Number)'
if 'SoundingDate' not in self:
self['SoundingDate'] = '(No Date)'
def plot_skewt(self, pmax=1050., pmin=100., parcel_type='most_unstable',
imagename=None, title=None, **kwargs):
"""A wrapper for plotting the skewt diagram for a Sounding instance."""
self.make_skewt_axes(pmax, pmin)
self.add_profile(**kwargs)
if parcel_type is not None:
parcel = self.get_parcel(parcel_type)
self.lift_parcel(*parcel)
self.column_diagnostics()
if isinstance(title, str):
self.skewxaxis.set_title(title)
else:
self.skewxaxis.set_title("%s %s" % (self["StationNumber"],
self['SoundingDate']))
if imagename is not None:
print("saving figure")
self.fig.savefig(imagename, dpi=100)
def add_profile(self, **kwargs):
"""Add a new profile to the SkewT plot.
This is abstracted from plot_skewt to enable the plotting of
multiple profiles on a single axis, by updating the data attribute.
For example:
>>>
S=SkewT.Sounding(soundingdata={})
S.make_skewt_axes()
S.uwyofile("../examples/94975.2013062800.txt")
S.add_profile(color="b",bloc=0.5)
S.uwyofile("../examples/94975.2013070900.txt")
S.add_profile(color="r",bloc=1.)
>>>
Use the kwarg 'bloc' to set the alignment of the wind barbs from
the centerline (useful if plotting multiple profiles on the one axis)
>>>
Modified 25/07/2013: enforce masking of input soundingdata for this
function (does not affect the data attribute).
"""
# I must be a dummy because I can't make
# this work any other way!!
if 'bloc' in kwargs:
bloc = kwargs.pop('bloc')
else:
bloc = 0.5
try:
pres = ma.masked_invalid(self.soundingdata['pres'])
except KeyError:
raise KeyError("Pres in hPa (PRES) is required!")
try:
tc = ma.masked_invalid(self.soundingdata['temp'])
except KeyError:
raise KeyError("Temperature in C (TEMP) is required!")
try:
dwpt = ma.masked_invalid(self.soundingdata['dwpt'])
except KeyError:
print("Warning: No DWPT available")
dwpt = ma.masked_array(zeros(pres.shape), mask=True)
try:
sknt = self.soundingdata['sknt']
drct = self.soundingdata['drct']
rdir = (270.-drct)*(pi/180.)
uu = ma.masked_invalid(sknt*cos(rdir))
vv = ma.masked_invalid(sknt*sin(rdir))
except KeyError:
print("Warning: No SKNT/DRCT available")
uu = ma.masked_array(zeros(pres.shape), mask=True)
vv = ma.masked_array(zeros(pres.shape), mask=True)
tcprof = self.skewxaxis.plot(tc, pres, zorder=5, **kwargs)
dpprof = self.skewxaxis.plot(dwpt, pres, zorder=5, **kwargs)
# this line should no longer cause an exception
nbarbs = (~uu.mask).sum()
skip = max(1, int(nbarbs//32))
if 'color' in kwargs:
bcol = kwargs['color']
else:
bcol = 'k'
if 'alpha' in kwargs:
balph = kwargs['alpha']
else:
balph = 1.
self.wbax.barbs((zeros(pres.shape)+bloc)[::skip]-0.5, pres[::skip],
uu[::skip], vv[::skip],
length=6, color=bcol, alpha=balph, lw=0.5)
self.skewxaxis.other_housekeeping()
return tcprof
def make_skewt_axes(self, pmax=1050., pmin=100., tmin=-40., tmax=30.,
fig=None):
"""Set up the skew-t axis the way I like to see it"""
if fig is None:
self.fig = figure(figsize=(8, 8))
self.fig.clf()
else:
self.fig = fig
rcParams.update({'font.size': 10, })
self.skewxaxis = self.fig.add_axes([.065, .1, .71, .8],
projection='skewx')
self.skewxaxis.set_yscale('log')
self.skewxaxis.pmax = pmax
self.skewxaxis.pmin = pmin
self.skewxaxis.tmax = tmax
self.skewxaxis.tmin = tmin
xticklocs = arange(-80, 45, 10)
T0 = xticklocs
# P=linspace(pmax,pmin,101)
P = logspace(log10(pmax), log10(pmin), 101)
w = array([0.00001, 0.0001, 0.0004, 0.001, 0.002, 0.004,
0.007, 0.01, 0.016, 0.024, 0.032])
self.skewxaxis.add_mixratio_isopleths(
w, P[P >= 700], color='g', ls='--', alpha=1., lw=0.5)
self.skewxaxis.add_dry_adiabats(
linspace(210, 550, 18)-degCtoK, P, color='g', ls='--', alpha=1.,
lw=0.5)
self.skewxaxis.add_moist_adiabats(
linspace(0, 44, 12), pmax, color='g', ls='--', alpha=1., lw=0.5)
self.skewxaxis.set_title("%s %s" % (self['StationNumber'],
self['SoundingDate']))
self.skewxaxis.other_housekeeping()
self.wbax = self.fig.add_axes([0.685, 0.1, 0.1, 0.8],
sharey=self.skewxaxis, frameon=False)
self.wbax.xaxis.set_ticks([], [])
self.wbax.yaxis.grid(True, ls='-', color='y', lw=0.5)
for tick in self.wbax.yaxis.get_major_ticks():
# tick.label1On = False
pass
self.wbax.get_yaxis().set_tick_params(size=0, color='y')
self.wbax.set_xlim(-1.5, 1.5)
self.wbax.get_yaxis().set_visible(False)
self.wbax.set_title('kn', fontsize=10, color='k', ha='right')
# Set up standard atmosphere height scale on
# LHS of plot.
majorLocatorKM = MultipleLocator(2)
majorLocatorKFT = MultipleLocator(5)
minorLocator = MultipleLocator(1)
# determine base height from base pressure (nominally 1050 hPa)
# via hydrostatic equilibrium for standard atmosphere
# model atmospheric conditions with constant lapse rate and
# NIST (1013.25hPa and 20C)
zmin = barometric_equation_inv(0, 293.15, 101325., pmax*100.)
zmax = barometric_equation_inv(0, 293.15, 101325., pmin*100.)
zminf = zmin * 3.2808
zmaxf = zmax * 3.2808
self.kmhax = self.fig.add_axes([0.775, 0.1, 1e-6, 0.8], frameon=True)
self.kmhax.xaxis.set_ticks([], [])
self.kmhax.spines['left'].set_color('k')
self.kmhax.spines['right'].set_visible(False)
self.kmhax.tick_params(axis='y', colors='k', labelsize=8)
self.kmhax.set_ylim(zmin*1e-3, zmax*1e-3)
self.kmhax.set_title("km/kft", fontsize=10)
self.kmhax.get_yaxis().set_tick_params(which="both", direction='out')
self.kmhax.yaxis.set_major_locator(majorLocatorKM)
self.kmhax.yaxis.set_minor_locator(minorLocator)
self.fthax = self.kmhax.twinx()
self.fthax.xaxis.set_ticks([], [])
self.fthax.tick_params(axis='y', colors='k', labelsize=8)
self.fthax.set_ylim(zminf*1e-3, zmaxf*1e-3)
self.fthax.get_yaxis().set_tick_params(which="both", direction='out')
self.fthax.yaxis.set_major_locator(majorLocatorKFT)
self.fthax.yaxis.set_minor_locator(minorLocator)
def uwyofile(self, fname):
"""Reads the raw profile data from a Universiy of Wyoming sounding file.
This is the primary method of IO for SkewT. The University of
Wyoming maintains a nice database of global upper air data which is
kept up-to-date. Given a filename, this method updates the sounding
data with the text data in the file.
NOTES
1. The input file has to conform *Exactly* to the University of
Wyoming file format. This is because I look for data fields at
specific places on each line.
2. I ignore the diagnostics at the end of the file, because the idea
is to calculate these myself.
3. When this no longer works I'll begin reading in a more array-esque
way.
"""
# -------------------------------------------------------------------
# This *should* be a convenient way to read a uwyo sounding
# -------------------------------------------------------------------
fid = open(fname)
lines = fid.readlines()
# New: handle whitespace at top of file if present
while not lines[0].strip():
lines.pop(0)
nlines = len(lines)
lhi = [1, 9, 16, 23, 30, 37, 46, 53, 58, 65, 72]
rhi = [7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77]
# initialise output data structure
output = {}
fields = lines[3].split()
units = lines[4].split()
# Handle the file header
# First line for WRF profiles differs from the UWYO soundings
header = lines[0]
if header[:5] == '00000':
# WRF profile
self['StationNumber'] = '00000'
self['Longitude'] = float(header.split()[5].strip(","))
self['Latitude'] = float(header.split()[6])
self['SoundingDate'] = header.split()[-1]
else:
self['StationNumber'] = header[:5]
dstr = (' ').join(header.split()[-4:])
self['SoundingDate'] = \
datetime.strptime(
dstr, "%HZ %d %b %Y").strftime("%Y-%m-%d_%H:%M:%S")
# This is a data pre-initialisation step. I have used the
# number of lines minus the number of lines of diagnostics.
for ff in fields:
# output[ff.lower()]=zeros((nlines-34))-999.
output[ff.lower()] = []
lcounter = 5
for line, idx in zip(lines[6:], range(nlines)):
lcounter += 1
# Version < 0.1.4
# try: output[fields[0].lower()][idx]=float(line[lhi[0]:rhi[0]])
# except ValueError: break
# New code. We test for pressure in the first column.
# If there's no pressure, we get out!
try:
output[fields[0].lower()].append(float(line[lhi[0]:rhi[0]]))
except ValueError:
break
for ii in range(1, len(rhi)):
try:
# Debug only:
# print fields[ii].lower(),
# float(line[lhi[ii]:rhi[ii]].strip())
# Version < 0.1.4
# output[fields[ii].lower()\
# [idx]=float(line[lhi[ii]:rhi[ii]].strip())
# New Code. Append to list instead of indexing
# pre-allocated data. Explicitly allocate -999
# for invalid data (catch ValueError)
textdata = line[lhi[ii]:rhi[ii]].strip()
output[fields[ii].lower()].append(float(textdata))
except ValueError:
output[fields[ii].lower()].append(-999.)
for field in fields:
ff = field.lower()
# set mask for missing data
dd = ma.masked_values(output[ff], -999.)
dd = ma.masked_array(dd, mask=False)
dd.harden_mask()
self.soundingdata[ff] = dd
return None
def column_diagnostics(self):
"""Wrapper for column diagnostics"""
self['Diagnostics'] = {}
dtext = "Column:\n"
self['Diagnostics']['TPW'] = self.precipitable_water()
dtext += "%4s:%6.1f mm" % ('TPW', self['Diagnostics']['TPW'])
self.fig.text(0.825, 0.65, dtext, fontname='monospace', va='top',
backgroundcolor='white')
def precipitable_water(self):
"""Calculate Total Precipitable Water (TPW) for sounding.
TPW is defined as the total column-integrated water vapour. I
calculate it from the dew point temperature because this is the
fundamental moisture variable in this module (even though it is RH
that is usually measured directly)
"""
tempk = self.soundingdata['temp'] + degCtoK
prespa = self.soundingdata['pres'] * 100.
hghtm = self.soundingdata['hght']
# Get Water Vapour Mixing Ratio, by calculation
# from dew point temperature
try:
dwptc = self.soundingdata['dwpt']
except KeyError:
print("Warning: No MIXR or DWPT for TPW calculation")
return -999.
vprespa = VaporPressure(dwptc)
mixrkg = MixRatio(vprespa, prespa)
# Calculate density of air (accounting for moisture)
rho = DensHumid(tempk, prespa, vprespa)
# Trapezoidal rule to approximate TPW (units kg/m^2==mm)
tpw = trapz(mixrkg*rho, hghtm)
return tpw
def get_cape(self, startp, startt, startdp, totalcape=False):
"""Wrapper for the numerics of calculating CAPE.
INPUTS:
startp,startt,startdp: Definition of the parcel that we will base
the calculations on. This can be the output
of Sounding.get_parcel() or it can be a user-
defined parcel.
totalcape [=False] : Flag defining method of identifying the so-
called "Equilibrium Level" (Reference).
If False (default), use the first stable
layer above the LFC, and ignore any CAPE in
unstable layers above this. If True, use all
CAPE up to the highest equilibrium level.
OUTPUTS:
P_lcl : The lifted condensation level (LCL)
P_lfc : The level of free convection (LFC). Can be
the same as the LCL, or can be NaN if there
are no unstable layers.
P_el : The Equilibrium Level, used to determine the
CAPE. If totalcape=True, use the highest
equilibrium level, otherwise use the first
stable equilibrium level above the LFC.
CAPE : CAPE calculated from virtual temperature
CIN : CIN calculated from virtual temperature
HINT:
parcel=S.get_parcel('mu')
lcl,lfc,el,cape,cin=get_cape(*parcel)
"""
from numpy import interp
assert startt >= startdp, "Not a valid parcel. Check Td<Tc"
# fundamental environmental variables
pres = self.soundingdata['pres']
temp = self.soundingdata['temp']
# Get Sub-LCL traces
presdry, tempdry, tempiso = dry_ascent(
startp, startt, startdp, nsteps=101)
# make lcl variables explicit
P_lcl = presdry[-1]
T_lcl = tempdry[-1]
# Now lift a wet parcel from the intersection point
# preswet=linspace(P_lcl,100,101)
preswet, tempwet = moist_ascent(P_lcl, T_lcl, nsteps=101)
# tparcel is the concatenation of tempdry and
# tempwet, and so on.
tparcel = concatenate((tempdry, tempwet[1:]))
pparcel = concatenate((presdry, preswet[1:]))
dpparcel = concatenate((tempiso, tempwet[1:]))
# Interpolating the environmental profile onto the
# parcel pressure coordinate
# tempenv=interp(preswet,pres[::-1],temp[::-1])
# NEW, for total column:
tempenv = interp(pparcel, pres[::-1], temp[::-1])
# now solve for the equlibrium levels above LCL
# (all of them, including unstable ones)
# eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])
# NEW, for total column:
# On second thought, we don't really want/need
# any equilibrium levels below LCL
# eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])
# This is equivalent to the old statement :
eqlev, stab = solve_eq(pparcel[pparcel <= P_lcl][::-1],
(tparcel-tempenv)[pparcel <= P_lcl][::-1])
# Sorting index by decreasing pressure
I = argsort(eqlev)[::-1]
eqlev = eqlev[I]
stab = stab[I]
# temperatures at the equilibrium level
# tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])
# NEW, for total column:
tempeq = interp(eqlev, pparcel[::-1], tparcel[::-1])
# This helps with debugging
# for ii,eq in enumerate(eqlev):
# print "%5.2f %5.2f %2d"%(eq,tempeq[ii],stab[ii])
# need environmental temperature at LCL
tenv_lcl = interp(P_lcl, pparcel[::-1], tempenv[::-1])
isstab = where(stab == 1., True, False)
unstab = where(stab == 1., False, True)
if eqlev.shape[0] == 0:
# no unstable layers in entire profile
# because the parcel never crosses the tenv
P_lfc = nan
P_el = nan
elif T_lcl > tenv_lcl:
# check LCL to see if this is unstable
P_lfc = P_lcl
if totalcape is True:
P_el = eqlev[isstab][-1]
else:
P_el = eqlev[isstab][0]
elif eqlev.shape[0] > 1:
# Parcel is stable at LCL so LFC is the
# first unstable equilibrium level and
# "EQ" level is the first stable equilibrium
# level
P_lfc = eqlev[unstab][0]
if totalcape is True:
P_el = eqlev[isstab][-1]
else:
P_el = eqlev[isstab][0]
else:
# catch a problem... if there is only
# one eqlev and it's unstable (this is
# unphysical), then it could be a vertical
# resolution thing. This is a kind of
# "null" option
if isstab[0]:
P_el = eqlev[isstab][0]
P_lfc = nan
else:
P_lfc = nan
P_el = nan
if isnan(P_lfc):
return P_lcl, P_lfc, P_el, 0, 0
# need to handle case where dwpt is not available
# above a certain level for any reason. Most simplest
# thing to do is set it to a reasonably low value;
# this should be a conservative approach!
dwpt = self.soundingdata['dwpt'].copy().soften_mask()
# raise ValueError
if dwpt[(pres >= P_el).data*(pres < P_lfc).data].mask.any():
print("WARNING: substituting -200C for masked values of",
"DWPT in this sounding")
dwpt[dwpt.mask] = -200
# dwptenv=interp(preswet,pres[::-1],dwpt[::-1])
# NEW:
dwptenv = interp(pparcel, pres[::-1], dwpt[::-1])
hght = self.soundingdata['hght']
if hght[(pres >= P_el).data].mask.any():
raise NotImplementedError(
"TODO: Implement standard atmos. to sub. missing heights")
# hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])
# NEW:
hghtenv = interp(pparcel, pres[::-1], self.soundingdata['hght'][::-1])
        # Areas of POSITIVE Buoyancy
cond1 = (tparcel >= tempenv) * (pparcel <= P_lfc) * (pparcel > P_el)
        # Areas of NEGATIVE Buoyancy
if totalcape is True:
cond2 = (tparcel < tempenv) * (pparcel > P_el)
else:
cond2 = (tparcel < tempenv) * (pparcel > P_lfc)
# Do CAPE calculation
# 1. Virtual temperature of parcel...
# remember it's saturated above LCL.
# e_parcel=VaporPressure(tempwet)
# Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)
# e_env=VaporPressure(dwptenv)
# Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)
# TODO: Implement CAPE calculation with virtual temperature
# (This will affect the significant level calculations as well!!)
# e_parcel=VaporPressure(dpparcel)
# Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)
# e_env=VaporPressure(dwptenv)
# Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)
# CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/\
# Tv_env[cond1],hghtenv[cond1])
# CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/\
# Tv_env[cond2],hghtenv[cond2])
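        # The integrals evaluated below use the dry-bulb approximation
        # (until the virtual-temperature TODO above is implemented):
        #   CAPE = g * integral{ (Tparcel - Tenv) / Tenv dz } over cond1
        #   CIN  = g * integral{ (Tparcel - Tenv) / Tenv dz } over cond2
        # with the denominator converted to Kelvin.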
CAPE = trapz(
9.81*(tparcel[cond1]-tempenv[cond1])/(tempenv[cond1] + 273.15),
hghtenv[cond1])
CIN = trapz(
9.81*(tparcel[cond2]-tempenv[cond2])/(tempenv[cond2] + 273.15),
hghtenv[cond2])
if False:
print("%3s %7s %7s %7s %7s %7s %7s %7s %7s" %
("IX", "PRES", "TPARCEL", "DPPARCE", "TENV",
"DPENV", "TV PARC", "TV ENV", "HEIGHT"))
for ix, c2 in enumerate(cond2):
if c2:
                    # NOTE: Tv_parcel and Tv_env only exist if the
                    # commented-out virtual-temperature lines above are
                    # re-enabled.
                    print(("%3d %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f"
                           " %7.3f %7.3f")
                          % (ix, pparcel[ix], tparcel[ix],
                             dpparcel[ix], tempenv[ix],
                             dwptenv[ix], Tv_parcel[ix],
                             Tv_env[ix], hghtenv[ix]))
return P_lcl, P_lfc, P_el, CAPE, CIN
def lift_parcel(self, *args, **kwargs):
"""Do a lifted parcel analysis on the sounding data"""
from numpy import interp
if 'totalcape' in kwargs:
totalcape = kwargs.pop('totalcape')
else:
totalcape = False
# Stuff for plotting
# zorder
zo = 4
# trace colour
col = [.6, .6, .6]
if len(args) == 4:
startp, startt, startdp, ptype = args
elif len(args) == 3:
startp, startt, startdp = args
ptype = ''
else:
raise NotImplementedError("expected 3 or 4 arguments")
# Get Sub-LCL traces
presdry, tempdry, tempiso = dry_ascent(startp, startt, startdp)
T_lcl = tempdry[-1]
# Parcel diagnostics
P_lcl, P_lfc, P_el, CAPE, CIN = self.get_cape(
startp, startt, startdp, totalcape=totalcape)
# Get moist ascent traces
preswet, tempwet = moist_ascent(P_lcl, T_lcl)
# tparcel is the concatenation of tempdry and
# tempwet, and so on.
tparcel = concatenate((tempdry, tempwet[1:]))
pparcel = concatenate((presdry, preswet[1:]))
T_lfc = interp(P_lfc, preswet[::-1], tempwet[::-1])
T_el = interp(P_el, preswet[::-1], tempwet[::-1])
# fundamental environmental variables
pres = self.soundingdata['pres']
temp = self.soundingdata['temp']
hght = self.soundingdata['hght']
dwpt = self.soundingdata['dwpt'].copy().soften_mask()
dwpt[dwpt.mask] = dwpt.min()
# interpolate to preswet coordinates
tempenv = interp(pparcel, pres[::-1], temp[::-1])
# Plot traces below LCL
self.skewxaxis.plot(tempdry, presdry, color=col, lw=2, zorder=zo)
self.skewxaxis.plot(tempiso, presdry, color=col, lw=2, zorder=zo)
self.skewxaxis.plot(T_lcl, P_lcl, ls='', marker='o', mec=col,
mfc=col, zorder=zo)
# Plot trace above LCL
self.skewxaxis.plot(tempwet, preswet, color=col, lw=2, zorder=zo)
# Plot LFC and EL
self.skewxaxis.plot(T_lfc, P_lfc, ls='', marker='o', mew=2, mec='b',
mfc='None', zorder=zo)
self.skewxaxis.plot(T_el, P_el, ls='', marker='o', mew=2, mec='r',
mfc='None', zorder=zo)
if not isnan(P_lfc):
            # Hatch areas of POSITIVE Buoyancy
cond1 = (tparcel >= tempenv) * (pparcel <= P_lfc) * \
(pparcel > P_el)
self.skewxaxis.fill_betweenx(
pparcel, tparcel, tempenv, where=cond1,
color="none", hatch='XXX', edgecolor='k', zorder=zo)
            # Hatch areas of NEGATIVE Buoyancy
if totalcape is True:
cond2 = (tparcel < tempenv) * (pparcel > P_el)
else:
cond2 = (tparcel < tempenv) * (pparcel > P_lfc)
self.skewxaxis.fill_betweenx(
pparcel, tparcel, tempenv, where=cond2,
color="none", hatch='///', edgecolor='r', zorder=zo)
# Add text to sounding
dtext = "Parcel: %s\n" % ptype.upper()
dtext += "Ps :%6.1fhPa\n" % startp
dtext += "TCs : %4.1fC\n" % startt
dtext += "TDs : %4.1fC\n" % startdp
dtext += "-------------\n"
dtext += "Plcl:%6.1fhPa\n" % P_lcl
dtext += "Tlcl: %4.1fC\n" % T_lcl
dtext += "Plfc:%6.1fhPa\n" % P_lfc
dtext += "P_el:%6.1fhPa\n" % P_el
dtext += "CAPE:%6.1fJ\n" % CAPE
dtext += "CIN: %6.1fJ" % CIN
if False:
h_lcl = interp(P_lcl, pres[::-1], hght[::-1])
h_lfc = interp(P_lfc, pres[::-1], hght[::-1])
h_el = interp(P_el, pres[::-1], hght[::-1])
dtext += "\n-------------\n"
dtext += "Hlcl:%6.1fm\n" % h_lcl
dtext += "Hlfc:%6.1fm\n" % h_lfc
dtext += "H_el:%6.1fm\n" % h_el
self["Parcel"] = {
"Ps": startp,
"TCs": startt,
"TDs": startdp,
"Plcl": P_lcl,
"Tlcl": T_lcl}
print("\n---- Lifted Parcel Quantities ----")
print(dtext)
self.fig.text(0.825, 0.895, dtext, fontname="monospace",
va='top', backgroundcolor='white')
draw()
def get_parcel(self, method='ml'):
"""Automatically generate a parcel based on the sounding characteristics
INPUTS
        method ['ml'] : Parcel type. Choose from the following
Mixed Layer : 'ml'
Surface Based: 'sb'
Most Unstable: 'mu'
depth : Both the mixed layer and the most unstable parcel
require a threshold on the depth of the layer used
to determine the parcel
OUTPUTS
(pres,temp,dwpt): The parcel characteristics
"""
self.do_thermodynamics()
if method == 'most_unstable' or method == 'mu':
return self.most_unstable_parcel()
elif method == 'surface' or method == 'sb':
return self.surface_parcel()
        elif method == 'mixed_layer' or method == 'ml':
return self.mixed_layer_parcel()
else:
raise NotImplementedError
def surface_parcel(self):
"""Return ACUTAL lowest parcel, handling frequent missing data
from lowest levels"""
pres = self.soundingdata["pres"]
temp = self.soundingdata["temp"]
assert 'dwpt' in self.soundingdata, \
"Moisture needed for parcel calculation! Add DWPT"
dwpt = self.soundingdata["dwpt"]
ii = 0
while True:
if dwpt.mask[ii] or temp.mask[ii]:
ii += 1
else:
return pres[ii], temp[ii], dwpt[ii], 'sb'
def most_unstable_parcel(self, depth=300):
"""Return a parcel representing conditions for the most unstable
level in the lowest <depth> hPa"""
pres = self.soundingdata['pres']
temp = self.soundingdata['temp']
dwpt = self.soundingdata['dwpt']
thta = self.soundingdata['thta']
cape = zeros(pres.shape)
for ii in range((pres > pres[0]-depth).sum()):
if temp.mask[ii]:
continue
if dwpt.mask[ii]:
continue
theparcel = pres[ii], temp[ii], dwpt[ii]
try:
thecape = self.get_cape(*theparcel, totalcape=True)[-2]
except ValueError:
# this is raised when get_cape fails to find
# equilibrium levels, which happens when the
# parcel doesn't "completely" intersect the
# sounding profile.
continue
cape[ii] = thecape
# print "%7.2f %7.2f %7.2f %7.2f"%
# (pres[ii],temp[ii],dwpt[ii],thecape)
if cape.max() == 0.:
return self.surface_parcel()
# choose max cape
I = where(cape == cape.max())[0][0]
# need to descend along adiabat!
# convert parcel to equivalent surface parcel
# thetheta=thta[I]
# parceltemp=(temp[I]+degCtoK)*(pres[0]/pres[I])**(Rs_da/Cp_da)-degCtoK
# the_e=VaporPressure(dwpt[I])
# themixr=MixRatio(the_e,pres[I]*100)
# parcele=MixR2VaporPress(themixr,pres[0]*100)
# parceldwpt=DewPoint(parcele)
# return pres[0],parceltemp,parceldwpt,'mu'
# return conditions at the mu level.
return pres[I], temp[I], dwpt[I], 'mu'
def mixed_layer_parcel(self, depth=100):
"""Returns parameters for a parcel initialised by:
1. Surface pressure (i.e. pressure of lowest level)
2. Surface temperature determined from mean(theta) of lowest <depth> mb
3. Dew point temperature representative of lowest <depth> mbar
Inputs:
depth (mbar): depth to average mixing ratio over
"""
pres = self.soundingdata["pres"]
temp = self.soundingdata["temp"]
dwpt = self.soundingdata["dwpt"]
pres0, temp0, dwpt0, null = self.surface_parcel()
# identify the layers for averaging
layers = pres > (pres0-depth)
# average theta over mixheight to give
# parcel temperature
thta_mix = Theta(temp[layers]+degCtoK, pres[layers]*100.).mean()
temp_s = TempK(thta_mix, pres0*100) - degCtoK
# average mixing ratio over mixheight
vpres = VaporPressure(dwpt)
mixr = MixRatio(vpres, pres*100)
mixr_mix = mixr[layers].mean()
vpres_s = MixR2VaporPress(mixr_mix, pres0*100)
# surface dew point temp
dwpt_s = DewPoint(vpres_s)
# print "----- Mixed Layer Parcel Characteristics -----"
# print "Mixed layer depth : %5d mb "%depth
# print "Mean mixed layer potential temperature: %5.1f K"%thta_mix
# print "Mean mixed layer mixing ratio : %5.2f g/kg"%
# (mixr_mix*1e3)
return pres0, temp_s, dwpt_s, 'ml'
def do_thermodynamics(self):
assert 'temp' in self.soundingdata, \
"Temperature needed for thermodynamics! Add TEMP"
assert 'pres' in self.soundingdata, \
"Pressure needed for thermodynamics! Add PRES"
assert 'dwpt' in self.soundingdata, \
"Moisture needed for thermodynamics! Add DWPT"
# primary variables
prespa = self.soundingdata['pres'] * 100.
tempc = self.soundingdata['temp']
tempk = tempc + degCtoK
dwptc = self.soundingdata['dwpt']
# secondary variables
e = VaporPressure(dwptc)
esat = VaporPressure(tempc)
# assign/extract other variables
if 'thta' not in self.soundingdata:
self.soundingdata['thta'] = Theta(tempk, prespa)
if 'thte' not in self.soundingdata:
self.soundingdata['thte'] = ThetaE(tempk, prespa, e)
if 'thtv' not in self.soundingdata:
self.soundingdata['thtv'] = ThetaV(tempk, prespa, e)
if 'relh' not in self.soundingdata:
self.soundingdata['relh'] = 100. * e / esat
return
def solve_eq(preswet, func):
"""Solve the peicewise-linear stability of a parcel
INPUTS: variables from the most ascent of a parcel
preswet: pressure
func : piecewise linear function to solve (tw-te)
OUTPUTS:
solutions: zeros of the function (tw-te)
stability: indication of the stability of this solution.
NOTE ABOUT STABILITY
Stability is the sign of (d(func)/dP). So if you have used tw-te
    like you were supposed to, d(tw-te)/dP>0 means this is a stable
equilibrium level (flip the sign to envision d(tw-te)/dz).
"""
from numpy import sign, diff
# Sorry to be annoying but I'm going to force you to use
# a monotonically increasing variable
assert (sign(diff(preswet)) == 1).all(), \
"Use a monotonically increasing abscissa"
# Identify changes in sign of function
dsign = sign(func)
isdiff = zeros(dsign.shape, dtype=bool)
isdiff[1:] = abs(diff(dsign)).astype(bool)
# shift to get the value on the other side
# of the x-axis
shift = zeros(dsign.shape, dtype=bool)
shift[:-1] = isdiff[1:]
shift[-1] = isdiff[0]
    # solve by linear interpolation between
    # neighbouring points
sols = zeros((isdiff.sum()))
stab = zeros((isdiff.sum()))
for ii in range(isdiff.sum()):
f0 = func[isdiff][ii]
f1 = func[shift][ii]
p0 = preswet[isdiff][ii]
p1 = preswet[shift][ii]
slope = (f1-f0) / (p1-p0)
        sols[ii] = p0 - f0/slope
stab[ii] = sign(slope)
# Debug with plots ###
# fig=figure()
# ax=fig.add_subplot(111)
# ax.plot(preswet,func)
# ax.plot(sols,zeros(sols.shape),ls='',marker='o')
# ax.plot(preswet[isdiff],func[isdiff],ls='',marker='+',mew=2)
# ax.plot(preswet[shift],func[shift],ls='',marker='x',mew=2)
# ax.grid(True)
# show()
return sols, stab
def dry_ascent(startp, startt, startdp, nsteps=101):
from numpy import interp
# -------------------------------------------------------------------
# Lift a parcel dry adiabatically from startp to LCL.
    # Init temp is startt in C, Init dew point is startdp,
# pressure levels are in hPa
# -------------------------------------------------------------------
assert startdp <= startt
if startdp == startt:
return array([startp]), array([startt]), array([startdp]),
# Pres=linspace(startp,600,nsteps)
Pres = logspace(log10(startp), log10(600), nsteps)
# Lift the dry parcel
T_dry = (startt+degCtoK) * (Pres/startp)**(Rs_da/Cp_da) - degCtoK
# Mixing ratio isopleth
starte = VaporPressure(startdp)
startw = MixRatio(starte, startp*100)
e = Pres * startw / (.622+startw)
T_iso = 243.5 / (17.67/log(e/6.112)-1)
# Solve for the intersection of these lines (LCL).
# interp requires the x argument (argument 2)
# to be ascending in order!
P_lcl = interp(0, T_iso-T_dry, Pres)
    T_lcl = interp(P_lcl, Pres[::-1], T_dry[::-1])
|
numpy.interp
|
import os
import glob
import shutil
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage, misc, signal, spatial
from skimage.filters import gaussian
import cv2
import math
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def re_mkdir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def init_log(output_dir):
re_mkdir(output_dir)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y%m%d-%H:%M:%S',
filename=os.path.join(output_dir, 'log.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
return logging
def copy_file(path_s, path_t):
shutil.copy(path_s, path_t)
def get_files_in_folder(folder, file_ext=None):
files = glob.glob(os.path.join(folder, "*" + file_ext))
files_name = []
for i in files:
_, name = os.path.split(i)
name, ext = os.path.splitext(name)
files_name.append(name)
return np.asarray(files), np.asarray(files_name)
def point_rot(points, theta, b_size, a_size):
cosA = np.cos(theta)
sinA = np.sin(theta)
b_center = [b_size[1]/2.0, b_size[0]/2.0]
a_center = [a_size[1]/2.0, a_size[0]/2.0]
points = np.dot(points-b_center, np.array([[cosA,-sinA],[sinA,cosA]]))+a_center
return points
def mnt_reader(file_name):
f = open(file_name)
minutiae = []
for i, line in enumerate(f):
if i < 4 or len(line) == 0: continue
w, h, o = [float(x) for x in line.split()]
w, h = int(round(w)), int(round(h))
minutiae.append([w, h, o])
f.close()
return minutiae
def mnt_writer(mnt, image_name, image_size, file_name):
f = open(file_name, 'w')
f.write('%s\n'%(image_name))
f.write('%d %d %d\n'%(mnt.shape[0], image_size[0], image_size[1]))
    for i in range(mnt.shape[0]):
f.write('%d %d %.6f %.4f\n'%(mnt[i,0], mnt[i,1], mnt[i,2], mnt[i,3]))
f.close()
return
def gabor_fn(ksize, sigma, theta, Lambda, psi, gamma):
sigma_x = sigma
sigma_y = float(sigma) / gamma
# Bounding box
nstds = 3
xmax = ksize[0]/2
ymax = ksize[1]/2
xmin = -xmax
ymin = -ymax
(y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
# Rotation
x_theta = x * np.cos(theta) + y * np.sin(theta)
y_theta = -x * np.sin(theta) + y * np.cos(theta)
gb_cos = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.cos(2 * np.pi / Lambda * x_theta + psi)
gb_sin = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.sin(2 * np.pi / Lambda * x_theta + psi)
return gb_cos, gb_sin
def gabor_bank(stride=2,Lambda=8):
    filters_cos = np.ones([25,25,180//stride], dtype=float)
    filters_sin = np.ones([25,25,180//stride], dtype=float)
    for n, i in enumerate(range(-90,90,stride)):
theta = i*np.pi/180.
kernel_cos, kernel_sin = gabor_fn((24,24),4.5, -theta, Lambda, 0, 0.5)
filters_cos[..., n] = kernel_cos
filters_sin[..., n] = kernel_sin
filters_cos = np.reshape(filters_cos,[25,25,1,-1])
filters_sin = np.reshape(filters_sin,[25,25,1,-1])
return filters_cos, filters_sin
def gaussian2d(shape=(5,5),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
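# Illustrative check (hedged, not from the original source):
#     gaussian2d((5, 5), 1.0)
# should return a symmetric 5x5 kernel whose entries sum to 1, matching
# MATLAB's fspecial('gaussian', [5 5], 1.0) up to floating-point error.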
def gausslabel(length=180, stride=2):
gaussian_pdf = signal.gaussian(length+1, 3)
label = np.reshape(np.arange(stride/2, length, stride), [1,1,-1,1])
y = np.reshape(np.arange(stride/2, length, stride), [1,1,1,-1])
delta = np.array(np.abs(label - y), dtype=int)
    delta = np.minimum(delta, length-delta)+length//2
return gaussian_pdf[delta]
def angle_delta(A, B, max_D=np.pi*2):
delta = np.abs(A - B)
delta = np.minimum(delta, max_D-delta)
return delta
def fmeasure(P, R):
return 2*P*R/(P+R+1e-10)
def distance(y_true, y_pred, max_D=16, max_O=np.pi/6):
D = spatial.distance.cdist(y_true[:, :2], y_pred[:, :2], 'euclidean')
O = spatial.distance.cdist(np.reshape(y_true[:, 2], [-1, 1]), np.reshape(y_pred[:, 2], [-1, 1]), angle_delta)
return (D<=max_D)*(O<=max_O)
def metric_P_R_F(y_true, y_pred, maxd=16, maxo=np.pi/6):
# Calculate Precision, Recall, F-score
if y_pred.shape[0]==0 or y_true.shape[0]==0:
return 0,0,0,0,0
y_true, y_pred = np.array(y_true), np.array(y_pred)
total_gt, total = float(y_true.shape[0]), float(y_pred.shape[0])
# Using L2 loss
dis = spatial.distance.cdist(y_pred[:, :2], y_true[:, :2], 'euclidean')
mindis,idx = dis.min(axis=1),dis.argmin(axis=1)
#Change to adapt to new annotation: old version. When training, comment it
# y_pred[:,2] = -y_pred[:,2]
angle = abs(np.mod(y_pred[:,2],2*np.pi) - y_true[idx,2])
angle = np.asarray([angle, 2*np.pi-angle]).min(axis=0)
# Satisfy the threshold
tmp=(mindis <= maxd) & (angle<=maxo)
#print('mindis,idx,angle,tmp=%s,%s,%s,%s'%(mindis,idx,angle,tmp))
precision = len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)]))/float(y_pred.shape[0])
recall = len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)]))/float(y_true.shape[0])
#print('pre=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_pred.shape[0])))
#print('recall=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_true.shape[0])))
if recall!=0:
loc = np.mean(mindis[(mindis <= maxd) & (angle<=maxo)])
ori = np.mean(angle[(mindis <= maxd) & (angle<=maxo)])
else:
loc = 0
ori = 0
return precision, recall, fmeasure(precision, recall), loc, ori
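# Illustrative usage of metric_P_R_F (hypothetical arrays, not from the
# original source):
#     gt  = np.array([[10., 10., 0.0]])         # x, y, orientation
#     det = np.array([[12., 11., 0.1, 0.9]])    # x, y, orientation, score
#     p, r, f, loc, ori = metric_P_R_F(gt, det)
# With the default thresholds (16 px, pi/6 rad) the lone detection matches
# the lone ground-truth minutia, so p == r == 1.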
def nms(mnt):
if mnt.shape[0]==0:
return mnt
# sort score
mnt_sort = mnt.tolist()
mnt_sort.sort(key=lambda x:x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=np.pi/6).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
    for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i+1:] = keep_list[i+1:]*(1-inrange[i, i+1:])
    return mnt_sort[keep_list.astype(bool), :]
def fuse_nms(mnt, mnt_set_2):
if mnt.shape[0]==0:
return mnt
# sort score
all_mnt = np.concatenate((mnt, mnt_set_2))
mnt_sort = all_mnt.tolist()
mnt_sort.sort(key=lambda x:x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=2*np.pi).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
    for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i+1:] = keep_list[i+1:]*(1-inrange[i, i+1:])
    return mnt_sort[keep_list.astype(bool), :]
def py_cpu_nms(det, thresh):
if det.shape[0]==0:
return det
dets = det.tolist()
dets.sort(key=lambda x:x[3], reverse=True)
dets = np.array(dets)
box_sz = 25
x1 = np.reshape(dets[:,0],[-1,1]) -box_sz
y1 = np.reshape(dets[:,1],[-1,1]) -box_sz
x2 = np.reshape(dets[:,0],[-1,1]) +box_sz
y2 = np.reshape(dets[:,1],[-1,1]) +box_sz
scores = dets[:, 2]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep, :]
def draw_minutiae(image, minutiae, fname, saveimage= False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image,cmap='gray')
# Check if no minutiae
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x+r*np.cos(o)], [y, y+r*np.sin(o)], 'r-')
if drawScore == True:
plt.text(x - 10, y - 10, '%.2f' % s, color='yellow', fontsize=4)
plt.axis([0,image.shape[1],image.shape[0],0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight', pad_inches = 0)
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay(image, minutiae, mnt_gt, fname, saveimage= False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image,cmap='gray')
    if mnt_gt.shape[1] > 3:
        mnt_gt = mnt_gt[:, :3]
    if mnt_gt.shape[0] > 0:
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
for x, y, o in mnt_gt:
plt.plot([x, x+r*np.cos(o)], [y, y+r*np.sin(o)], 'b-')
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
        for m in minutiae:
            x, y, o = m[0], m[1], m[2]
            plt.plot([x, x+r*np.cos(o)], [y, y+r*np.sin(o)], 'r-')
            if drawScore == True:
                # the score is assumed to sit in the fourth column when
                # drawScore is requested
                plt.text(x - 10, y - 10, '%.2f' % m[3], color='yellow', fontsize=4)
plt.axis([0,image.shape[1],image.shape[0],0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay_with_score(image, minutiae, mnt_gt, fname, saveimage=False, r=15):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
if mnt_gt.shape[0] > 0:
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
if mnt_gt.shape[1] > 3:
for x, y, o, s in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
plt.text(x - 10, y - 5, '%.2f' % s, color='green', fontsize=4)
else:
for x, y, o in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
plt.text(x-10,y-10,'%.2f'%s,color='yellow',fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_ori_on_img(img, ori, mask, fname, saveimage=False, coh=None, stride=16):
ori = np.squeeze(ori)
#mask = np.squeeze(np.round(mask))
img = np.squeeze(img)
ori = ndimage.zoom(ori, np.array(img.shape)/np.array(ori.shape, dtype=float), order=0)
if mask.shape != img.shape:
mask = ndimage.zoom(mask, np.array(img.shape)/np.array(mask.shape, dtype=float), order=0)
if coh is None:
coh = np.ones_like(img)
fig = plt.figure()
plt.imshow(img,cmap='gray')
    for i in range(stride,img.shape[0],stride):
        for j in range(stride,img.shape[1],stride):
if mask[i, j] == 0:
continue
x, y, o, r = j, i, ori[i,j], coh[i,j]*(stride*0.9)
plt.plot([x, x+r*np.cos(o)], [y, y+r*np.sin(o)], 'r-')
plt.axis([0,img.shape[1],img.shape[0],0])
plt.axis('off')
if saveimage:
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def local_constrast_enhancement(img):
img = img.astype(np.float32)
meanV = cv2.blur(img,(15,15))
normalized = img - meanV
var = abs(normalized)
var = cv2.blur(var,(15,15))
normalized = normalized/(var+10) *0.75
normalized = np.clip(normalized, -1, 1)
normalized = (normalized+1)*127.5
return normalized
def get_quality_map_ori_dict(img, dict, spacing, dir_map = None, block_size = 16):
    if img.dtype=='uint8':
        img = img.astype(np.float64)
    img = FastEnhanceTexture(img)
    h, w = img.shape
    blkH, blkW = dir_map.shape
    quality_map = np.zeros((blkH,blkW),dtype=np.float64)
    fre_map = np.zeros((blkH,blkW),dtype=np.float64)
    ori_num = len(dict)
    #dir_map = math.pi/2 - dir_map
    dir_ind = dir_map*ori_num/math.pi
    dir_ind = dir_ind.astype(int)
    dir_ind = dir_ind%ori_num
    patch_size = np.sqrt(dict[0].shape[1])
    patch_size = patch_size.astype(int)
pad_size = (patch_size-block_size)//2
img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')
for i in range(0,blkH):
for j in range(0,blkW):
ind = dir_ind[i,j]
patch = img[i*block_size:i*block_size+patch_size,j*block_size:j*block_size+patch_size]
patch = patch.reshape(patch_size*patch_size,)
patch = patch - np.mean(patch)
patch = patch / (np.linalg.norm(patch)+0.0001)
patch[patch>0.05] = 0.05
patch[patch<-0.05] = -0.05
simi = np.dot(dict[ind], patch)
similar_ind = np.argmax(abs(simi))
quality_map[i,j] = np.max(abs(simi))
fre_map[i,j] = 1./spacing[ind][similar_ind]
quality_map = gaussian(quality_map,sigma=2)
return quality_map, fre_map
def FastEnhanceTexture(img,sigma=2.5,show=False):
img = img.astype(np.float32)
h, w = img.shape
h2 = 2 ** nextpow2(h)
w2 = 2 ** nextpow2(w)
FFTsize = np.max([h2, w2])
    x, y = np.meshgrid(range(-FFTsize // 2, FFTsize // 2), range(-FFTsize // 2, FFTsize // 2))
r = np.sqrt(x * x + y * y) + 0.0001
r = r/FFTsize
L = 1. / (1 + (2 * math.pi * r * sigma)** 4)
img_low = LowpassFiltering(img, L)
gradim1= compute_gradient_norm(img)
gradim1 = LowpassFiltering(gradim1,L)
gradim2= compute_gradient_norm(img_low)
gradim2 = LowpassFiltering(gradim2,L)
diff = gradim1-gradim2
ar1 = np.abs(gradim1)
diff[ar1>1] = diff[ar1>1]/ar1[ar1>1]
diff[ar1 <= 1] = 0
cmin = 0.3
cmax = 0.7
weight = (diff-cmin)/(cmax-cmin)
weight[diff<cmin] = 0
weight[diff>cmax] = 1
u = weight * img_low + (1-weight)* img
temp = img - u
lim = 20
temp1 = (temp + lim) * 255 / (2 * lim)
temp1[temp1 < 0] = 0
temp1[temp1 >255] = 255
v = temp1
if show:
plt.imshow(v,cmap='gray')
plt.show()
return v
def compute_gradient_norm(input):
input = input.astype(np.float32)
Gx, Gy = np.gradient(input)
out = np.sqrt(Gx * Gx + Gy * Gy) + 0.000001
return out
def LowpassFiltering(img,L):
h,w = img.shape
h2,w2 = L.shape
img = cv2.copyMakeBorder(img, 0, h2-h, 0, w2-w, cv2.BORDER_CONSTANT, value=0)
img_fft = np.fft.fft2(img)
    img_fft = np.fft.fftshift(img_fft)
|
numpy.fft.fftshift
|
"""
Tests for the asym_logit.py file. These tests do not include tests of
the functions that perform the mathematical calculations necessary to estimate
the multinomial Asymmetric Logit model.
"""
import warnings
import unittest
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse import diags
import pylogit.asym_logit as asym
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import xrange, range
except ImportError:
pass
class GenericTestCase(unittest.TestCase):
"""
Defines the common setUp method used for the different type of tests.
"""
def setUp(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
self.fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
self.fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
self.fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
self.fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
self.fake_shapes = np.array([-1, 1])
        # Create names for the shape parameters
self.fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
self.fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
self.natural_shapes = asym._convert_eta_to_c(self.fake_shapes,
self.fake_shape_ref_pos)
# Create an array of all model parameters
self.fake_all_params = np.concatenate((self.fake_shapes,
self.fake_intercepts,
self.fake_betas))
# The mapping between rows and alternatives is given below.
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
        # index in the asymmetric logit model.
self.fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
self.fake_index = self.fake_design.dot(self.fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": self.fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
        # Create the index specification and name dictionary for the model
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
self.constructor_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
self.constructor_kwargs = {"intercept_ref_pos":
self.fake_intercept_ref_pos,
"shape_ref_pos": self.fake_shape_ref_pos,
"names": self.fake_names,
"intercept_names":
self.fake_intercept_names,
"shape_names": self.fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
self.model_obj = asym.MNAL(*self.constructor_args,
**self.constructor_kwargs)
return None
# Note that inheritance is used to share the setUp method.
class ChoiceObjectTests(GenericTestCase):
"""
Defines the tests for the Asymmetric Logit model's `__init__` function and
its class methods.
"""
def test_shape_ignore_msg_in_constructor(self):
"""
        Ensures that a ValueError is raised when the 'shape_ref_pos' keyword
        argument is not an integer. This guards against expecting all of
        the shape parameters of the Asymmetric Logit model to be identified.
        It also alerts users that they are using an Asymmetric Logit model
        when they might have been expecting to instantiate a different
        choice model.
"""
# Create a variable for the kwargs being passed to the constructor
kwargs = deepcopy(self.constructor_kwargs)
for pos, item in enumerate(['foo', None]):
kwargs["shape_ref_pos"] = item
            # Test to ensure that a ValueError is raised when the
            # shape_ref_pos kwarg is not an integer.
self.assertRaises(ValueError, asym.MNAL,
*self.constructor_args, **kwargs)
return None
def test_ridge_warning_in_fit_mle(self):
"""
Ensure that a UserWarning is raised when one passes the ridge keyword
argument to the `fit_mle` method of an Asymmetric Logit model object.
"""
# Create a variable for the fit_mle function's kwargs.
# The print_res = False arguments are to make sure strings aren't
# printed to the console unnecessarily.
kwargs = {"ridge": 0.5,
"print_res": False}
# Test to make sure that the ridge warning message is printed when
# using the ridge keyword argument
with warnings.catch_warnings(record=True) as w:
# Use this filter to always trigger the UserWarnings
warnings.simplefilter('always', UserWarning)
self.model_obj.fit_mle(self.fake_all_params, **kwargs)
self.assertGreaterEqual(len(w), 1)
self.assertIsInstance(w[0].category, type(UserWarning))
self.assertIn(asym._ridge_warning_msg, str(w[0].message))
return None
def test_init_shapes_length_error_in_fit_mle(self):
"""
Ensures that ValueError is raised if init_shapes has wrong length.
"""
# Create a variable for the arguments to the fit_mle function.
# Note `None` is the argument passed when using the init_shapes,
# init_intercepts and init_coefs keyword arguments.
fit_args = [None]
# Create base set of kwargs for fit_mle function
kwargs = {"init_intercepts": self.fake_intercepts,
"init_coefs": self.fake_betas,
"print_res": False}
for i in [1, -1]:
# This will ensure we have too many or too few shape parameters.
num_shapes = self.fake_rows_to_alts.shape[1] - 1 + i
kwargs["init_shapes"] = np.arange(num_shapes)
# Test to ensure that the ValueError is raised when using an
# init_shapes kwarg with an incorrect number of parameters
self.assertRaises(ValueError, self.model_obj.fit_mle,
*fit_args, **kwargs)
def test_init_intercepts_length_error_in_fit_mle(self):
"""
Ensures that ValueError is raised if init_intercepts has wrong length.
"""
# Create a variable for the arguments to the fit_mle function.
# Note `None` is the argument passed when using the init_shapes,
# init_intercepts and init_coefs keyword arguments.
fit_args = [None]
# Create base set of kwargs for fit_mle function
kwargs = {"init_shapes": self.fake_shapes,
"init_coefs": self.fake_betas,
"print_res": False}
for i in [1, -1]:
# This will ensure we have too many or too few intercepts
num_intercepts = self.fake_rows_to_alts.shape[1] - 1 + i
kwargs["init_intercepts"] = np.arange(num_intercepts)
            # Test to ensure that the ValueError is raised when using an
            # init_intercepts kwarg with an incorrect number of parameters
self.assertRaises(ValueError, self.model_obj.fit_mle,
*fit_args, **kwargs)
return None
def test_init_coefs_length_error_in_fit_mle(self):
"""
Ensures that ValueError is raised if init_coefs has wrong length.
"""
# Create a variable for the arguments to the fit_mle function.
# Note `None` is the argument passed when using the init_shapes,
# init_intercepts and init_coefs keyword arguments.
fit_args = [None]
# Create base set of kwargs for fit_mle function
kwargs = {"init_shapes": self.fake_shapes,
"init_intercepts": self.fake_intercepts,
"print_res": False}
# Note there is only one beta, so we can't go lower than zero betas.
for i in [1, -1]:
# This will ensure we have too many or too few intercepts
num_coefs = self.fake_betas.shape[0] + i
kwargs["init_coefs"] = np.arange(num_coefs)
            # Test to ensure that the ValueError is raised when using an
            # init_coefs kwarg with an incorrect number of parameters
self.assertRaises(ValueError, self.model_obj.fit_mle,
*fit_args, **kwargs)
return None
def test_insufficient_initial_values_in_fit_mle(self):
"""
Ensure that value errors are raised if neither init_vals OR
(init_shapes and init_coefs) are passed.
"""
# Create a variable for the arguments to the fit_mle function.
# Note `None` is the argument passed when using the init_shapes,
# init_intercepts and init_coefs keyword arguments.
fit_args = [None]
# Create base set of incorrect kwargs for fit_mle function
kwargs = {"init_shapes": None,
"init_intercepts": self.fake_intercepts,
"init_coefs": None,
"print_res": False}
kwargs_2 = {"init_shapes": self.fake_shapes,
"init_intercepts": self.fake_intercepts,
"init_coefs": None,
"print_res": False}
kwargs_3 = {"init_shapes": None,
"init_intercepts": self.fake_intercepts,
"init_coefs": self.fake_betas,
"print_res": False}
for bad_kwargs in [kwargs, kwargs_2, kwargs_3]:
            # Test to ensure that the ValueError is raised when a
            # sufficient set of initial values is not passed
self.assertRaises(ValueError, self.model_obj.fit_mle,
*fit_args, **bad_kwargs)
return None
def test_keyword_argument_constructor_in_fit_mle(self):
"""
Ensures that the init_vals object can be successfully created from the
various init_shapes, init_intercepts, and init_coefs arguments.
"""
# Create a variable for the arguments to the fit_mle function.
# Note `None` is the argument passed when using the init_shapes,
# init_intercepts and init_coefs keyword arguments.
fit_args = [None]
# Create base set of incorrect kwargs for fit_mle function (note the
# ridge is the thing that is incorrect)
kwargs_1 = {"init_shapes": self.fake_shapes,
"init_intercepts": self.fake_intercepts,
"init_coefs": self.fake_betas,
"ridge": "foo",
"print_res": False}
kwargs_2 = {"init_shapes": self.fake_shapes,
"init_coefs": self.fake_betas,
"ridge": "foo",
"print_res": False}
        # Test to ensure that a TypeError is raised when using
        # either of these two kwargs. This ensures that we were able to
        # create the init_vals object since the ridge error check is after
        # the creation of this argument.
for kwargs in [kwargs_1, kwargs_2]:
self.assertRaisesRegexp(TypeError,
"ridge",
self.model_obj.fit_mle,
*fit_args,
**kwargs)
return None
def test_init_vals_length_error_in_fit_mle(self):
"""
Ensures that ValueError is raised if init_vals has wrong length.
"""
# Note there is only one beta, so we can't go lower than zero betas.
original_intercept_ref_position = self.fake_intercept_ref_pos
for intercept_ref_position in [None, original_intercept_ref_position]:
self.model_obj.intercept_ref_position = intercept_ref_position
for i in [1, -1]:
# This will ensure we have too many or too few intercepts
num_coefs = self.fake_betas.shape[0] + i
# Test to ensure that the ValueError when using an
# init_intercepts kwarg with an incorrect number of parameters
self.assertRaisesRegexp(ValueError,
"dimension",
self.model_obj.fit_mle,
np.arange(num_coefs),
print_res=False)
return None
# As before, inheritance is used to share the setUp method.
class HelperFuncTests(GenericTestCase):
"""
Defines tests for the 'helper' functions for estimating the Asymmetric
Logit model.
"""
def test_split_param_vec_with_intercepts(self):
"""
Ensures that split_param_vec returns (shapes, intercepts, index_coefs)
when called from within asym_logit.py.
"""
# Store the results of split_param_vec()
split_results = asym.split_param_vec(self.fake_all_params,
self.fake_rows_to_alts,
self.fake_design)
# Check for expected results.
for item in split_results[1:]:
self.assertIsInstance(item, np.ndarray)
self.assertEqual(len(item.shape), 1)
npt.assert_allclose(split_results[0], self.fake_shapes)
npt.assert_allclose(split_results[1], self.fake_intercepts)
npt.assert_allclose(split_results[2], self.fake_betas)
return None
def test_split_param_vec_without_intercepts(self):
"""
Ensures that split_param_vec returns (shapes, intercepts, index_coefs)
when called from within asym_logit.py.
"""
# Store the results of split_param_vec()
shapes_and_betas = np.concatenate([self.fake_shapes,
self.fake_betas])
split_results = asym.split_param_vec(shapes_and_betas,
self.fake_rows_to_alts,
self.fake_design)
# Check for expected results.
for idx in [0, 2]:
self.assertIsInstance(split_results[idx], np.ndarray)
self.assertEqual(len(split_results[idx].shape), 1)
npt.assert_allclose(split_results[0], self.fake_shapes)
self.assertIsNone(split_results[1])
npt.assert_allclose(split_results[2], self.fake_betas)
return None
def test_convert_eta_to_c(self):
"""
Check general transformation of estimated shape parameters to the
original parameterization. Check overflow handling.
"""
# Create a 2d array of shapes, where each row represents another
# vector of shapes to be transformed and tested
test_shapes = np.array([[-1, 1],
[800, 1],
[-1, -800],
[0, 0]])
#####
# Determine the expected results
#####
expected_results = np.ones((test_shapes.shape[0], 3))
expected_results[-1] = 1.0 / 3
# Explicitly calculate the results for the first row
vals_1 = np.array([test_shapes[0, 0], test_shapes[0, 1], 0])
exp_vals_1 = np.exp(vals_1)
denom_1 = exp_vals_1.sum()
expected_results[0] = exp_vals_1 / denom_1
# Explicitly calculate the results for the middle rows, taking care of
# overflow and underflow.
vals_2 = np.array([test_shapes[1, 0], test_shapes[1, 1], 0])
exp_vals_2 = np.exp(vals_2)
exp_vals_2[0] = asym.max_comp_value
denom_2 = exp_vals_2.sum()
expected_results[1] = exp_vals_2 / denom_2
vals_3 = np.array([test_shapes[2, 0], test_shapes[2, 1], 0])
exp_vals_3 = np.exp(vals_3)
exp_vals_3[1] = asym.min_comp_value
denom_3 = exp_vals_3.sum()
expected_results[2] = exp_vals_3 / denom_3
# Use _convert_eta_to_c and compare the results for 1D inputs
for i in xrange(test_shapes.shape[0]):
func_results = asym._convert_eta_to_c(test_shapes[i],
self.fake_shape_ref_pos)
# Make sure the results are correct
self.assertIsInstance(func_results, np.ndarray)
self.assertEqual(len(func_results.shape), 1)
self.assertEqual(func_results.shape[0], expected_results.shape[1])
npt.assert_allclose(func_results, expected_results[i])
# Check the 2D inputs. Note that for 2D ndarray, we need the array of
# shape parameters to be transposed such that we have an array of
# shape (num_estimated_shapes, num_samples_of_parameters).
# To adequately compare, we need to transpose expected_results too.
func_results_2d = asym._convert_eta_to_c(test_shapes.T,
self.fake_shape_ref_pos)
# Make sure the results are correct
self.assertIsInstance(func_results_2d, np.ndarray)
self.assertEqual(len(func_results_2d.shape), 2)
self.assertEqual(func_results_2d.shape, expected_results.T.shape)
npt.assert_allclose(func_results_2d, expected_results.T)
return None
def test_asym_utility_transform(self):
"""
Ensures that `_asym_utility_transform()` returns correct results
"""
# Create a set of systematic utilities that will test the function for
# correct calculations, for proper dealing with overflow, and for
# proper dealing with underflow.
# The first and third elements tests general calculation.
# The second element of index_array should lead to the transformation
# equaling the 'natural' shape parameter for alternative 2.
# The fourth element should test what happens with underflow and should
# lead to max_comp_value.
# The fifth element should test what happens with overflow and should
# lead to -1.0 * max_comp_value
index_array = np.array([1, 0, -1, 1e400, -1e400])
        # We can use the following array of the shape parameters to test
# the underflow capabilities with respect to the shape
# parameters.
test_shapes_2 = np.array([-800, 0])
test_shapes_3 = np.array([800, 0])
# Figure out the value of the 'natural' shape parameters
natural_shapes_2 = asym._convert_eta_to_c(test_shapes_2,
self.fake_shape_ref_pos)
natural_shapes_3 = asym._convert_eta_to_c(test_shapes_3,
self.fake_shape_ref_pos)
        # Create the array of expected results when using shape parameters
# of 'normal' magnitudes.
intercept_1 = self.fake_intercepts[0]
intercept_2 = self.fake_intercepts[1]
intercept_3 = 0
result_1 = (intercept_1 +
np.log(self.natural_shapes[0]) * (1 - index_array[0]))
result_2 = intercept_2 + np.log(self.natural_shapes[1])
# Note the division by 2 is due to the 'J - 1' term. See the original
# definition of the transformation.
result_3 = (intercept_3 +
np.log(self.natural_shapes[2]) -
np.log((1 - self.natural_shapes[2]) / 2) * index_array[2])
expected_results = np.array([result_1,
result_2,
result_3,
asym.max_comp_value + intercept_1,
- asym.max_comp_value])[:, None]
        # Create the array of expected results when using shape parameters
# of 'abnormally' small magnitudes.
# Note the division by 2 is due to the 'J - 1' term. See the original
# definition of the transformation.
result_2_2 = intercept_2 + np.log(natural_shapes_2[1])
result_3_2 = (intercept_3 +
np.log(natural_shapes_2[2]) -
np.log((1 - natural_shapes_2[2]) / 2) * index_array[2])
# Note the '0' comes from (1-1) * ln(shape)
expected_results_2 = np.array([0 + intercept_1,
result_2_2,
result_3_2,
asym.max_comp_value + intercept_1,
-asym.max_comp_value])[:, None]
# Create the array of expected results when using shape parameters
# of 'abnormally' large magnitudes.
result_2_3 = intercept_2 + np.log(natural_shapes_3[1])
result_3_3 = (intercept_3 +
np.log(natural_shapes_3[2]) -
np.log((1 - natural_shapes_3[2]) / 2) * index_array[2])
expected_results_3 = np.array([0 + intercept_1,
result_2_3,
result_3_3,
0 + intercept_1,
-asym.max_comp_value])[:, None]
#####
# Perform various rounds of checking
#####
# Use the utility transformation function, round_1
alt_id_vals = self.fake_df[self.alt_id_col].values
args = [index_array,
alt_id_vals,
self.fake_rows_to_alts,
self.fake_shapes,
self.fake_intercepts]
kwargs = {"intercept_ref_pos": self.fake_intercept_ref_pos,
"shape_ref_position": self.fake_shape_ref_pos}
func_results = asym._asym_utility_transform(*args, **kwargs)
# Use the utility transformation function, round_2
args[3] = test_shapes_2
func_results_2 = asym._asym_utility_transform(*args, **kwargs)
# Use the utility transformation function, round_3
args[3] = test_shapes_3
func_results_3 = asym._asym_utility_transform(*args, **kwargs)
# Check the correctness of the results
all_results = [(func_results, expected_results),
(func_results_2, expected_results_2),
(func_results_3, expected_results_3)]
for pos, (test_results, correct_results) in enumerate(all_results):
self.assertIsInstance(test_results, np.ndarray)
self.assertEqual(len(test_results.shape), 2)
self.assertEqual(test_results.shape[1], correct_results.shape[1])
self.assertEqual(test_results.shape[0], correct_results.shape[0])
npt.assert_allclose(test_results, correct_results)
return None
def test_asym_utility_transform_2d(self):
"""
        Ensures that `_asym_utility_transform()` returns correct results when
        called with 2-dimensional systematic utility, shape, and intercept
        arrays.
        """
# Create a set of systematic utilities that will test the function for
# correct calculations, for proper dealing with overflow, and for
# proper dealing with underflow.
# The first and third elements tests general calculation.
# The second element of index_array should lead to the transformation
# equaling the 'natural' shape parameter for alternative 2.
# The fourth element should test what happens with underflow and should
# lead to max_comp_value.
# The fifth element should test what happens with overflow and should
# lead to -1.0 * max_comp_value
index_array = np.array([1, 0, -1, 1e400, -1e400])
index_array_2d = np.concatenate([index_array[:, None],
index_array[:, None]],
axis=1)
# Create 2d array of shapes
shapes_2d = np.concatenate([self.fake_shapes[:, None],
self.fake_shapes[:, None]],
axis=1)
# Create 2d array of intercepts
intercepts_2d = np.concatenate([self.fake_intercepts[:, None],
self.fake_intercepts[:, None]],
axis=1)
        # We can use the following array of the shape parameters to test
# the underflow capabilities with respect to the shape
# parameters.
test_shapes_2 = np.array([-800, 0])
test_shapes_2_2d = np.concatenate([test_shapes_2[:, None],
test_shapes_2[:, None]],
axis=1)
test_shapes_3 = np.array([800, 0])
test_shapes_3_2d = np.concatenate([test_shapes_3[:, None],
test_shapes_3[:, None]],
axis=1)
# Figure out the value of the 'natural' shape parameters
natural_shapes_2 = asym._convert_eta_to_c(test_shapes_2,
self.fake_shape_ref_pos)
natural_shapes_3 = asym._convert_eta_to_c(test_shapes_3,
self.fake_shape_ref_pos)
        # Create the array of expected results when using shape parameters
# of 'normal' magnitudes.
intercept_1 = self.fake_intercepts[0]
intercept_2 = self.fake_intercepts[1]
intercept_3 = 0
result_1 = (intercept_1 +
np.log(self.natural_shapes[0]) * (1 - index_array[0]))
result_2 = intercept_2 + np.log(self.natural_shapes[1])
# Note the division by 2 is due to the 'J - 1' term. See the original
# definition of the transformation.
result_3 = (intercept_3 +
np.log(self.natural_shapes[2]) -
np.log((1 - self.natural_shapes[2]) / 2) * index_array[2])
expected_results = np.array([result_1,
result_2,
result_3,
asym.max_comp_value + intercept_1,
- asym.max_comp_value])[:, None]
        # Create the array of expected results when using shape parameters
# of 'abnormally' small magnitudes.
# Note the division by 2 is due to the 'J - 1' term. See the original
# definition of the transformation.
result_2_2 = intercept_2 + np.log(natural_shapes_2[1])
result_3_2 = (intercept_3 +
np.log(natural_shapes_2[2]) -
np.log((1 - natural_shapes_2[2]) / 2) * index_array[2])
# Note the '0' comes from (1-1) * ln(shape)
expected_results_2 = np.array([0 + intercept_1,
result_2_2,
result_3_2,
asym.max_comp_value + intercept_1,
-asym.max_comp_value])[:, None]
# Create the array of expected results when using shape parameters
# of 'abnormally' large magnitudes.
result_2_3 = intercept_2 + np.log(natural_shapes_3[1])
result_3_3 = (intercept_3 +
np.log(natural_shapes_3[2]) -
np.log((1 - natural_shapes_3[2]) / 2) * index_array[2])
expected_results_3 = np.array([0 + intercept_1,
result_2_3,
result_3_3,
0 + intercept_1,
-asym.max_comp_value])[:, None]
#####
# Perform various rounds of checking
#####
# Use the utility transformation function, round_1
alt_id_vals = self.fake_df[self.alt_id_col].values
args = [index_array_2d,
alt_id_vals,
self.fake_rows_to_alts,
shapes_2d,
intercepts_2d]
kwargs = {"intercept_ref_pos": self.fake_intercept_ref_pos,
"shape_ref_position": self.fake_shape_ref_pos}
func_results = asym._asym_utility_transform(*args, **kwargs)
# Use the utility transformation function, round_2
args[3] = test_shapes_2_2d
func_results_2 = asym._asym_utility_transform(*args, **kwargs)
# Use the utility transformation function, round_3
args[3] = test_shapes_3_2d
func_results_3 = asym._asym_utility_transform(*args, **kwargs)
# Check the correctness of the results
all_results = [(func_results, expected_results),
(func_results_2, expected_results_2),
(func_results_3, expected_results_3)]
for pos, (test_results, correct_results) in enumerate(all_results):
self.assertIsInstance(test_results, np.ndarray)
self.assertEqual(len(test_results.shape), 2)
self.assertEqual(test_results.shape[1], 2)
self.assertEqual(test_results.shape[0], correct_results.shape[0])
for col in [0, 1]:
npt.assert_allclose(test_results[:, col][:, None],
correct_results)
return None
def test_asym_transform_deriv_v(self):
"""
Tests basic behavior of asym_transform_deriv_v. Essentially the
only things that can go wrong are underflow or overflow from the shape
parameters going to zero or 1.0.
"""
# Declare set of index values to be tested
test_index = np.array([1, 0, -1, -2, 2])
# Figure out how many alternatives there are
num_alts = self.fake_rows_to_alts.shape[1]
# Test what happens with large shape parameters (that result in
# 'natural' shape parameters near 1.0)
large_shapes = np.array([800, 0])
large_results = np.array([0,
-np.log(asym.min_comp_value),
np.log(num_alts - 1),
asym.max_comp_value,
-np.log(asym.min_comp_value)])
# Test what happens with small shape parameters (that result in
# 'natural' shape parameters near 0.0)
small_shapes = np.array([-800, 0])
natural_small_shapes = asym._convert_eta_to_c(small_shapes,
self.fake_shape_ref_pos)
small_results = np.array([-np.log(natural_small_shapes[0]),
-np.log(natural_small_shapes[1]),
(np.log(num_alts - 1) -
np.log(1 - natural_small_shapes[2])),
(np.log(num_alts - 1) -
np.log(1 - natural_small_shapes[0])),
-np.log(natural_small_shapes[2])])
# Bundle the arguments needed for _asym_transform_deriv_v()
args = [test_index,
self.fake_df[self.alt_id_col].values,
self.fake_rows_to_alts,
self.fake_shapes]
# Create the needed output array
num_rows = test_index.shape[0]
output = diags(np.ones(num_rows), 0, format='csr')
# Validate the results from asym_transform_deriv_v
for shape_array, results in [(large_shapes, large_results),
(small_shapes, small_results)]:
# Get the results from asym_transform_deriv_v
args[-1] = shape_array
kwargs = {"ref_position": self.fake_shape_ref_pos,
"output_array": output}
derivative = asym._asym_transform_deriv_v(*args, **kwargs)
# Ensure the results are as expected
self.assertIsInstance(derivative, type(output))
self.assertEqual(len(derivative.shape), 2)
self.assertEqual(derivative.shape, (num_rows, num_rows))
npt.assert_allclose(
|
np.diag(derivative.A)
|
numpy.diag
|
from datetime import timedelta
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Type
import numpy as np
import pandas as pd
from athenian.api.controllers.features.github.check_run_metrics_accelerated import \
calculate_interval_intersections
from athenian.api.controllers.features.histogram import calculate_histogram, Histogram, Scale
from athenian.api.controllers.features.metric import make_metric, Metric, MetricFloat, MetricInt, \
MetricTimeDelta
from athenian.api.controllers.features.metric_calculator import AverageMetricCalculator, \
BinnedHistogramCalculator, BinnedMetricCalculator, HistogramCalculatorEnsemble, \
make_register_metric, MaxMetricCalculator, MetricCalculator, MetricCalculatorEnsemble, \
RatioCalculator, SumMetricCalculator
from athenian.api.controllers.miners.github.check_run import check_suite_completed_column, \
check_suite_started_column, pull_request_closed_column, pull_request_merged_column, \
pull_request_started_column
from athenian.api.int_to_str import int_to_str
from athenian.api.models.metadata.github import CheckRun
from athenian.api.models.web import CodeCheckMetricID
metric_calculators: Dict[str, Type[MetricCalculator]] = {}
histogram_calculators: Dict[str, Type[MetricCalculator]] = {}
register_metric = make_register_metric(metric_calculators, histogram_calculators)
class CheckRunMetricCalculatorEnsemble(MetricCalculatorEnsemble):
"""MetricCalculatorEnsemble adapted for CI check runs."""
def __init__(self,
*metrics: str,
quantiles: Sequence[float],
quantile_stride: int):
"""Initialize a new instance of CheckRunMetricCalculatorEnsemble class."""
super().__init__(*metrics,
quantiles=quantiles,
class_mapping=metric_calculators,
quantile_stride=quantile_stride)
class CheckRunHistogramCalculatorEnsemble(HistogramCalculatorEnsemble):
"""HistogramCalculatorEnsemble adapted for CI check runs."""
def __init__(self, *metrics: str, quantiles: Sequence[float]):
"""Initialize a new instance of CheckRunHistogramCalculatorEnsemble class."""
super().__init__(*metrics, quantiles=quantiles, class_mapping=histogram_calculators)
class CheckRunBinnedMetricCalculator(BinnedMetricCalculator):
"""BinnedMetricCalculator adapted for CI check runs."""
ensemble_class = CheckRunMetricCalculatorEnsemble
class CheckRunBinnedHistogramCalculator(BinnedHistogramCalculator):
"""BinnedHistogramCalculator adapted for CI check runs."""
ensemble_class = CheckRunHistogramCalculatorEnsemble
def group_check_runs_by_pushers(pushers: List[List[str]],
df: pd.DataFrame,
) -> List[np.ndarray]:
"""Triage check runs by their corresponding commit authors."""
if not pushers or df.empty:
return [np.arange(len(df))]
indexes = []
for group in pushers:
group = np.unique(group).astype("S")
author_logins = df[CheckRun.author_login.name].values.astype("S")
included_indexes = np.nonzero(np.in1d(author_logins, group))[0]
indexes.append(included_indexes)
return indexes
def make_check_runs_count_grouper(df: pd.DataFrame) -> Tuple[
Callable[[pd.DataFrame], List[np.ndarray]],
np.ndarray,
Sequence[int]]:
"""
Split check runs by parent check suite size.
:return: 1. Function to return the groups. \
2. Check suite node IDs column. \
3. Check suite sizes.
"""
suites = df[CheckRun.check_suite_node_id.name].values
unique_suites, run_counts = np.unique(suites, return_counts=True)
suite_blocks = np.array(np.split(np.argsort(suites), np.cumsum(run_counts)[:-1]))
unique_run_counts, back_indexes, group_counts = np.unique(
run_counts, return_inverse=True, return_counts=True)
run_counts_order = np.argsort(back_indexes)
ordered_indexes = np.concatenate(suite_blocks[run_counts_order])
groups = np.split(ordered_indexes, np.cumsum(group_counts * unique_run_counts)[:-1])
def group_check_runs_by_check_runs_count(_) -> List[np.ndarray]:
return groups
return group_check_runs_by_check_runs_count, suites, unique_run_counts
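# Illustrative sketch (not part of the original module): shows, on a toy array of
# suite node IDs, how the np.unique / np.argsort / np.split combination above groups
# the row indexes of check runs by the size of their parent suite. The helper name
# and the toy values are hypothetical; `np` is the module-level numpy import.
def _example_group_by_suite_size():
    suites = np.array([10, 10, 20, 30, 30, 30])  # suite 10 has 2 runs, 20 has 1, 30 has 3
    unique_suites, run_counts = np.unique(suites, return_counts=True)
    suite_blocks = np.array(np.split(np.argsort(suites), np.cumsum(run_counts)[:-1]),
                            dtype=object)
    unique_run_counts, back_indexes, group_counts = np.unique(
        run_counts, return_inverse=True, return_counts=True)
    run_counts_order = np.argsort(back_indexes)
    ordered_indexes = np.concatenate(suite_blocks[run_counts_order]).astype(int, copy=False)
    groups = np.split(ordered_indexes, np.cumsum(group_counts * unique_run_counts)[:-1])
    # groups[0] -> rows of 1-run suites, groups[1] -> 2-run suites, groups[2] -> 3-run suites
    return groups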
class FirstSuiteEncounters(MetricCalculator[float]):
"""Indicate check suites that at least tried to execute."""
metric = MetricInt
is_pure_dependency = True
complete_suite_statuses = [b"COMPLETED", b"FAILURE", b"SUCCESS", b"PENDING"]
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
_, first_suite_encounters = np.unique(
facts[CheckRun.check_suite_node_id.name].values,
return_index=True)
# ignore incomplete suites
completed = np.in1d(
facts[CheckRun.check_suite_status.name].values[first_suite_encounters].astype("S"),
self.complete_suite_statuses)
completed[
facts[CheckRun.check_suite_conclusion.name].values[first_suite_encounters].astype("S")
== b"SKIPPED"] = False
first_suite_encounters = first_suite_encounters[completed]
order = np.argsort(facts[check_suite_started_column].values[first_suite_encounters])
return first_suite_encounters[order]
def _value(self, samples: np.ndarray) -> Metric[None]:
raise NotImplementedError()
@register_metric(CodeCheckMetricID.SUITES_COUNT)
class SuitesCounter(SumMetricCalculator[int]):
"""Number of executed check suites metric."""
metric = MetricInt
deps = (FirstSuiteEncounters,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
first_suite_encounters = self._calcs[0].peek
result = np.full((len(min_times), len(facts)), 0, dtype=int)
result[:, first_suite_encounters] = 1
wrong_times = (
(facts[check_suite_started_column].values >= max_times[:, None])
|
(facts[check_suite_started_column].values < min_times[:, None])
)
result[wrong_times] = 0
return result
@register_metric(CodeCheckMetricID.SUITES_IN_PRS_COUNT)
class SuitesInPRsCounter(SumMetricCalculator[int]):
"""Number of executed check suites in pull requests metric."""
metric = MetricInt
deps = (SuitesCounter,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
result = self._calcs[0].peek.copy()
result[:, facts[CheckRun.pull_request_node_id.name] == 0] = 0
return result
class SuitesInStatusCounter(SumMetricCalculator[int]):
"""Number of executed check suites metric with the specified `statuses`."""
metric = MetricInt
statuses = {}
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
started = facts[check_suite_started_column].values.astype(min_times.dtype)
statuses = facts[CheckRun.check_suite_status.name].values.astype("S")
conclusions = facts[CheckRun.check_suite_conclusion.name].values.astype("S")
relevant = np.zeros_like(started, dtype=bool)
for status, status_conclusions in self.statuses.items():
status_mask = statuses == status
if status_conclusions:
mask = np.zeros_like(status_mask)
for sc in status_conclusions:
mask |= conclusions == sc
mask &= status_mask
else:
mask = status_mask
relevant |= mask
_, first_encounters = np.unique(
facts[CheckRun.check_suite_node_id.name].values,
return_index=True)
mask = np.zeros_like(relevant)
mask[first_encounters] = True
relevant[~mask] = False
started[~relevant] = None
result = np.zeros((len(min_times), len(facts)), int)
result[(min_times[:, None] <= started) & (started < max_times[:, None])] = 1
return result
@register_metric(CodeCheckMetricID.SUCCESSFUL_SUITES_COUNT)
class SuccessfulSuitesCounter(SuitesInStatusCounter):
"""Number of successfully executed check suites metric."""
statuses = {
b"COMPLETED": [b"SUCCESS", b"NEUTRAL"],
b"SUCCESS": [],
b"PENDING": [],
}
@register_metric(CodeCheckMetricID.FAILED_SUITES_COUNT)
class FailedSuitesCounter(SuitesInStatusCounter):
"""Number of failed check suites metric."""
statuses = {
b"COMPLETED": [b"FAILURE", b"STALE", b"ACTION_REQUIRED"],
b"FAILURE": [],
}
@register_metric(CodeCheckMetricID.CANCELLED_SUITES_COUNT)
class CancelledSuitesCounter(SuitesInStatusCounter):
"""Number of cancelled check suites metric."""
statuses = {
b"COMPLETED": [b"CANCELLED"],
}
@register_metric(CodeCheckMetricID.SUCCESS_RATIO)
class SuiteSuccessRatioCalculator(RatioCalculator):
"""Ratio of successful check suites divided by the overall count."""
deps = (SuccessfulSuitesCounter, SuitesCounter)
SuiteTimeCalculatorAnalysisDType = np.dtype([("elapsed", "timedelta64[s]"), ("size", int)])
SuiteTimeCalculatorAnalysisMetric = make_metric(
"SuiteTimeCalculatorAnalysisMetric",
__name__,
SuiteTimeCalculatorAnalysisDType,
np.array((np.timedelta64("NaT"), MetricInt.nan), dtype=SuiteTimeCalculatorAnalysisDType))
class SuiteTimeCalculatorAnalysis(MetricCalculator[None]):
"""Measure elapsed time and size of each check suite."""
may_have_negative_values = False
metric = SuiteTimeCalculatorAnalysisMetric
is_pure_dependency = True
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.ndarray:
unique_suites, first_encounters, inverse_indexes, run_counts = np.unique(
facts[CheckRun.check_suite_node_id.name].values,
return_index=True, return_inverse=True, return_counts=True)
statuses = facts[CheckRun.check_suite_status.name].values[first_encounters].astype("S")
completed = np.in1d(statuses, [b"COMPLETED", b"SUCCESS", b"FAILURE"])
conclusions = facts[CheckRun.check_suite_conclusion.name].values[
first_encounters[completed]
].astype("S")
sensibly_completed = np.flatnonzero(completed)[
np.in1d(conclusions, [b"CANCELLED", b"SKIPPED"], invert=True)]
# first_encounters[sensibly_completed] gives the indexes of the completed suites
first_encounters = first_encounters[sensibly_completed]
# measure elapsed time for each suite size group
suite_blocks = np.array(np.split(np.argsort(inverse_indexes), np.cumsum(run_counts)[:-1]),
dtype=object)
unique_run_counts, back_indexes, group_counts = np.unique(
run_counts, return_inverse=True, return_counts=True)
run_counts_order = np.argsort(back_indexes)
ordered_indexes = np.concatenate(suite_blocks[run_counts_order]).astype(int, copy=False)
groups = np.split(ordered_indexes, np.cumsum(group_counts * unique_run_counts)[:-1])
suite_started_col = facts[check_suite_started_column].values
suite_finished_col = facts[check_suite_completed_column].values
suite_finished = np.concatenate([
suite_finished_col[group].reshape(-1, unique_run_count)[:, 0]
for group, unique_run_count in zip(groups, unique_run_counts)
])
suite_started = np.concatenate([
suite_started_col[group].reshape(-1, unique_run_count)[:, 0]
for group, unique_run_count in zip(groups, unique_run_counts)
])
elapsed = suite_finished - suite_started
# reorder the sequence to match unique_suites
suite_order = np.argsort(run_counts_order)
structs = np.zeros_like(suite_order, dtype=self.dtype)
structs["elapsed"] = elapsed[suite_order]
structs["size"] = np.repeat(unique_run_counts, group_counts)[suite_order]
suite_started = suite_started[suite_order]
result = np.full((len(min_times), len(facts)),
np.array([(np.timedelta64("NaT"), 0)], dtype=self.dtype))
time_relevant_suite_mask = \
(min_times[:, None] <= suite_started) & (suite_started < max_times[:, None])
result_structs = np.repeat(structs[sensibly_completed][None, :], len(min_times), axis=0)
mask = ~time_relevant_suite_mask[:, sensibly_completed]
result_structs["elapsed"][mask] = np.timedelta64("NaT")
result_structs["size"][mask] = 0
result[:, first_encounters] = result_structs
return result
def _value(self, samples: np.ndarray) -> Metric[None]:
return self.metric.from_fields(False, None, None, None)
@register_metric(CodeCheckMetricID.SUITE_TIME)
class SuiteTimeCalculator(AverageMetricCalculator[timedelta]):
"""Average check suite execution time metric."""
may_have_negative_values = False
metric = MetricTimeDelta
deps = (SuiteTimeCalculatorAnalysis,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.ndarray:
return self._calcs[0].peek["elapsed"].astype(self.dtype, copy=False)
@register_metric(CodeCheckMetricID.ROBUST_SUITE_TIME)
class RobustSuiteTimeCalculator(MetricCalculator[timedelta]):
"""Average check suite execution time metric, sustainable version."""
metric = MetricTimeDelta
deps = (SuiteTimeCalculatorAnalysis,)
def __call__(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
quantiles_mounted_at: Optional[int],
groups_mask: np.ndarray,
**kwargs) -> None:
"""Completely ignore the default boilerplate and calculate metrics from scratch."""
structs = self._calcs[0].peek
elapsed = structs["elapsed"]
meaningful_groups_mask = (
groups_mask
& (structs["size"] > 0).any(axis=0)[None, :]
& (elapsed == elapsed)
)
if self._quantiles != (0, 1):
discard_mask = self._calculate_discard_mask(
elapsed, quantiles_mounted_at, meaningful_groups_mask)
meaningful_groups_mask[discard_mask] = False
min_times = min_times[:quantiles_mounted_at]
# max_times = max_times[:quantiles_mounted_at]
elapsed = elapsed[:quantiles_mounted_at]
structs = structs[:quantiles_mounted_at]
sizes = structs["size"].astype("S")
repos = facts[CheckRun.repository_node_id.name].values
repos_sizes = np.char.add(int_to_str(repos), np.char.add(b"|", sizes))
self._metrics = metrics = []
for group_mask in meaningful_groups_mask:
group_repos_sizes = repos_sizes[:, group_mask]
group_elapsed = elapsed[:, group_mask]
vocabulary, mapped_indexes = np.unique(group_repos_sizes, return_inverse=True)
existing_vocabulary_indexes = np.nonzero(~np.char.endswith(vocabulary, b"|0"))[0]
masks_by_reposize = (
mapped_indexes[:, np.newaxis] == existing_vocabulary_indexes
).T.reshape((len(existing_vocabulary_indexes), *group_repos_sizes.shape))
# we don't call mean() because there may be empty slices
sums = np.sum(np.broadcast_to(group_elapsed[None, :],
(len(masks_by_reposize), *group_elapsed.shape)),
axis=-1, where=masks_by_reposize)
counts = np.sum(masks_by_reposize, axis=-1)
# backfill
if (missing := counts == 0).any():
existing_reposizes, existing_ts = np.array(np.nonzero(np.flip(~missing, axis=1)))
_, existing_reposizes_counts = np.unique(existing_reposizes, return_counts=True)
existing_borders = np.cumsum(existing_reposizes_counts)[:-1]
saturated_existing_ts = existing_ts.copy()
ts_len = counts.shape[-1]
saturated_existing_ts[existing_borders - 1] = ts_len
saturated_existing_ts[-1] = ts_len
offsets = np.diff(np.insert(saturated_existing_ts, existing_borders, 0), prepend=0)
offsets = np.delete(offsets, existing_borders + np.arange(len(existing_borders)))
reposize_indexes = np.repeat(existing_reposizes, offsets, axis=-1)
ts_indexes = np.repeat(existing_ts, offsets)
ts_indexes = \
ts_len - 1 - np.flip(ts_indexes.reshape(len(counts), ts_len), axis=1).ravel()
sums[missing] = sums[reposize_indexes, ts_indexes].reshape(sums.shape)[missing]
counts[missing] = counts[reposize_indexes, ts_indexes].reshape(sums.shape)[missing]
# average the individual backfilled means
means = sums / counts
if len(means):
ts_means = np.mean(means, axis=0)
else:
ts_means = np.full(len(min_times), None, dtype=elapsed.dtype)
# confidence intervals are not implemented
metrics.append([self.metric.from_fields(m is not None, m, None, None)
for m in ts_means.tolist()])
def _values(self) -> List[List[Metric[timedelta]]]:
return self._metrics
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
raise AssertionError("this must be never called")
def _value(self, samples: np.ndarray) -> Metric[timedelta]:
raise AssertionError("this must be never called")
@register_metric(CodeCheckMetricID.SUITES_PER_PR)
class SuitesPerPRCounter(AverageMetricCalculator[float]):
"""Average number of executed check suites per pull request metric."""
may_have_negative_values = False
metric = MetricFloat
deps = (FirstSuiteEncounters,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
first_suite_encounters = self._calcs[0].peek
pull_requests = facts[CheckRun.pull_request_node_id.name].values
_, first_pr_encounters, pr_suite_counts = np.unique(
pull_requests[first_suite_encounters], return_index=True, return_counts=True)
# we don't have to filter out 0-s because mask_pr_times is False for them
result = np.full((len(min_times), len(facts)), np.nan, dtype=np.float32)
result[:, first_suite_encounters[first_pr_encounters]] = pr_suite_counts
mask_pr_times = (
(facts[pull_request_started_column].values.astype(max_times.dtype, copy=False) <
max_times[:, None])
&
(facts[pull_request_closed_column].values.astype(min_times.dtype, copy=False) >=
min_times[:, None])
)
result[~mask_pr_times] = None
return result
@register_metric(CodeCheckMetricID.SUITE_TIME_PER_PR)
class SuiteTimePerPRCalculator(AverageMetricCalculator[timedelta]):
"""Average check suite execution in PRs time metric."""
may_have_negative_values = False
metric = MetricTimeDelta
deps = (SuiteTimeCalculator,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
pull_requests = facts[CheckRun.pull_request_node_id.name].values
no_prs_mask = pull_requests == 0
result = self._calcs[0].peek.copy()
result[:, no_prs_mask] = None
return result
@register_metric(CodeCheckMetricID.PRS_WITH_CHECKS_COUNT)
class PRsWithChecksCounter(SumMetricCalculator[int]):
"""Number of PRs with executed check suites."""
metric = MetricInt
deps = (SuitesPerPRCounter,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
result = self._calcs[0].peek.copy()
result[result != result] = 0  # NaN != NaN, so this zeroes the NaN entries
result[result > 0] = 1
return result
def calculate_check_run_outcome_masks(check_run_statuses: np.ndarray,
check_run_conclusions: np.ndarray,
check_suite_conclusions: Optional[np.ndarray],
with_success: bool,
with_failure: bool,
with_skipped: bool,
) -> List[np.ndarray]:
"""Calculate the check run success and failure masks."""
completed = check_run_statuses == b"COMPLETED"
if with_success or with_skipped:
neutrals = (check_run_conclusions == b"NEUTRAL")
result = []
if with_success:
result.append(
(completed & (
(check_run_conclusions == b"SUCCESS") |
(check_suite_conclusions == b"NEUTRAL") & neutrals
)) |
(check_run_statuses == b"SUCCESS") |
(check_run_statuses == b"PENDING") # noqa(C812)
)
if with_failure:
result.append(
(completed & np.in1d(check_run_conclusions,
[b"FAILURE", b"STALE", b"ACTION_REQUIRED"])) |
(check_run_statuses == b"FAILURE") |
(check_run_statuses == b"ERROR") # noqa(C812)
)
if with_skipped:
result.append((check_suite_conclusions != b"NEUTRAL") & neutrals)
return result
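# Illustrative sketch (not part of the original module): toy status/conclusion arrays
# showing how the success and failure masks come out. The helper name and the values
# are made up; `np` is the module-level numpy import.
def _example_outcome_masks():
    statuses = np.array([b"COMPLETED", b"COMPLETED", b"COMPLETED"])
    conclusions = np.array([b"SUCCESS", b"FAILURE", b"NEUTRAL"])
    suite_conclusions = np.array([b"SUCCESS", b"FAILURE", b"NEUTRAL"])
    success_mask, failure_mask = calculate_check_run_outcome_masks(
        statuses, conclusions, suite_conclusions, True, True, False)
    # success_mask -> [True, False, True]; failure_mask -> [False, True, False]
    return success_mask, failure_mask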
@register_metric(CodeCheckMetricID.FLAKY_COMMIT_CHECKS_COUNT)
class FlakyCommitChecksCounter(SumMetricCalculator[int]):
"""Number of commits with both successful and failed check suites."""
metric = MetricInt
deps = (SuccessfulSuitesCounter, FailedSuitesCounter)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
statuses = facts[CheckRun.status.name].values.astype("S")
conclusions = facts[CheckRun.conclusion.name].values.astype("S")
check_suite_conclusions = facts[CheckRun.check_suite_conclusion.name].values.astype("S")
success_mask, failure_mask = calculate_check_run_outcome_masks(
statuses, conclusions, check_suite_conclusions, True, True, False)
commits = facts[CheckRun.commit_node_id.name].values.copy()
check_run_names = np.char.encode(facts[CheckRun.name.name].values.astype("U"), "UTF-8")
commits_with_names = np.char.add(int_to_str(commits), check_run_names)
_, unique_map = np.unique(commits_with_names, return_inverse=True)
unique_flaky_indexes = np.intersect1d(unique_map[success_mask], unique_map[failure_mask])
flaky_mask = np.in1d(unique_map, unique_flaky_indexes)
commits[~flaky_mask] = 0
unique_commits, flaky_indexes = np.unique(commits, return_index=True)
if len(unique_commits) and unique_commits[0] == 0:
flaky_indexes = flaky_indexes[1:]
started = facts[check_suite_started_column].values.astype(min_times.dtype)
result = np.zeros((len(min_times), len(facts)), int)
mask = np.zeros(len(facts), bool)
mask[flaky_indexes] = 1
started[~mask] = None
mask = (min_times[:, None] <= started) & (started < max_times[:, None])
result[mask] = 1
return result
@register_metric(CodeCheckMetricID.PRS_MERGED_WITH_FAILED_CHECKS_COUNT)
class MergedPRsWithFailedChecksCounter(SumMetricCalculator[int]):
"""Count how many PRs were merged despite failing checks."""
metric = MetricInt
@staticmethod
def find_prs_merged_with_failed_check_runs(facts: pd.DataFrame,
) -> Tuple[pd.Index, np.array, np.array]:
"""
Compute the mask in the sorted facts that selects rows with PRs merged with a failing \
check run.
:return: 1. Index of the sorted dataframe. \
2. Column with the pull request node IDs in the sorted dataframe. \
3. Computed mask.
"""
if facts.empty:
return pd.Int64Index([]), np.array([], dtype="S1"), np.array([], dtype=bool)
df = facts[[
CheckRun.pull_request_node_id.name, CheckRun.name.name, CheckRun.started_at.name,
CheckRun.status.name, CheckRun.conclusion.name, pull_request_merged_column]]
df = df.sort_values(CheckRun.started_at.name, ascending=False)  # deliberately not inplace=True
pull_requests = df[CheckRun.pull_request_node_id.name].values
names = np.char.encode(df[CheckRun.name.name].values.astype("U"), "UTF-8")
joint = np.char.add(int_to_str(pull_requests), names)
_, first_encounters = np.unique(joint, return_index=True)
statuses = df[CheckRun.status.name].values.astype("S")
conclusions = df[CheckRun.conclusion.name].values.astype("S")
failure_mask = np.zeros_like(statuses, dtype=bool)
failure_mask[first_encounters] = True
merged_timestamps = df[pull_request_merged_column].values
failure_mask &= (
calculate_check_run_outcome_masks(statuses, conclusions, None, False, True, False)[0]
) & (pull_requests != 0) & (merged_timestamps == merged_timestamps)
return df.index, pull_requests, failure_mask
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
index, pull_requests, failure_mask = self.find_prs_merged_with_failed_check_runs(facts)
failing_pull_requests = pull_requests[failure_mask]
_, failing_indexes = np.unique(failing_pull_requests, return_index=True)
failing_indexes = np.nonzero(failure_mask)[0][failing_indexes]
failing_indexes = index.values[failing_indexes]
mask_pr_times = (
(facts[pull_request_started_column].values < max_times[:, None])
&
(facts[pull_request_closed_column].values >= min_times[:, None])
)
result = np.zeros((len(min_times), len(facts)), dtype=int)
result[:, failing_indexes] = 1
result[~mask_pr_times] = 0
return result
class MergedPRsCounter(SumMetricCalculator[int]):
"""Count how many PRs were merged with checks."""
metric = MetricInt
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
pull_requests = facts[CheckRun.pull_request_node_id.name].values.copy()
merged = facts[pull_request_merged_column].values
pull_requests[merged != merged] = 0
unique_prs, first_encounters = np.unique(pull_requests, return_index=True)
first_encounters = first_encounters[unique_prs != 0]
mask_pr_times = (
(facts[pull_request_started_column].values < max_times[:, None])
&
(facts[pull_request_closed_column].values >= min_times[:, None])
)
result = np.zeros((len(min_times), len(facts)), dtype=int)
result[:, first_encounters] = 1
result[~mask_pr_times] = 0
return result
@register_metric(CodeCheckMetricID.PRS_MERGED_WITH_FAILED_CHECKS_RATIO)
class MergedPRsWithFailedChecksRatioCalculator(RatioCalculator):
"""Calculate the ratio of PRs merged with failing checks to all merged PRs with checks."""
deps = (MergedPRsWithFailedChecksCounter, MergedPRsCounter)
class ConcurrencyCalculator(MetricCalculator[float]):
"""Calculate the concurrency value for each check run."""
metric = MetricFloat
is_pure_dependency = True
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
repos = facts[CheckRun.repository_full_name.name].values.astype("S")
names = np.char.encode(facts[CheckRun.name.name].values.astype("U"), "UTF-8")
crtypes = np.char.add(np.char.add(repos, b"|"), names)
del repos, names
started_ats = \
facts[CheckRun.started_at.name].values.astype(min_times.dtype, copy=False)
completed_ats = \
facts[CheckRun.completed_at.name].values.astype(min_times.dtype, copy=False)
have_completed = completed_ats == completed_ats
crtypes = crtypes[have_completed]
time_order_started_ats = started_ats[have_completed]
time_order_completed_ats = completed_ats[have_completed]
_, crtypes_counts = np.unique(crtypes, return_counts=True)
crtypes_order = np.argsort(crtypes)
crtype_order_started_ats = \
time_order_started_ats[crtypes_order].astype("datetime64[s]")
crtype_order_completed_ats = \
time_order_completed_ats[crtypes_order].astype("datetime64[s]")
intersections = calculate_interval_intersections(
crtype_order_started_ats.view(np.uint64),
crtype_order_completed_ats.view(np.uint64),
np.cumsum(crtypes_counts),
)
intersections = intersections[np.argsort(crtypes_order)]
result = np.full((len(min_times), len(facts)), np.nan, np.float32)
result[:, have_completed] = intersections
mask = (min_times[:, None] <= started_ats) & (started_ats < max_times[:, None])
result[~mask] = np.nan
return result
def _value(self, samples: np.ndarray) -> Metric[float]:
raise AssertionError("disabled for pure dependencies")
@register_metric(CodeCheckMetricID.CONCURRENCY)
class AvgConcurrencyCalculator(AverageMetricCalculator[float]):
"""Calculate the average concurrency of the check runs."""
may_have_negative_values = False
metric = MetricFloat
deps = (ConcurrencyCalculator,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
return self._calcs[0].peek
@register_metric(CodeCheckMetricID.CONCURRENCY_MAX)
class MaxConcurrencyCalculator(MaxMetricCalculator[int]):
"""Calculate the maximum concurrency of the check runs."""
may_have_negative_values = False
metric = MetricFloat
deps = (ConcurrencyCalculator,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
return self._calcs[0].peek
def _agg(self, samples: np.ndarray) -> int:
return int(super()._agg(samples))
ElapsedTimePerConcurrencyCalculatorDType = np.dtype(
[("concurrency", float), ("duration", "timedelta64[s]")])
ElapsedTimePerConcurrencyCalculatorMetric = make_metric(
"ElapsedTimePerConcurrencyCalculatorMetric",
__name__,
ElapsedTimePerConcurrencyCalculatorDType,
np.array((np.nan, np.timedelta64("NaT")), dtype=ElapsedTimePerConcurrencyCalculatorDType))
@register_metric(CodeCheckMetricID.ELAPSED_TIME_PER_CONCURRENCY)
class ElapsedTimePerConcurrencyCalculator(MetricCalculator[object]):
"""Calculate the cumulative time spent on each concurrency level."""
metric = ElapsedTimePerConcurrencyCalculatorMetric
deps = (ConcurrencyCalculator,)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**_) -> np.ndarray:
result =
|
np.zeros_like(self._calcs[0].peek, dtype=self.dtype)
|
numpy.zeros_like
|
# -*- coding: utf-8 -*-
"""
autoencoder.py
=================
Implementation of a neural network using stochastic gradient descent.
"""
from typing import List
import numpy as np
import numpy.random as random
class Autoencoder(object):
def __init__(self, sizes: List[int] = None, learning_rate: float = 0.01, regularization_penalty: float = 0):
"""
Initializes the neural network, creating the layers of neurons, the weight matrices, the biases and the
required data structures for a neural network.
This method requires a list of ints that indicates the size (number of neurons) of each layer. The
learning rate (alpha) and the regularization penalty (lambda) can also be set.
:param sizes: array with the sizes of each layer. Default: [8, 3, 8]
:param learning_rate: learning rate of the neural network (alpha). Default: 0.01
:param regularization_penalty: regularization penalty (lambda). Default: 0
"""
if sizes is None:
sizes = [8, 3, 8]
self.learning_rate = learning_rate # alpha
self.regularization_penalty = regularization_penalty # lambda
self.number_of_layers = len(sizes)
self.sizes = sizes
self.weighted_inputs = None # z's
self.activations = None # a's
self.deltas = None # deltas
self.biases = [random.rand(layer_size, 1) for layer_size in sizes[1:]]
self.bias_partial_derivatives = [np.zeros(bias.shape) for bias in self.biases]
self.weights = [random.rand(layer_size, prev_layer_size)
for layer_size, prev_layer_size in zip(sizes[1:], sizes[:-1])]
self.weights_partial_derivatives = [np.zeros(weight.shape) for weight in self.weights]
self.cost_gradients = [random.rand(layer_size, prev_layer_size)
for layer_size, prev_layer_size in zip(sizes[1:], sizes[:-1])]
self.epoch_convergence = None
self.errors = []
def gradient_descent_learning(self, training_examples: np.ndarray, epochs: int = 50000, batch_size: int = 8,
evaluation: bool = True) -> None:
"""
Method that trains the neural network and evaluates its results for a given input.
The batch size determines how many training instances are used per weight and bias update; if the batch
size is smaller than the total training set, the set is divided into batches that are used one after
another to train the neural network.
:param training_examples: matrix with the inputs to the network
:param epochs: integer with the number of times the training will be done. Default: 50000 epochs
:param batch_size: size of the training batch used per gradient descent update. Default: 8 samples
:param evaluation: true to print the evaluation of the network after each epoch. Default: True
:return:
"""
num_training_examples = training_examples.shape[1]
for i in range(int(epochs)):
random.shuffle(np.transpose(training_examples)) # randomize the input data
batches = [training_examples[:, i:i + batch_size] for i in range(0, num_training_examples, batch_size)]
for batch in batches:
self.train_neural_network(batch)
if evaluation:
error = np.sum(self.deltas[-1]) ** 2
self.errors.append(error)
correct_results = self.evaluate(training_examples)
print('Epoch: {0}. Correct results: {1} of {2}'.format(i, correct_results, num_training_examples))
if correct_results == num_training_examples:
if self.epoch_convergence is None:
self.epoch_convergence = i
print('Converged in {0} epochs'.format(self.epoch_convergence))
if error < 0.01:
print("Squared error less than 0.01 in {0} epochs. Value: {1}".format(i, error))
break
def train_neural_network(self, input_values: np.ndarray):
"""
Method that handles the training process for a set of inputs. This method creates the structures used to save
the data during the training process and calls the different methods that perform the calculations.
:param input_values: training values
"""
input_length = input_values.shape[1]
self.weighted_inputs = [np.zeros([layer_size, input_length]) for layer_size in self.sizes[1:]]
self.activations = [np.zeros([layer_size, input_length]) for layer_size in self.sizes]
self.deltas = [np.zeros([layer_size, input_length]) for layer_size in self.sizes]
self.forward_propagation(input_values)
self.backwards_propagation(input_values, input_length)
self.gradient_descent()
def forward_propagation(self, net_input: np.ndarray):
"""
Forward propagation process.
:param net_input: array with the input of the network
:return: None; the network output is stored in self.activations[-1]
"""
self.activations[0] = net_input # save the input as the activation of the first layer
for i in range(len(self.weights)):
weighted_input = np.dot(self.weights[i], net_input) + self.biases[i] # numpy broadcasting
net_input = sigmoid(weighted_input)
self.weighted_inputs[i] = weighted_input # saved for back propagation -> z
self.activations[i + 1] = net_input # saved for back propagation -> a
def backwards_propagation(self, expected_outputs: np.ndarray, training_examples_length: int):
"""
Backwards propagation process.
:param expected_outputs: output that the last layer of the neural network should produce
:param training_examples_length: number of training examples contained in the input. Equal to m
"""
training_examples_div = (1 / training_examples_length) # 1/m
self.deltas[-1] = -(expected_outputs - self.activations[-1]) # -(y - activation_output)
# self.deltas[-1] = -(expected_outputs - self.activations[-1]) * sigmoid_derivative(self.activations[-i])
# -(y - activation_output) * f'(z[-1])
self.weights_partial_derivatives[-1] = \
training_examples_div * np.dot(self.deltas[-1], np.transpose(self.activations[-2]))
self.bias_partial_derivatives[-1] = training_examples_div * np.sum(self.deltas[-1], axis=1, keepdims=True)
for i in range(2, self.number_of_layers):
self.deltas[-i] = np.dot(np.transpose(self.weights[-i + 1]), self.deltas[-i + 1]) \
* sigmoid_derivative(self.activations[-i])
self.weights_partial_derivatives[-i] = \
training_examples_div * np.dot(self.deltas[-i], np.transpose(self.activations[-i - 1]))
self.bias_partial_derivatives[-i] = training_examples_div * np.sum(self.deltas[-i], axis=1, keepdims=True)
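    # A sketch of the recursion implemented above, in this class's notation
    # (L is the output layer, f is the sigmoid, m = training_examples_length):
    #     delta_L = -(y - a_L)            (f'(z_L) is omitted here; see the
    #                                      commented-out variant above)
    #     delta_l = (W_{l+1}^T . delta_{l+1}) * f'(a_l)   for the hidden layers
    #     dW_l    = (1/m) * delta_l . a_{l-1}^T
    #     db_l    = (1/m) * sum over the examples of delta_l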
def gradient_descent(self):
"""
Method that handles the gradient descent calculations, taking care of the computations required to update
the weights and biases.
This method depends on the learning rate set during the creation of the neural network ('learning_rate')
and it can apply regularization through the value of 'regularization_penalty'.
"""
for i in range(len(self.weights)):
self.weights[i] -= \
self.learning_rate * (
self.weights_partial_derivatives[i] + self.regularization_penalty * self.weights[i])
self.biases[i] -= \
self.learning_rate * (self.bias_partial_derivatives[i] + self.regularization_penalty * self.biases[i])
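    # The update applied above, per layer l (a sketch in this class's notation,
    # with alpha = learning_rate and lambda = regularization_penalty; note that
    # this implementation regularizes the biases as well as the weights):
    #     W_l <- W_l - alpha * (dW_l + lambda * W_l)
    #     b_l <- b_l - alpha * (db_l + lambda * b_l)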
def evaluate(self, data: np.ndarray):
"""
Function that takes all the training data and checks the results that the network returns.
The position of the highest value in the network output is taken as the predicted 'one' and is compared
with the position of the one in the input.
:param data: data used to compare the results
:return: number of correct results returned by the network
"""
self.forward_propagation(data)
return np.sum(np.argmax(self.activations[-1], axis=0) == np.argmax(data, axis=0))
def sigmoid(z):
"""
The sigmoid function, applied to the result of the product of the weights and the activations of the
neurons plus the biases, known as the weighted input.
z = w_l*a_l+b
:param z: weighted input.
:return: activation of the next layer of the network
"""
return 1.0 / (1 + np.exp(-z))
def sigmoid_derivative(activation_values):
"""
Derivative of the sigmoid function
:param activation_values: activation values
:return: result of applying the derivative function
"""
return activation_values * (1.0 - activation_values)
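# Minimal sanity-check sketch (not in the original file): numerically verifies that
# sigmoid_derivative(sigmoid(z)) matches the finite-difference slope of sigmoid at z.
# The helper name, the sample point and the tolerance are illustrative choices.
def _check_sigmoid_derivative(z=0.5, eps=1e-6):
    analytic = sigmoid_derivative(sigmoid(z))
    numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
    return abs(analytic - numeric) < 1e-6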
if __name__ == '__main__':
# Default training set
array = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
# Creation of the network and training
# network = nn.NeuralNetwork(learning_rate=0.1)
# If the learning rate or the penalty wants to be changed
network = Autoencoder(learning_rate=0.1, regularization_penalty=0.0001)
network.gradient_descent_learning(array)
# Testing
test_input = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
print('Test input:')
print(np.array_str(test_input))
network.forward_propagation(test_input)
print('Last layer activation:')
print(
|
np.array_str(network.activations[-1], precision=3, suppress_small=True)
|
numpy.array_str
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 07 11:48:18 2017
@author: Gaurav
"""
import math
import random
import pickle
import warnings
import nltk
import itertools
import numpy as np
import keras.backend as K
from theano import tensor as T
import gensim as gen
import scipy.stats as measures
from gensim.models import word2vec
from scipy.stats.stats import pearsonr
from keras.engine.topology import Layer
from nltk.tokenize import regexp_tokenize
from sklearn.metrics.pairwise import cosine_similarity
warnings.simplefilter("ignore")
START = '$_START_$'
END = '$_END_$'
unk_token = '$_UNK_$'
#vocab_size = 17000
#embedding_dim = 300
#dimx = 30
#dimy = 30
loss_type = 2 # 1 - l1+l2+l3-L4; 2 - l2+l3-L4; 3 - l1+l2+l3 , 4 - l2+l3
#word_to_index={}
#index_to_word=[]
#wordVec_model = word2vec.Word2Vec.load_word2vec_format("GoogleNews-vectors-negative300.bin.gz",binary=True)
def split(train_l,train_r,label,ratio):
total = train_l.shape[0]
train_samples = int(total*(1-ratio))
test_samples = total-train_samples
tr_l,tst_l,tr_r,tst_r,l_tr,l_tst=[],[],[],[],[],[]
dat=random.sample(range(total),train_samples)
for a in dat:
tr_l.append(train_l[a,:])
tr_r.append(train_r[a,:])
l_tr.append(label[a])
for i in range(total):  # every index not drawn for training becomes a test sample
if i not in dat:
tst_l.append(train_l[i,:])
tst_r.append(train_r[i,:])
l_tst.append(label[i])
tr_l = np.array(tr_l)
tr_r = np.array(tr_r)
tst_l = np.array(tst_l)
tst_r = np.array(tst_r)
l_tr = np.array(l_tr)
l_tst = np.array(l_tst)
return tr_l,tst_l,tr_r,tst_r,l_tr,l_tst
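# Hypothetical usage sketch (not in the original script): hold out 20% of the aligned
# left/right views and their labels for testing. The shapes below are illustrative.
# tr_l, tst_l, tr_r, tst_r, l_tr, l_tst = split(np.zeros((100, 30)),
#                                               np.zeros((100, 30)),
#                                               np.arange(100), 0.2)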
class ZeroLike(Layer):
def __init__(self, **kwargs):
super(ZeroLike, self).__init__(**kwargs)
def call(self, x, mask=None):
return K.zeros_like(x)
def get_output_shape_for(self, input_shape):
return input_shape
def project(model,inp):
m = model.predict([inp[0],inp[1]])
return m[2]
def sum_corr(view1,view2,flag=''):
print("test correlation")
corr = 0
for i,j in zip(view1,view2):
corr += measures.pearsonr(i,j)[0]
print('avg sum corr ::',flag,'::',corr/len(view1))
def cal_sim(model,ind1,ind2=1999):
view1 = np.load("test_v1.npy")[0:ind1]
view2 =
|
np.load("test_v2.npy")
|
numpy.load
|
"""
Test cate/ops/coregistration.py
Test coregistration, checks if the values seem as expected
when using default upsampling/downsampling methods.
"""
from unittest import TestCase
import numpy as np
import xarray as xr
from numpy.testing import assert_almost_equal, assert_array_equal
from cate.core.op import OP_REGISTRY
from cate.util.misc import object_to_qualified_name
from cate.ops import coregister
from cate.ops.coregistration import _find_intersection
from ..util.test_monitor import RecordingMonitor
class TestCoregistration(TestCase):
"""
Test coregistration
"""
def test_nominal(self):
"""
Test nominal execution
"""
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])}).chunk(chunks={'lat': 2, 'lon': 4})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])}).chunk(chunks={'lat': 3, 'lon': 3})
# Test that the coarse dataset has been resampled onto the grid
# of the finer dataset.
rm = RecordingMonitor()
ds_coarse_resampled = coregister(ds_fine, ds_coarse, monitor=rm)
self.assertEqual([('start', 'coregister dataset', 2),
('progress', 0.0, 'coregister dataarray', 0),
('progress', 0.0, 'coregister dataarray: resample slice', 0),
('progress', 0.125, None, 6),
('progress', 0.125, None, 13),
('progress', 0.125, None, 19),
('progress', 0.125, None, 25),
('progress', 0.0, 'coregister dataarray: resample slice', 25),
('progress', 0.0, 'coregister dataarray: resample slice', 25),
('progress', 0.125, None, 31),
('progress', 0.125, None, 38),
('progress', 0.125, None, 44),
('progress', 0.125, None, 50),
('progress', 0.0, 'coregister dataarray: resample slice', 50),
('progress', 0.0, 'coregister dataarray', 50),
('progress', 0.0, 'coregister dataarray', 50),
('progress', 0.0, 'coregister dataarray: resample slice', 50),
('progress', 0.125, None, 56),
('progress', 0.125, None, 63),
('progress', 0.125, None, 69),
('progress', 0.125, None, 75),
('progress', 0.0, 'coregister dataarray: resample slice', 75),
('progress', 0.0, 'coregister dataarray: resample slice', 75),
('progress', 0.125, None, 81),
('progress', 0.125, None, 88),
('progress', 0.125, None, 94),
('progress', 0.125, None, 100),
('progress', 0.0, 'coregister dataarray: resample slice', 100),
('progress', 0.0, 'coregister dataarray', 100),
('done',)], rm.records)
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]],
[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]]])),
'second': (['time', 'lat', 'lon'], np.array([[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]],
[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]]])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
assert_almost_equal(ds_coarse_resampled['first'].values, expected['first'].values)
# Test that the fine dataset has been resampled (aggregated)
# onto the grid of the coarse dataset.
ds_fine_resampled = coregister(ds_coarse, ds_fine)
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]],
[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]]])),
'second': (['time', 'lat', 'lon'], np.array([[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]],
[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]]])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
assert_almost_equal(ds_fine_resampled['first'].values, expected['first'].values)
def test_registered(self):
"""
Test registered operation execution
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(coregister))
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
# Test that the coarse dataset has been resampled onto the grid
# of the finer dataset.
ds_coarse_resampled = reg_op(ds_master=ds_fine, ds_replica=ds_coarse)
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]],
[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]]])),
'second': (['time', 'lat', 'lon'], np.array([[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]],
[[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0.,
0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]]])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
assert_almost_equal(ds_coarse_resampled['first'].values, expected['first'].values)
# Test that the fine dataset has been resampled (aggregated)
# onto the grid of the coarse dataset.
ds_fine_resampled = reg_op(ds_master=ds_coarse, ds_replica=ds_fine)
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]],
[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]]])),
'second': (['time', 'lat', 'lon'], np.array([[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]],
[[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]]])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
assert_almost_equal(ds_fine_resampled['first'].values, expected['first'].values)
def test_error(self):
"""
Test error conditions
"""
# Test unexpected global bounds
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(67.5, 135, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse)
self.assertIn('(67.5, 135.0)', str(err.exception))
# Test non-equidistant dataset
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': [-67.5, -20, 20, 67.5],
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse)
self.assertIn('not equidistant', str(err.exception))
# Test non-pixel registered dataset
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse_err = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([2, 5, 10])),
'second': (['time', 'lat', 'lon'], np.zeros([2, 5, 10])),
'lat': np.linspace(-90, 90, 5),
'lon': np.linspace(-162, 162, 10),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse_err)
self.assertIn('not pixel-registered', str(err.exception))
ds_coarse_err = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([2, 5, 10])),
'second': (['time', 'lat', 'lon'], np.zeros([2, 5, 10])),
'lat': np.linspace(-72, 72, 5),
'lon': np.linspace(-180, 180, 10),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse_err)
self.assertIn('not pixel-registered', str(err.exception))
# Test unexpected dimensionality
ds_fine = xr.Dataset({
'first': (['lat', 'longertude'], np.eye(4, 8)),
'second': (['lat', 'longertude'], np.eye(4, 8)),
'lat': np.linspace(-67.5, 67.5, 4),
'longertude': np.linspace(-157.5, 157.5, 8)})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse)
self.assertIn('longertude', str(err.exception))
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lon'], np.eye(2, 6)),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
with self.assertRaises(ValueError) as err:
coregister(ds_fine, ds_coarse)
self.assertIn('select_var', str(err.exception))
def test_find_intersection(self):
"""
Test the _find_intersection method
"""
# Test =======
# =========
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(5.5, 14.5, 10)
result = _find_intersection(a, b, (0, 15))
self.assertEqual((5, 10), result)
# Test =======
# =========
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(5.5, 14.5, 10)
result = _find_intersection(b, a, (0, 15))
self.assertEqual((5, 10), result)
# Test =======
# ==============
a = np.linspace(5.5, 14.5, 10)
b = np.linspace(0.5, 19.5, 20)
result = _find_intersection(a, b, (0, 20))
self.assertEqual((5, 15), result)
# Test ==================
# ========
a = np.linspace(0.5, 19.5, 20)
b = np.linspace(5.5, 14.5, 10)
result = _find_intersection(a, b, (0, 20))
self.assertEqual((5, 15), result)
# Test ============
# ========
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(10.5, 19.5, 10)
with self.assertRaises(ValueError) as err:
_find_intersection(a, b, (0, 20))
self.assertIn('valid intersection', str(err.exception))
# Test ============
# ========
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(10.5, 19.5, 10)
with self.assertRaises(ValueError) as err:
_find_intersection(b, a, (0, 20))
self.assertIn('valid intersection', str(err.exception))
# Test misaligned origins
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(1, 9, 10)
with self.assertRaises(ValueError) as err:
_find_intersection(a, b, (0, 10))
self.assertIn('valid intersection', str(err.exception))
# Test differing pixel sizes
a = np.linspace(0.5, 9.5, 10)
b = np.linspace(5.25, 14.75, 20)
result = _find_intersection(b, a, (0, 20))
self.assertEqual((5, 10), result)
def test_subset(self):
"""
Test coregistration being run on a subset
"""
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])})
lat_slice = slice(-70, 70)
lon_slice = slice(-40, 40)
ds_coarse = ds_coarse.sel(lat=lat_slice, lon=lon_slice)
# Test that the coarse dataset has been resampled onto the grid
# of the finer dataset.
ds_coarse_resampled = coregister(ds_fine, ds_coarse)
assert_array_equal([-67.5, -22.5, 22.5, 67.5], ds_coarse_resampled.lat.values)
assert_array_equal([-22.5, 22.5],
ds_coarse_resampled.lon.values)
# Check if the geospatial attributes have been correctly set
self.assertEqual(ds_coarse_resampled.lat.values[0] - 45 * 0.5,
ds_coarse_resampled.attrs['geospatial_lat_min'])
self.assertEqual(ds_coarse_resampled.lat.values[-1] + 45 * 0.5,
ds_coarse_resampled.attrs['geospatial_lat_max'])
self.assertEqual(ds_coarse_resampled.lon.values[0] - 45 * 0.5,
ds_coarse_resampled.attrs['geospatial_lon_min'])
self.assertEqual(ds_coarse_resampled.lon.values[-1] + 45 * 0.5,
ds_coarse_resampled.attrs['geospatial_lon_max'])
self.assertEqual(45.0,
ds_coarse_resampled.attrs['geospatial_lat_resolution'])
self.assertEqual(45.0,
ds_coarse_resampled.attrs['geospatial_lon_resolution'])
def test_recursive(self):
"""
Test coregistration with more dimensions than lat/lon/time
"""
slice_fine = np.eye(4, 8)
slice_coarse = np.eye(3, 6)
ndarr_fine = np.zeros([2, 2, 2, 4, 8])
ndarr_coarse = np.zeros([2, 2, 2, 3, 6])
ndarr_fine_l1 = np.zeros([2, 2, 4, 8])
ndarr_coarse_l1 = np.zeros([2, 2, 3, 6])
ndarr_fine_l2 = np.zeros([2, 2, 4, 8])
ndarr_coarse_l2 = np.zeros([2, 2, 3, 6])
ndarr_fine[:] = slice_fine
ndarr_coarse[:] = slice_coarse
ndarr_fine_l1[:] = slice_fine
ndarr_coarse_l1[:] = slice_coarse
ndarr_fine_l2[:] = slice_fine
ndarr_coarse_l2[:] = slice_coarse
ds_fine = xr.Dataset({
'first': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_fine),
'second': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_fine),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2]),
'time': np.array([1, 2])}).chunk(chunks={'lat': 2, 'lon': 4})
ds_coarse = xr.Dataset({
'first': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_coarse),
'second': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_coarse),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2]),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2])}).chunk(chunks={'lat': 3, 'lon': 3})
# Test that the coarse dataset has been resampled onto the grid
# of the finer dataset.
rm = RecordingMonitor()
ds_coarse_resampled = coregister(ds_fine, ds_coarse, monitor=rm)
self.assertEqual([('start', 'coregister dataset', 2),
('progress', 0.0, 'coregister dataarray', 0),
('progress', 0.0, 'coregister dataarray: resample slice', 0),
('progress', 0.03125, None, 2),
('progress', 0.03125, None, 3),
('progress', 0.03125, None, 5),
('progress', 0.03125, None, 6),
('progress', 0.0, 'coregister dataarray: resample slice', 6),
('progress', 0.0, 'coregister dataarray: resample slice', 6),
('progress', 0.03125, None, 8),
('progress', 0.03125, None, 9),
('progress', 0.03125, None, 11),
('progress', 0.03125, None, 13),
('progress', 0.0, 'coregister dataarray: resample slice', 13),
('progress', 0.0, 'coregister dataarray: resample slice', 13),
('progress', 0.03125, None, 14),
('progress', 0.03125, None, 16),
('progress', 0.03125, None, 17),
('progress', 0.03125, None, 19),
('progress', 0.0, 'coregister dataarray: resample slice', 19),
('progress', 0.0, 'coregister dataarray: resample slice', 19),
('progress', 0.03125, None, 20),
('progress', 0.03125, None, 22),
('progress', 0.03125, None, 23),
('progress', 0.03125, None, 25),
('progress', 0.0, 'coregister dataarray: resample slice', 25),
('progress', 0.0, 'coregister dataarray: resample slice', 25),
('progress', 0.03125, None, 27),
('progress', 0.03125, None, 28),
('progress', 0.03125, None, 30),
('progress', 0.03125, None, 31),
('progress', 0.0, 'coregister dataarray: resample slice', 31),
('progress', 0.0, 'coregister dataarray: resample slice', 31),
('progress', 0.03125, None, 33),
('progress', 0.03125, None, 34),
('progress', 0.03125, None, 36),
('progress', 0.03125, None, 38),
('progress', 0.0, 'coregister dataarray: resample slice', 38),
('progress', 0.0, 'coregister dataarray: resample slice', 38),
('progress', 0.03125, None, 39),
('progress', 0.03125, None, 41),
('progress', 0.03125, None, 42),
('progress', 0.03125, None, 44),
('progress', 0.0, 'coregister dataarray: resample slice', 44),
('progress', 0.0, 'coregister dataarray: resample slice', 44),
('progress', 0.03125, None, 45),
('progress', 0.03125, None, 47),
('progress', 0.03125, None, 48),
('progress', 0.03125, None, 50),
('progress', 0.0, 'coregister dataarray: resample slice', 50),
('progress', 0.0, 'coregister dataarray', 50),
('progress', 0.0, 'coregister dataarray', 50),
('progress', 0.0, 'coregister dataarray: resample slice', 50),
('progress', 0.03125, None, 52),
('progress', 0.03125, None, 53),
('progress', 0.03125, None, 55),
('progress', 0.03125, None, 56),
('progress', 0.0, 'coregister dataarray: resample slice', 56),
('progress', 0.0, 'coregister dataarray: resample slice', 56),
('progress', 0.03125, None, 58),
('progress', 0.03125, None, 59),
('progress', 0.03125, None, 61),
('progress', 0.03125, None, 63),
('progress', 0.0, 'coregister dataarray: resample slice', 63),
('progress', 0.0, 'coregister dataarray: resample slice', 63),
('progress', 0.03125, None, 64),
('progress', 0.03125, None, 66),
('progress', 0.03125, None, 67),
('progress', 0.03125, None, 69),
('progress', 0.0, 'coregister dataarray: resample slice', 69),
('progress', 0.0, 'coregister dataarray: resample slice', 69),
('progress', 0.03125, None, 70),
('progress', 0.03125, None, 72),
('progress', 0.03125, None, 73),
('progress', 0.03125, None, 75),
('progress', 0.0, 'coregister dataarray: resample slice', 75),
('progress', 0.0, 'coregister dataarray: resample slice', 75),
('progress', 0.03125, None, 77),
('progress', 0.03125, None, 78),
('progress', 0.03125, None, 80),
('progress', 0.03125, None, 81),
('progress', 0.0, 'coregister dataarray: resample slice', 81),
('progress', 0.0, 'coregister dataarray: resample slice', 81),
('progress', 0.03125, None, 83),
('progress', 0.03125, None, 84),
('progress', 0.03125, None, 86),
('progress', 0.03125, None, 88),
('progress', 0.0, 'coregister dataarray: resample slice', 88),
('progress', 0.0, 'coregister dataarray: resample slice', 88),
('progress', 0.03125, None, 89),
('progress', 0.03125, None, 91),
('progress', 0.03125, None, 92),
('progress', 0.03125, None, 94),
('progress', 0.0, 'coregister dataarray: resample slice', 94),
('progress', 0.0, 'coregister dataarray: resample slice', 94),
('progress', 0.03125, None, 95),
('progress', 0.03125, None, 97),
('progress', 0.03125, None, 98),
('progress', 0.03125, None, 100),
('progress', 0.0, 'coregister dataarray: resample slice', 100),
('progress', 0.0, 'coregister dataarray', 100),
('done',)], rm.records)
slice_exp = np.array([[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0., 0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]])
ndarr_fine_exp = np.zeros([2, 2, 2, 4, 8])
ndarr_fine_exp[:] = slice_exp
expected = xr.Dataset({
'first': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_fine_exp),
'second': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_fine_exp),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2]),
'time': np.array([1, 2])})
assert_almost_equal(ds_coarse_resampled['first'].values, expected['first'].values)
# Test that the fine dataset has been resampled (aggregated)
# onto the grid of the coarse dataset.
ds_fine_resampled = coregister(ds_coarse, ds_fine)
slice_exp = np.array([[0.625, 0.125, 0., 0., 0., 0.],
[0.125, 0.5, 0.125, 0., 0., 0.],
[0., 0.125, 0.625, 0., 0., 0.]])
ndarr_coarse_exp = np.zeros([2, 2, 2, 3, 6])
ndarr_coarse_exp[:] = slice_exp
expected = xr.Dataset({
'first': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_coarse_exp),
'second': (['time', 'layer', 'layer2', 'lat', 'lon'], ndarr_coarse_exp),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2]),
'time': np.array([1, 2])})
assert_almost_equal(ds_fine_resampled['first'].values, expected['first'].values)
# Test that coregistering with data arrays with less than all possible
# dimensions works
ds_fine = xr.Dataset({
'first': (['time', 'layer', 'lat', 'lon'], ndarr_fine_l1),
'second': (['time', 'layer2', 'lat', 'lon'], ndarr_fine_l2),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2]),
'time': np.array([1, 2])}).chunk(chunks={'lat': 2, 'lon': 4})
ds_coarse = xr.Dataset({
'first': (['time', 'layer', 'lat', 'lon'], ndarr_coarse_l1),
'second': (['time', 'layer2', 'lat', 'lon'], ndarr_coarse_l2),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2]),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2])}).chunk(chunks={'lat': 3, 'lon': 3})
ds_fine_resampled = coregister(ds_coarse, ds_fine)
ndarr_coarse_exp = np.zeros([2, 2, 3, 6])
ndarr_coarse_exp[:] = slice_exp
expected = xr.Dataset({
'first': (['time', 'layer', 'lat', 'lon'], ndarr_coarse_exp),
'second': (['time', 'layer2', 'lat', 'lon'], ndarr_coarse_exp),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'layer': np.array([1, 2]),
'layer2': np.array([1, 2]),
'time': np.array([1, 2])})
assert_almost_equal(ds_fine_resampled['first'].values, expected['first'].values)
def test_2D(self):
"""
Test a case where a 2D lat/lon dataset is resampled or used for
resampling
"""
# Master dataset is 2D
ds_fine = xr.Dataset({
'first': (['lat', 'lon'], np.eye(4, 8)),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8)}).chunk()
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])}).chunk(chunks={'lat': 3, 'lon': 3})
ds_coarse_resampled = coregister(ds_fine, ds_coarse)
slice_exp = np.array([[1., 0.28571429, 0., 0., 0., 0., 0., 0.],
[0.33333333, 0.57142857, 0.38095238, 0., 0., 0., 0., 0.],
[0., 0.47619048, 0.52380952, 0.28571429, 0.04761905, 0., 0., 0.],
[0., 0., 0.42857143, 0.85714286, 0.14285714, 0., 0., 0.]])
exp_arr = np.zeros([2, 4, 8])
exp_arr[:] = slice_exp
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], exp_arr),
'second': (['time', 'lat', 'lon'], exp_arr),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])})
assert_almost_equal(ds_coarse_resampled['first'].values, expected['first'].values)
# replica dataset contains a 2D variable
ds_coarse = xr.Dataset({
'first': (['lat', 'lon'], np.eye(3, 6)),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)])),
'lat': np.linspace(-60, 60, 3),
'lon': np.linspace(-150, 150, 6),
'time': np.array([1, 2])}).chunk(chunks={'lat': 3, 'lon': 3})
ds_coarse_resampled = coregister(ds_fine, ds_coarse)
assert_almost_equal(ds_coarse_resampled['first'].values, slice_exp)
def test_int_array(self):
"""
Test coregistration on integer arrays
"""
ds_fine = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)], dtype='int32')),
'second': (['time', 'lat', 'lon'], np.array([np.eye(4, 8), np.eye(4, 8)])),
'lat': np.linspace(-67.5, 67.5, 4),
'lon': np.linspace(-157.5, 157.5, 8),
'time': np.array([1, 2])}).chunk(chunks={'lat': 2, 'lon': 4})
ds_coarse = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.array([np.eye(3, 6), np.eye(3, 6)], dtype='int32')),
'second': (['time', 'lat', 'lon'], np.array([np.eye(3, 6),
|
np.eye(3, 6)
|
numpy.eye
|
import sys
import numpy as np
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
import argparse
import csv
shape_i = 0
baseline_range = {'start':3, 'end':24}
crisis_range = {'start':24}
#{{{
def get_ws(scores, header):
wd = []
we = []
for i in range(len(header)):
timepoint = header[i].split(' ')
day = timepoint[1]
time = timepoint[-1]
if day in ['Saturday', 'Sunday']:
we.append(scores[i])
else:
wd.append(scores[i])
wd_u =
|
np.mean(wd)
|
numpy.mean
|
import matplotlib
matplotlib.use('Agg') # check this
import matplotlib.pyplot as plt
import numpy as np
import vaex
import pandas as pd
import os
from glob import glob
from astrom_common import Alignment, plotalignment
from astrometry.util.fits import fits_table
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import match_radec
os.environ['NUMEXPR_MAX_THREADS'] = '8'
def check_results(fns, tag):
def get_field(ds, col):
return ds.evaluate(ds[col.upper()])
rr = []
dd = []
for fn in fns:
df = pd.read_hdf(fn, key='data')
ds = vaex.from_pandas(df)
print(len(ds), 'rows')
ra = get_field(ds, 'ra')
dec = get_field(ds, 'dec')
rr.append(ra)
dd.append(dec)
rr = np.hstack(rr)
dd = np.hstack(dd)
print('Total of', len(rr), 'stars')
T = fits_table()
T.ra = rr
T.dec = dd
T.writeto('all-rd-%s.fits' % tag)
plothist(rr, dd, 500)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.savefig('all-radec-%s.png' % tag)
I,J,d = match_radec(rr, dd, rr, dd, 0.2/3600, notself=True)
plt.clf()
plt.hist(d * 3600. * 1000., bins=50)
plt.xlabel('Distance between stars (milli-arcsec)')
plt.savefig('all-dists-%s.png' % tag)
def check_results_2(tag):
T = fits_table('all-rd-%s.fits' % tag)
I,J,d = match_radec(T.ra, T.dec, T.ra, T.dec, 0.2/3600, notself=True)
plt.clf()
plt.hist(d * 3600. * 1000., bins=50)
plt.xlabel('Distance between stars (milli-arcsec)')
plt.savefig('all-dists-%s.png' % tag)
plt.clf()
plt.hist(d * 3600. * 1000., bins=50, log=True)
plt.xlabel('Distance between stars (milli-arcsec)')
plt.savefig('all-dists-log-%s.png' % tag)
def apply_alignments(aff_fn, corners_fn, infns, pandas=True):
from astrom_common import Affine
T = fits_table(aff_fn)
affs = Affine.fromTable(T)
print('Read affines:', affs)
ibright = dict([(fn.strip(),i) for i,fn in enumerate(T.filenames)])
corners = {}
for line in open(corners_fn).readlines():
line = line.strip()
words = line.split()
ras = np.array([float(words[i]) for i in [1,3,5,7]])
decs = np.array([float(words[i]) for i in [2,4,6,8]])
corners[words[0]] = (ras,decs)
from astrometry.util.miscutils import point_in_poly
#fns1 = glob('data/M31-*ST/proc_default/M31-*ST.phot.hdf5')
#fns2 = glob('data/M31-*ST/M31-*ST.phot.hdf5')
#fns1.sort()
#fns2.sort()
#fns = fns1 + fns2
fns = infns
print('Files:', fns)
veto_polys = []
for photfile in fns:
basename = os.path.basename(photfile)
basename = basename.replace('.phot.hdf5', '')
print('Base name:', basename)
corner = corners[basename]
ras,decs = corner
poly = np.vstack((ras, decs)).T
outfn2 = 'cut-%s.hdf5' % basename
if os.path.exists(outfn2):
print('File', outfn2, 'exists; skipping')
veto_polys.append(poly)
continue
brightfn = basename + '-bright.fits'
ii = ibright[brightfn]
aff = affs[ii]
print('Reading', photfile)
if pandas:
df = pd.read_hdf(photfile, key='data')
ds = vaex.from_pandas(df)
else:
ds = vaex.open(photfile)
def get_field(ds, col):
if pandas:
return ds.evaluate(ds[col])
else:
return ds.evaluate(ds[col.upper()])
print(len(ds), 'rows')
ra = get_field(ds, 'ra')
dec = get_field(ds, 'dec')
ra,dec = aff.apply(ra, dec)
Tleft = fits_table()
Tleft.ra = ra
Tleft.dec = dec
Tleft.index = np.arange(len(Tleft))
inside = point_in_poly(Tleft.ra, Tleft.dec, poly)
print(np.sum(inside), 'of', len(Tleft), 'inside corners of this half-brick')
inside_veto = np.zeros(len(Tleft), bool)
for vp in veto_polys:
inveto = point_in_poly(Tleft.ra, Tleft.dec, vp)
inside_veto[inveto] = True
print(np.sum(inside_veto), 'stars are inside the corners of previous half-bricks')
print('inside:', type(inside), inside.dtype)
inside[inside_veto] = False
print(np.sum(inside), 'stars are uniquely in this half-brick')
veto_polys.append(poly)
outfn = 'out/out-%s.hdf5' % basename
if pandas:
df[inside].to_hdf(outfn, key='data', mode='w',
format='table', complevel=9, complib='zlib')
else:
df = ds.take(np.flatnonzero(inside)).to_pandas_df()
df.to_hdf(outfn, key='data', mode='w',
format='table', complevel=9, complib='zlib')
print('Wrote', outfn)
outfn = 'cut/cut-%s.hdf5' % basename
if pandas:
df[np.logical_not(inside)].to_hdf(outfn, key='data', mode='w',
format='table', complevel=9, complib='zlib')
else:
df = ds.take(np.flatnonzero(np.logical_not(inside))).to_pandas_df()
df.to_hdf(outfn, key='data', mode='w',
format='table', complevel=9, complib='zlib')
print('Wrote', outfn)
def to_fits(fns, pandas=True):
print('Files:', fns)
plt.clf()
outfns = []
for photfile in fns:
#photfile like 'data/M31-B23-WEST/M31-B23-WEST.phot.hdf5'
print()
print(photfile)
basename = os.path.basename(photfile)
basename = basename.replace('.phot.hdf5', '')
print('Base name:', basename)
outfn = basename + '-bright.fits'
outfns.append(outfn)
if os.path.exists(outfn):
print('Exists:', outfn)
st_in = os.stat(photfile)
st_out = os.stat(outfn)
print('Timestamps: in', st_in.st_mtime, 'out', st_out.st_mtime)
if st_out.st_mtime > st_in.st_mtime:
continue
print('Input file is newer!')
basename = basename.replace('_', '-')
words = basename.split('-')
assert(len(words) == 3)
galaxy = words[0]
assert(galaxy.startswith('M'))
brick = words[1]
assert(brick[0] == 'B')
brick = int(brick[1:], 10)
print('Brick number:', brick)
dirn = words[2]
#ew = words[2]
assert(dirn in ['EAST', 'WEST', 'NW','NN','NE','SW','SS','SE'])
#east = (ew == 'EAST')
if pandas:
df = pd.read_hdf(photfile, key='data')
ds = vaex.from_pandas(df)
else:
ds = vaex.open(photfile)
print('Read', photfile)
#print(ds)
def get_field(ds, col):
if pandas:
return ds.evaluate(ds[col])
else:
return ds.evaluate(ds[col.upper()])
print(len(ds), 'rows')
if 'f814w_gst' in ds:
good = get_field(ds, 'f814w_gst')
print(len(ds), 'rows')
#print('good:', good.dtype)
from collections import Counter
print('good:', Counter(good))
#print('ds:', ds.dtype)
#ds = ds[good]
#ds = ds[np.flatnonzero(good)]
ds = ds.take(np.flatnonzero(good))
#print('ds:', ds)
print(len(ds), 'gst on F814W')
else:
ds.select('(F814W_SNR > 4) & (F814W_SHARP**2 < 0.2)', name='F814W_ST')
ds.select('F814W_ST & (F814W_CROWD < 2.25)', name='F814W_GST')
ds = ds[ds['F814W_GST']]
print(len(ds), 'gst on F814W')
# good = ds.evaluate(ds['f475w_gst'])
# print(good)
# print(len(good))
# print(type(good))
# print(good.dtype)
# print('Of those,', np.sum(ds.evaluate(ds['f475w_gst'])), 'are F475W_GST')
# print('Of those,', np.sum(ds.evaluate(ds['f336w_gst'])), 'are F336W_GST')
# print('Of those,', np.sum(ds.evaluate(ds['f275w_gst'])), 'are F275W_GST')
# print('Of those,', np.sum(ds.evaluate(ds['f110w_gst'])), 'are F110W_GST')
# print('Of those,', np.sum(ds.evaluate(ds['f160w_gst'])), 'are F160W_GST')
mag = get_field(ds, 'f814w_vega')
print('Of', len(mag), 'mags,', np.sum(np.isfinite(mag)), 'are finite')
print('range:', np.nanmin(mag), np.nanmax(mag))
plt.hist(mag[np.isfinite(mag)], range=(20, 28), bins=50, label=basename)
with np.errstate(invalid='ignore'):
print('ds', ds)
if pandas:
#ds = ds[mag < 24]
ds = ds.take(np.flatnonzero(mag < 24))
else:
ds = ds[ds['F814W_VEGA'] < 24]
#ds = ds.take(np.flatnonzero(ds['F814W_VEGA'] < 24))
print('ds cut', ds)
print(len(ds), 'with F814W < 24')
mag = get_field(ds, 'f814w_vega')
xx = get_field(ds, 'x')
yy = get_field(ds, 'y')
xlo = xx.min()
xhi = xx.max()
ylo = yy.min()
yhi = yy.max()
nx = int(np.round((xhi - xlo) / 1000.)) + 1
xbins = np.linspace(xlo, xhi, nx)
ny = int(np.round((yhi - ylo) / 1000.)) + 1
ybins = np.linspace(ylo, yhi, ny)
print('x bins', xbins)
print('y bins', ybins)
xbin = np.digitize(xx, xbins)
ybin = np.digitize(yy, ybins)
xybin = ybin * nx + xbin
nbins = nx * ny
print('N bins:', nbins)
nperbin = int(np.ceil(100000. / nbins))
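# Keep only the ~nperbin brightest stars in each ~1000x1000-pixel cell so the
# bright-star catalog (targeting ~100k stars in total) samples the field evenly
# instead of piling up in the most crowded regions.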
II = []
for ibin in range(nbins):
I = np.flatnonzero(xybin == ibin)
if len(I) == 0:
continue
Ibright = np.argsort(mag[I])[:nperbin]
II.append(I[Ibright])
II = np.hstack(II)
#I = np.argsort(mag)
#I = I[:100000]
#print('100k-th star: mag', mag[I[-1]])
ds = ds.take(II)
cols = ['ra','dec','x', 'y']
if pandas:
cols.append('index')
T = fits_table()
for col in cols:
T.set(col, get_field(ds, col))
for filt in [814, 475, 336, 275, 110, 160]:
for col in ['f%iw_vega']:
colname = col % filt
T.set(colname, get_field(ds, colname))
T.galaxy = np.array([galaxy] * len(T))
T.brick = np.zeros(len(T), np.uint8) + brick
#T.east = np.zeros(len(T), bool)
#T.east[:] = east
T.dirn = np.array([dirn] * len(T))
T.writeto(outfn)
plt.legend()
plt.xlabel('F814W mag')
plt.savefig('mags.png')
return outfns
def find_alignments(fns, wcsfns, gaia_fn, aff_fn, aligned_fn):
from astrometry.libkd.spherematch import tree_build_radec, trees_match
from astrometry.libkd.spherematch import match_radec
from astrometry.util.plotutils import plothist
from astrometry.util.util import Tan
import fitsio
from astrom_common import getwcsoutline
from singles import find_overlaps
if True:
WCS = []
for fn in wcsfns:
wcs = Tan(fn)
WCS.append(wcs)
names = [fn.replace('-bright.fits', '') for fn in fns]
outlines = [getwcsoutline(wcs) for wcs in WCS]
overlaps,areas = find_overlaps(outlines)
print('Reading tables...')
TT = [fits_table(fn) for fn in fns]
print('Building trees...')
kds = [tree_build_radec(T.ra, T.dec) for T in TT]
for T,name in zip(TT, names):
T.name = np.array([name]*len(T))
allra = np.hstack([T.ra for T in TT])
alldec = np.hstack([T.dec for T in TT])
minra = np.min(allra)
maxra = np.max(allra)
mindec = np.min(alldec)
maxdec = np.max(alldec)
print('RA,Dec range:', minra, maxra, mindec, maxdec)
plothist(allra, alldec)
plt.axis([maxra, minra, mindec, maxdec])
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.savefig('match-all.png')
Tref = fits_table(gaia_fn)
r_arcsec = 0.2
I,J,d = match_radec(Tref.ra, Tref.dec, allra, alldec, r_arcsec/3600.)
dec = alldec[J]
cosdec = np.cos(
|
np.deg2rad(dec)
|
numpy.deg2rad
|
"""flatsys_test.py - test flat system module
RMM, 29 Jun 2019
This test suite checks to make sure that the basic functions supporting
differential flat systems are functioning. It doesn't do exhaustive
testing of operations on flat systems. Separate unit tests should be
created for that purpose.
"""
from distutils.version import StrictVersion
import numpy as np
import pytest
import scipy as sp
import control as ct
import control.flatsys as fs
import control.optimal as opt
class TestFlatSys:
"""Test differential flat systems"""
@pytest.mark.parametrize(
"xf, uf, Tf",
[([1, 0], [0], 2),
([0, 1], [0], 3),
([1, 1], [1], 4)])
def test_double_integrator(self, xf, uf, Tf):
# Define a second-order linear system (controllable, hence differentially flat)
sys = ct.StateSpace([[-1, 1], [0, -2]], [[0], [1]], [[1, 0]], 0)
flatsys = fs.LinearFlatSystem(sys)
# Define the basis set
poly = fs.PolyFamily(6)
x1, u1 = [0, 0], [0]
traj = fs.point_to_point(flatsys, Tf, x1, u1, xf, uf, basis=poly)
# Verify that the trajectory computation is correct
x, u = traj.eval([0, Tf])
np.testing.assert_array_almost_equal(x1, x[:, 0])
np.testing.assert_array_almost_equal(u1, u[:, 0])
np.testing.assert_array_almost_equal(xf, x[:, 1])
np.testing.assert_array_almost_equal(uf, u[:, 1])
# Simulate the system and make sure we stay close to desired traj
T = np.linspace(0, Tf, 100)
xd, ud = traj.eval(T)
t, y, x = ct.forced_response(sys, T, ud, x1, return_x=True)
np.testing.assert_array_almost_equal(x, xd, decimal=3)
@pytest.fixture
def vehicle_flat(self):
"""Differential flatness for a kinematic car"""
def vehicle_flat_forward(x, u, params={}):
b = params.get('wheelbase', 3.) # get parameter values
zflag = [np.zeros(3), np.zeros(3)] # list for flag arrays
zflag[0][0] = x[0] # flat outputs
zflag[1][0] = x[1]
zflag[0][1] = u[0] * np.cos(x[2]) # first derivatives
zflag[1][1] = u[0] * np.sin(x[2])
thdot = (u[0]/b) * np.tan(u[1]) # dtheta/dt
zflag[0][2] = -u[0] * thdot * np.sin(x[2]) # second derivatives
zflag[1][2] = u[0] * thdot * np.cos(x[2])
return zflag
def vehicle_flat_reverse(zflag, params={}):
b = params.get('wheelbase', 3.) # get parameter values
x = np.zeros(3); u = np.zeros(2) # vectors to store x, u
x[0] = zflag[0][0] # x position
x[1] = zflag[1][0] # y position
x[2] = np.arctan2(zflag[1][1], zflag[0][1]) # angle
u[0] = zflag[0][1] * np.cos(x[2]) + zflag[1][1] * np.sin(x[2])
thdot_v = zflag[1][2] *
|
np.cos(x[2])
|
numpy.cos
|
import numpy as np
import utils
from scipy.special import gammaln, psi
from formatted_logger import formatted_logger
eps = 1e-20
log = formatted_logger('RTM', 'info')
class rtm:
""" implementation of relational topic model by <NAME> Blei (2009)
I implemented the exponential link probability function in here
"""
def __init__(self, num_topic, num_doc, num_voca, doc_ids, doc_cnt, doc_links, rho):
self.D = num_doc
self.K = num_topic
self.V = num_voca
self.alpha = .1
self.gamma = np.random.gamma(100., 1./100, [self.D, self.K])
self.beta = np.random.dirichlet([5]*self.V, self.K)
self.nu = 0
self.eta = np.random.normal(0.,1, self.K)
self.phi = list()
self.pi = np.zeros([self.D, self.K])
for di in range(self.D):
unique_word = len(doc_ids[di])
cnt = doc_cnt[di]
self.phi.append(np.random.dirichlet([10]*self.K, unique_word).T) # list of KxW
self.pi[di,:] = np.sum(cnt*self.phi[di],1)/np.sum(cnt*self.phi[di])
self.doc_ids = doc_ids
self.doc_cnt = doc_cnt
self.doc_links = doc_links
self.rho = rho #regularization parameter
log.info('Initialize RTM: num_voca:%d, num_topic:%d, num_doc:%d' % (self.V,self.K,self.D))
def posterior_inference(self, max_iter):
for iter in range(max_iter):
self.variation_update()
self.parameter_estimation()
log.info('%d iter: ELBO = %.3f' % (iter, self.compute_elbo()))
def compute_elbo(self):
""" compute evidence lower bound for trained model
"""
elbo = 0
e_log_theta = psi(self.gamma) - psi(np.sum(self.gamma, 1))[:,np.newaxis] # D x K
log_beta = np.log(self.beta+eps)
for di in range(self.D):
words = self.doc_ids[di]
cnt = self.doc_cnt[di]
elbo += np.sum(cnt * (self.phi[di] * log_beta[:,words])) # E_q[log p(w_{d,n}|\beta,z_{d,n})]
elbo += np.sum((self.alpha - 1.)*e_log_theta[di,:]) # E_q[log p(\theta_d | alpha)]
elbo += np.sum(self.phi[di].T * e_log_theta[di,:]) # E_q[log p(z_{d,n}|\theta_d)]
elbo += -gammaln(np.sum(self.gamma[di,:])) + np.sum(gammaln(self.gamma[di,:])) \
- np.sum((self.gamma[di,:] - 1.)*(e_log_theta[di,:])) # - E_q[log q(theta|gamma)]
elbo += - np.sum(cnt * self.phi[di] *
|
np.log(self.phi[di])
|
numpy.log
|
# Sample code from the TorchVision 0.3 Object Detection Finetuning Tutorial
# http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
import os
import numpy as np
import torch
from PIL import Image
import pickle
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
def get_model_instance_segmentation(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
def get_transform(train):
transforms = []
transforms.append(T.ToTensor())
return T.Compose(transforms)
img = Image.open("credit/JPEGImages/image_1.jpg").convert("RGB")
mask = Image.open("credit/SegmentationObjectPNG/image_1.png")
mask = np.array(mask)
print(mask.shape)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
print(masks.shape)
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin =
|
np.min(pos[0])
|
numpy.min
|
#--> This code was developed using Anaconda3 installed as
#--> administrator and with PATH option selected during the install so
#--> that python can be used from a Windows command line. NOTE: to
#--> get command line arguments passed, which is essential for this
# code, you need to edit the registry to make
# Computer\HKEY_CLASSES_ROOT\Applications\python.exe\shell\open\command
# "C:\ProgramData\Anaconda3\python.exe" "%1" %*
# Thanks to
# https://stackoverflow.com/questions/29540541/executable-python-script-not-take-sys-argv-in-windows
# Alternately, you can just call python (with the full path to python
# if needed) and then specify the full path to the module and then the
# module's arguments
import importlib
import sys
import os
import socket
import time
import subprocess
import argparse
import json
import numpy as np
from scipy import signal
from astropy import log
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from astropy.time import Time, TimeDelta
if sys.platform == 'win32':
# --> also check out pythonnet
try:
import win32com.client
except:
log.info('You are missing the win32com.client. This should be in the Anaconda package. MaxIm/telescope control will not work.')
else:
# http://timgolden.me.uk/pywin32-docs/html/com/win32com/HTML/QuickStartClientCom.html
# Use makepy.py -i to poke around in what might be useful
try:
# 'ASCOM Master Interfaces for .NET and COM' constants.
# Use example: win32com.client.constants.shutterOpen
win32com.client.gencache.EnsureModule('{76618F90-032F-4424-A680-802467A55742}', 0, 1, 0)
except:
log.info('ASCOM does not seem to be installed. MaxIm/telescope control will not work.')
else:
try:
# MaxIm constants. The long string is the GUID of MaxIm found by makepy.py
win32com.client.gencache.EnsureModule('{B4955EC7-F7F2-11D2-AA9C-444553540000}', 0, 1, 0)
except:
log.info('MaxIm not found. MaxIm/telescope control will not work.')
else:
log.info('You are not on a Windows system. The MaxIm/telescope control features of this package will not work unless you are on a Windows system.')
import define as D
# --> these are things that eventually I would want to store in a
# --> configuration file
# --> CHANGE ME BACK TO 1s(or 7s) and filter 0 (0.7s or 0.3 on
# --> Vega filter 1 works for day)
default_exptime = 1
default_filt = 0
default_cent_tol = 5 # Pixels
default_guider_exptime = 1 # change back to 1 for night, 0.2 for day
# In principle, it is possible to use only MaxIm guiding stuff for
# this, alleviating the need for us to connect directly to the
# telescope. In practice, with a GEM and for setting DEC conveniently
# in the guider, MaxImControl really needs to be connected to the scope.
# Since unexpected behavior may result if there is not a hard failure
# when a GEM is not connected, indicate here whether or not you want
# that hard failure
telescope_must_be_connectable = True
# These are necessary for GEMs because MaxIm does not reveal the
# contents of the Camera Control -> Guide Tab -> Settings dialog box
# -> Advanced Tab -> Guider Motor Control radio buttons to the
# scripting interface. As explained in guider_motor_reverse_setup,
# when using ACP or otherwise not having MaxIm connected to the
# telescope, we need to manage motor reversal ourselves
guider_motor_control_reverseX = True
guider_motor_control_reverseY = False
# Misalignment in deg
guider_cal_astrometry_max_misalignment = 10
horizon_limit = 8.5
# --> I may improve this location or the technique of message passing
hostname = socket.gethostname()
if hostname == "snipe" or hostname == "byted":
raw_data_root = '/data/io/IoIO/raw'
elif hostname == "greyhound" or hostname == "gigabyte":
# --> This doesn't work. I need Unc?
#raw_data_root = '//snipe/data/io/IoIO/raw'
raw_data_root = r'\\snipe\data\io\IoIO\raw'
default_telescope = 'ScopeSim.Telescope'
elif socket.gethostname() == "IoIO1U1":
raw_data_root = r'C:\Users\PLANETARY SCIENCE\Desktop\IoIO\data'
# --> Eventually, it would be nice to have this in a chooser
default_telescope = 'AstroPhysicsV2.Telescope'
# For weather synchronization with ACP
ACPUtil = 'ACP.Util'
default_guide_box_command_file = os.path.join(raw_data_root, 'GuideBoxCommand.txt')
default_guide_box_log_file = os.path.join(raw_data_root, 'GuideBoxLog.txt')
run_level_main_astrometry = os.path.join(
raw_data_root, '2021-04_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2021-03_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-09_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-03_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Main_Astrometry_West_of_Pier.fit')
#raw_data_root, '2019-02_Astrometry/PinPointSolutionEastofPier.fit')
#raw_data_root, '2019-02_Astrometry/PinPointSolutionWestofPier.fit')
#raw_data_root, '2018-04_Astrometry/PinPointSolutionEastofPier.fit')
# --> Currently only guider WestofPier (looking east) works properly,
# --> which might indicate that calculations need to be made with true
# --> north of CCD aligned with true north button on mount. Although
# --> pier flip doesn't affect N/S because tube rolls over too, E/W is
# --> affected
run_level_guider_astrometry = os.path.join(
raw_data_root, '2021-04_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2021-03_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-09_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-03_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Guider_Astrometry_West_of_Pier.fit')
#raw_data_root, '2019-02_Astrometry/GuiderPinPointSolutionWestofPier.fit')
#raw_data_root, '2019-02_Astrometry/GuiderPinPointSolutionEastofPier.fit')
#raw_data_root, '2018-04_Astrometry/GuiderPinPointSolutionWestofPier.fit')
#raw_data_root, '2018-01_Astrometry//GuiderPinPointSolutionEastofPier.fit')
def angle_norm(angle, maxang):
"""Normalize an angle to run up to maxang degrees"""
angle += 360
angle %= 360
if angle > maxang: # handles 180 case
angle -= 360
return angle
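# Usage sketch for angle_norm (hypothetical values, not from the original
# source), normalizing into the +/-180 deg convention:
#   angle_norm(350, 180)   # -> -10
#   angle_norm(-190, 180)  # -> 170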
def iter_linfit(x, y, max_resid=None):
"""Performs least squares linear fit iteratively to discard bad points
If you actually know the statistical weights on the points,
just use polyfit directly.
"""
# Let polyfit report errors in x and y
coefs = np.polyfit(x, y, 1)
# We are done if we have just two points
if len(x) == 2:
return coefs
# Our first fit may be significantly pulled off by bad
# point(s), particularly if the number of points is small.
# Construct a repeat until loop the Python way with
# while... break to iterate to squeeze bad points out with
# low weights
last_redchi2 = None
iterations = 1
while True:
# Calculate weights roughly based on chi**2, but not going
# to infinity
yfit = x * coefs[0] + coefs[1]
resid = (y - yfit)
if np.all(resid == 0):
break
# Add 1 to avoid divide by zero error
resid2 = resid**2 + 1
# Use the residual as the variance + do the algebra
redchi2 = np.sum(1/(resid2))
coefs = np.polyfit(x, y, 1, w=1/resid2)
# Converge to a reasonable epsilon
if last_redchi2 and last_redchi2 - redchi2 < np.finfo(float).eps*10:
break
last_redchi2 = redchi2
iterations += 1
# The next level of cleanliness is to exclude any points above
# max_resid from the fit (if specified)
if max_resid is not None:
goodc = np.where(np.abs(resid) < max_resid)
# Where returns a tuple of arrays!
if len(goodc[0]) >= 2:
coefs = iter_linfit(x[goodc], y[goodc])
return coefs
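# Minimal usage sketch for iter_linfit with synthetic data (hypothetical
# values, not from the original source): one outlier gets down-weighted and,
# with max_resid set, excluded on the recursive pass.
#   x = np.arange(5, dtype=float)
#   y = 2. * x + 1.
#   y[2] += 10.                       # inject an outlier
#   slope, intercept = iter_linfit(x, y, max_resid=3.)
#   # slope, intercept should come out close to (2, 1)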
# I am either phasing this out or I could potentially make it work
# with __enter__ and __exit__ for a context manager
def get_HDUList(HDUList_im_or_fname):
"""Returns an astropy.fits.HDUList given a filename, image or
HDUList. If you have a set of HDUs, you'll need to put them
together into an HDUList yourself, since this can't guess how
to do that"""
if isinstance(HDUList_im_or_fname, fits.HDUList):
return HDUList_im_or_fname
elif isinstance(HDUList_im_or_fname, str):
return fits.open(HDUList_im_or_fname)
elif isinstance(HDUList_im_or_fname, np.ndarray):
hdu = fits.PrimaryHDU(HDUList_im_or_fname)
return fits.HDUList(hdu)
else:
raise ValueError('Not a valid input, HDUList_im_or_fname, expecting, fits.HDUList, string, or np.ndarray')
def pier_flip_astrometry(header_in):
"""Adjust FITS astrometry CD* keywords to emulate a pier flip (rotate FOV 180 deg)
header_in : input FITS header
return value : copy of header_in with CD* keywords adjusted"""
header = header_in.copy()
header['CDELT1'] *= -1
header['CDELT2'] *= -1
header['CD1_1'] *= -1
header['CD1_2'] *= -1
header['CD2_1'] *= -1
header['CD2_2'] *= -1
if header.get('PIERSIDE'):
if header['PIERSIDE'] == 'EAST':
header['PIERSIDE'] = 'WEST'
else:
header['PIERSIDE'] = 'EAST'
header['FLIPAPPL'] = (True, 'Artificially flipped pier side')
header['HISTORY'] = 'Artificially flipped pier side, modified CD* and PIERSIDE'
return header
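# Usage sketch (hypothetical file name, not from the original source):
#   hdr = fits.getheader('some_solved_image.fit')
#   flipped = pier_flip_astrometry(hdr)
# The returned copy has the CDELT*/CD* terms negated and PIERSIDE swapped,
# i.e. the WCS is rotated by 180 deg as if the mount had flipped sides.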
# --> Really what I think I want is a PGData for all of the center and
# --> rate stuff. That will clean up the ObsData property and
# --> __init__
class ObsData():
"""Base class for observations, enabling object centering, etc.
This is intended to work in an active observing setting, so
generally an image array will be received, the desired properties
will be calculated from it and those properties will be read by
the calling code.
"""
def __init__(self,
HDUList_im_or_fname=None,
desired_center=None,
recalculate=False,
readnoise=5):
if HDUList_im_or_fname is None:
raise ValueError('No HDUList_im_or_fname provided')
self.recalculate = recalculate
self.readnoise = readnoise
# Set up our basic FITS image info
self.header = None
self._binning = None
self._subframe_origin = None
self._HDU_unbinned = None
self._we_opened_file = None
# Keep property for later use/speedy access
self._hist_of_im = None
self._back_level = None
# These are in pixels
self._obj_center = None
self._desired_center = desired_center
if not self._desired_center is None:
self._desired_center = np.asarray(self._desired_center)
# --> Work with these
self.obj_center_err = np.asarray((1.,1.))
self.desired_center_tolerance = np.asarray((5.,5.))
# 0 -- 10 scale indicating quality of obj_center and
# desired_center calculations
self.quality = 0
# astropy time object for calc_flex_pix_rate
self.TRateChange = None
self.Tmidpoint = None
# Amount of guide box motion since first observation
# units=main camera pixels
self.total_flex_dpix = None
# one-time motion, just before exposure
self.delta_pix = None
# Make the guts of __init__ methods that can be overridden
# --> Here is where I would make the division between ObsData
# and PGData. PGData would init the rates and stuff + read
# the ObsData. The ObsData would have a cleanup method that
# otherwise would not be called
# Read our image
self.read_im(HDUList_im_or_fname)
# Populate our object
self.populate_obj()
self.cleanup()
def populate_obj(self):
"""Calculate quantities that will be stored long-term in object"""
# Note that if MaxIm is not configured to write IRAF-compliant
# keywords, IMAGETYP gets a little longer and is capitalized
# http://diffractionlimited.com/wp-content/uploads/2016/11/sbfitsext_1r0.pdf
kwd = self.header['IMAGETYP'].upper()
if 'DARK' in kwd or 'BIAS' in kwd or 'FLAT' in kwd:
raise ValueError('Not able to process IMAGETYP = ' + self.header['IMAGETYP'])
# Do our work & leave the results in the property
self.obj_center
self.desired_center
# --> CHANGE ME BACK
self._desired_center = np.asarray((1100, 1150))
def cleanup(self):
"""Close open file, deference large arrays"""
if self._we_opened_file:
self.close_fits()
del self.HDUList
del self._HDU_unbinned
def read_im(self, HDUList_im_or_fname=None):
"""Populate ObsData with HDUList and associated info"""
self.HDUList = get_HDUList(HDUList_im_or_fname)
# Store the original shape of our image so we can do
# coordinate calculations without it
self.oshape = np.asarray(self.HDUList[0].data.shape)
if isinstance(HDUList_im_or_fname, np.ndarray):
# We don't have any metadata
return self.HDUList
# All other options should have HDUList already populated with
# stuff we need. Copy stuff into our local property as needed
if isinstance(HDUList_im_or_fname, str):
self._we_opened_file = True
# Store the header in our object. This is just a
# reference at first, but after HDUList is deleted, this
# becomes the only copy
# https://stackoverflow.com/questions/22069727/python-garbage-collector-behavior-on-compound-objects
self.header = self.HDUList[0].header
# Calculate an astropy Time object for the midpoint of the
# observation for ease of time delta calculations.
# Account for darktime, if available
try:
exptime = self.header.get('DARKTIME')
if exptime is None:
exptime = self.header['EXPTIME']
# Use units to help with astropy.time calculations
exptime *= u.s
self.Tmidpoint = (Time(self.header['DATE-OBS'],
format='fits')
+ exptime/2)
except:
log.warning('Cannot read DARKTIME and/or EXPTIME keywords from FITS header')
try:
# Note Astropy Pythonic transpose Y, X order
self._binning = (self.header['YBINNING'],
self.header['XBINNING'])
self._binning = np.asarray(self._binning)
# This is in binned coordinates
self._subframe_origin = (self.header['YORGSUBF'],
self.header['XORGSUBF'])
self._subframe_origin = np.asarray(self._subframe_origin)
except:
log.warning('Could not read binning or subframe origin from image header. Did you pass a valid MaxIm-recorded image and header? Assuming binning = 1, subframe_origin = 0,0')
self._binning = np.asarray((1,1))
self._subframe_origin = (0,0)
if self.recalculate == True:
# We don't want to use values stored in the file, this
# forces recalculate
return self.HDUList
try:
cx = self.header['OBJ_CR0']
cy = self.header['OBJ_CR1']
self._obj_center = np.asarray((cy, cx))
dx = self.header['DES_CR0']
dy = self.header['DES_CR1']
self._desired_center = np.asarray((dy, dx))
except:
# It was worth a try
pass
return self.HDUList
def unbinned(self, coords):
"""Returns coords referenced to full CCD given internally stored binning/subim info"""
coords = np.asarray(coords)
return np.asarray(self._binning * coords + self._subframe_origin)
def binned(self, coords):
"""Assuming coords are referenced to full CCD, return location in binned coordinates relative to the subframe origin"""
coords = np.asarray(coords)
return np.asarray((coords - self._subframe_origin) / self._binning)
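# Note: binned() is the inverse of unbinned(): for any coordinate pair c,
# binned(unbinned(c)) == c (up to float round-off). Both work in the Y, X
# (NumPy) axis order used throughout this class.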
def im_unbinned(self, a):
"""Returns an unbinned version of a. a must be same shape
as the primary HDU image
"""
assert a.shape == self.HDUList[0].data.shape
# Don't bother if we are already unbinned
if np.sum(self._binning) == 2:
return a
newshape = self._binning * a.shape
# From http://scipy-cookbook.readthedocs.io/items/Rebinning.html
assert len(a.shape) == len(newshape)
slices = [ slice(0,old, float(old)/new)
for old,new in zip(a.shape,newshape) ]
coordinates = np.mgrid[slices]
indices = coordinates.astype('i') #choose the biggest smaller integer index
unbinned = a[tuple(indices)]
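# The fractional-step slices fed to np.mgrid give, for every output pixel,
# the (fractional) source-pixel coordinate it maps back to; truncating to int
# and fancy-indexing `a` therefore replicates each binned pixel
# binning-factor times (nearest-lower-neighbour upsampling).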
# Check to see if we need to make a larger array into which to
# plop unbinned array
if np.sum(self._subframe_origin) > 0:
# Note subframe origin reads in binned pixels
origin = self.unbinned(self._subframe_origin)
full_unbinned = np.zeros(origin + unbinned.shape)
full_unbinned[origin[0]:, origin[1]:] = unbinned
unbinned = full_unbinned
return unbinned
@property
def HDU_unbinned(self):
"""Returns an unbinned version of the primary HDU image or the primary HDU image if it is not binned.
"""
if self._HDU_unbinned is not None:
return self._HDU_unbinned
self._HDU_unbinned = self.im_unbinned(self.HDUList[0].data)
return self._HDU_unbinned
def close_fits(self):
# HDUList.close() is safe to call even when the list is not file-backed
self.HDUList.close()
self._we_opened_file = None
def imshow(self, im=None):
if im is None:
im = self.HDUList[0].data
plt.imshow(im)
plt.show()
@property
def obj_center(self):
"""Returns pixel coordinate of the brightests object in the image in
UNBINNED Y, X coordinates. Does basic median filtering to get
rid of cosmic rays. It is assumed this will be overridden
with better object finders, such as one that uses PinPoint
astrometry.
"""
if self._obj_center is not None:
return self._obj_center
# Take the median to get rid of cosmic rays
im = self.HDUList[0].data
im = signal.medfilt(im, kernel_size=3)
im_center = np.unravel_index(
|
np.argmax(im)
|
numpy.argmax
|
import os
import h5py
import numpy as np
import ujson as json
import tensorflow as tf
from tqdm import tqdm
import sys
sys.path.append('../..')
from model import QGModel, QGRLModel
from eval import evaluate, evaluate_simple, convert_tokens, evaluate_rl, format_generated_ques_for_qpc, \
format_generated_ques_for_qa
from LIB.utils import get_batch_dataset, get_dataset, write_metrics, save
from utils import get_record_parser
from QPC.ELMo_QPC.model import QPCModel
from QA.BiDAF_QA.model import BidafQA
def train(config):
with open(config.word_emb_file, "r") as fh:
word_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.pos_emb_file, "r") as fh:
pos_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.ner_emb_file, "r") as fh:
ner_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.label_emb_file, "r") as fh:
label_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.train_eval_file, "r") as fh:
train_eval_file = json.load(fh)
with open(config.dev_eval_file, "r") as fh:
dev_eval_file = json.load(fh)
with open(config.dev_meta, "r") as fh:
meta = json.load(fh)
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with h5py.File(config.embedding_file, 'r') as fin:
embed_weights = fin["embedding"][...]
elmo_word_mat = np.zeros((embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=np.float32)
elmo_word_mat[1:, :] = embed_weights
id2word = {word_dictionary[w]: w for w in word_dictionary}
dev_total = meta["total"]
print("Building model...")
parser = get_record_parser(config)
graph = tf.Graph()
best_bleu, best_ckpt = 0., 0
with graph.as_default() as g:
train_dataset = get_batch_dataset(config.train_record_file, parser, config.batch_size)
dev_dataset = get_dataset(config.dev_record_file, parser, config.batch_size)
train_iterator = train_dataset.make_one_shot_iterator()
dev_iterator = dev_dataset.make_one_shot_iterator()
model = QGModel(config, word_mat, elmo_word_mat, label_mat, pos_mat, ner_mat)
model.build_graph()
model.add_train_op()
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
writer = tf.summary.FileWriter(config.output_dir)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=1000,
var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
if os.path.exists(os.path.join(config.output_dir, "checkpoint")):
saver.restore(sess, tf.train.latest_checkpoint(config.output_dir))
if os.path.exists(config.best_ckpt):
with open(config.best_ckpt, "r") as fh:
best_qg_ckpt = json.load(fh)
best_bleu, best_ckpt = float(best_qg_ckpt["best_bleu"]), int(best_qg_ckpt["best_ckpt"])
global_step = max(sess.run(model.global_step), 1)
train_next_element = train_iterator.get_next()
for _ in tqdm(range(global_step, config.num_steps + 1)):
global_step = sess.run(model.global_step) + 1
para, para_unk, para_char, que, que_unk, que_char, labels, pos_tags, ner_tags, \
que_labels, que_pos_tags, que_ner_tags, y1, y2, qa_id = sess.run(train_next_element)
loss, _ = sess.run([model.loss, model.train_op], feed_dict={
model.para: para, model.para_unk: para_unk, model.que: que,
model.labels: labels, model.pos_tags: pos_tags, model.ner_tags: ner_tags,
model.dropout: config.dropout, model.qa_id: qa_id,
})
if global_step % config.period == 0:
loss_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/loss", simple_value=loss), ])
writer.add_summary(loss_sum, global_step)
if global_step % config.checkpoint == 0:
filename = os.path.join(
config.output_dir, "model_{}.ckpt".format(global_step))
saver.save(sess, filename)
metrics = evaluate_batch(config, model, config.val_num_batches, train_eval_file, sess,
train_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "train")
metrics = evaluate_batch(config, model, dev_total // config.batch_size + 1, dev_eval_file,
sess, dev_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "dev")
bleu = metrics["bleu"]
if bleu > best_bleu:
best_bleu, best_ckpt = bleu, global_step
save(config.best_ckpt, {"best_bleu": str(best_bleu), "best_ckpt": str(best_ckpt)},
config.best_ckpt)
def evaluate_batch(config, model, num_batches, eval_file, sess, iterator, id2word, evaluate_func=evaluate):
answer_dict = {}
losses = []
next_element = iterator.get_next()
for _ in tqdm(range(1, num_batches + 1)):
para, para_unk, para_char, que, que_unk, que_char, labels, pos_tags, ner_tags, \
que_labels, que_pos_tags, que_ner_tags, y1, y2, qa_id = sess.run(next_element)
loss, symbols, probs = sess.run([model.loss, model.symbols, model.probs],
feed_dict={
model.para: para, model.para_unk: para_unk, model.que: que,
model.labels: labels, model.pos_tags: pos_tags,
model.ner_tags: ner_tags, model.qa_id: qa_id,
model.temperature: config.temperature,
model.diverse_rate: config.diverse_rate
})
answer_dict_ = convert_tokens(eval_file, qa_id, symbols, probs, id2word)
for key in answer_dict_:
if key not in answer_dict:
answer_dict[key] = answer_dict_[key]
losses.append(loss)
loss = np.mean(losses)
print(len(answer_dict))
metrics = evaluate_func(eval_file, answer_dict)
metrics["loss"] = loss
return metrics
def test(config):
with open(config.word_emb_file, "r") as fh:
word_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.pos_emb_file, "r") as fh:
pos_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.ner_emb_file, "r") as fh:
ner_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.label_emb_file, "r") as fh:
label_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.test_eval_file, "r") as fh:
eval_file = json.load(fh)
with open(config.test_meta, "r") as fh:
meta = json.load(fh)
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with h5py.File(config.embedding_file, 'r') as fin:
embed_weights = fin["embedding"][...]
elmo_word_mat = np.zeros((embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=np.float32)
elmo_word_mat[1:, :] = embed_weights
id2word = {word_dictionary[w]: w for w in word_dictionary}
total = meta["total"]
print(total)
graph = tf.Graph()
print("Loading model...")
with graph.as_default() as g:
test_iterator = get_dataset(config.test_record_file, get_record_parser(
config, is_test=True), config.test_batch_size).make_one_shot_iterator()
model = QGModel(config, word_mat, elmo_word_mat, label_mat, pos_mat, ner_mat, trainable=False)
model.build_graph()
model.add_train_op()
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
if os.path.exists(config.best_ckpt):
with open(config.best_ckpt, "r") as fh:
best_ckpt = json.load(fh)
checkpoint_to_test = int(best_ckpt["best_ckpt"])
else:
print("No Best!")
exit()
with tf.Session(config=sess_config) as sess:
if config.diverse_beam:
filename = "{}/diverse{}_beam{}".format(config.output_dir, config.diverse_rate, config.beam_size)
elif config.sample:
filename = "{}/temperature{}_sample{}".format(config.output_dir, config.temperature, config.sample_size)
else:
filename = "{}/beam{}".format(config.output_dir, config.beam_size)
writer = tf.summary.FileWriter(filename)
checkpoint = "{}/model_{}.ckpt".format(config.output_dir, checkpoint_to_test)
print(checkpoint)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
saver.restore(sess, checkpoint)
global_step = sess.run(model.global_step)
metrics = evaluate_batch(config, model, total // config.test_batch_size + 1, eval_file, sess,
test_iterator, id2word)
print(metrics)
write_metrics(metrics, writer, global_step, "test")
def train_rl(config):
with open(config.word_emb_file, "r") as fh:
word_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.pos_emb_file, "r") as fh:
pos_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.ner_emb_file, "r") as fh:
ner_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.label_emb_file, "r") as fh:
label_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.train_eval_file, "r") as fh:
train_eval_file = json.load(fh)
with open(config.dev_eval_file, "r") as fh:
dev_eval_file = json.load(fh)
with open(config.dev_meta, "r") as fh:
meta = json.load(fh)
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with h5py.File(config.embedding_file, 'r') as fin:
embed_weights = fin["embedding"][...]
elmo_word_mat = np.zeros((embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=np.float32)
elmo_word_mat[1:, :] = embed_weights
id2word = {word_dictionary[w]: w for w in word_dictionary}
best_bleu, best_ckpt = 0., 0
dev_total = meta["total"]
print("Building model...")
parser = get_record_parser(config)
graph_qg = tf.Graph()
with graph_qg.as_default():
train_dataset = get_batch_dataset(config.train_record_file, parser, config.batch_size)
dev_dataset = get_dataset(config.dev_record_file, parser, config.batch_size)
train_iterator = train_dataset.make_one_shot_iterator()
dev_iterator = dev_dataset.make_one_shot_iterator()
model_qg = QGRLModel(config, word_mat, elmo_word_mat, label_mat, pos_mat, ner_mat)
model_qg.build_graph()
model_qg.add_train_op()
sess_qg = tf.Session(graph=graph_qg)
writer = tf.summary.FileWriter(config.output_dir)
with sess_qg.as_default():
with graph_qg.as_default():
sess_qg.run(tf.global_variables_initializer())
saver_qg = tf.train.Saver(max_to_keep=1000,
var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
if os.path.exists(os.path.join(config.output_dir, "checkpoint")):
saver_qg.restore(sess_qg, tf.train.latest_checkpoint(config.output_dir))
if os.path.exists(config.best_ckpt):
with open(config.best_ckpt, "r") as fh:
best_qg_ckpt = json.load(fh)
best_bleu, best_ckpt = float(best_qg_ckpt["best_bleu"]), int(best_qg_ckpt["best_ckpt"])
global_step = max(sess_qg.run(model_qg.global_step), 1)
train_next_element = train_iterator.get_next()
for _ in tqdm(range(global_step, config.num_steps + 1)):
global_step = sess_qg.run(model_qg.global_step) + 1
para, para_unk, para_char, que, que_unk, que_char, labels, pos_tags, ner_tags, \
que_labels, que_pos_tags, que_ner_tags, y1, y2, qa_id = sess_qg.run(train_next_element)
# get greedy search questions as baseline and sampled questions
symbols, symbols_rl = sess_qg.run([model_qg.symbols, model_qg.symbols_rl], feed_dict={
model_qg.para: para, model_qg.para_unk: para_unk, model_qg.que: que,
model_qg.labels: labels, model_qg.pos_tags: pos_tags,
model_qg.ner_tags: ner_tags, model_qg.qa_id: qa_id
})
# get rewards and format sampled questions
reward, reward_rl, reward_base, que_rl = evaluate_rl(train_eval_file, qa_id, symbols, symbols_rl, id2word,
metric=config.rl_metric)
# update model with policy gradient
loss_ml, _ = sess_qg.run([model_qg.loss_ml, model_qg.train_op], feed_dict={
model_qg.para: para, model_qg.para_unk: para_unk, model_qg.que: que,
model_qg.labels: labels, model_qg.pos_tags: pos_tags,
model_qg.ner_tags: ner_tags, model_qg.dropout: config.dropout, model_qg.qa_id: qa_id,
model_qg.sampled_que: que_rl, model_qg.reward: reward
})
if global_step % config.period == 0:
loss_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/loss", simple_value=loss_ml), ])
writer.add_summary(loss_sum, global_step)
reward_base_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/reward_base", simple_value=np.mean(reward_base)), ])
writer.add_summary(reward_base_sum, global_step)
reward_rl_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/reward_rl", simple_value=np.mean(reward_rl)), ])
writer.add_summary(reward_rl_sum, global_step)
if global_step % config.checkpoint == 0:
filename = os.path.join(
config.output_dir, "model_{}.ckpt".format(global_step))
saver_qg.save(sess_qg, filename)
metrics = evaluate_batch(config, model_qg, config.val_num_batches, train_eval_file, sess_qg,
train_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "train")
metrics = evaluate_batch(config, model_qg, dev_total // config.batch_size + 1, dev_eval_file,
sess_qg, dev_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "dev")
bleu = metrics["bleu"]
if bleu > best_bleu:
best_bleu, best_ckpt = bleu, global_step
save(config.best_ckpt, {"best_bleu": str(best_bleu), "best_ckpt": str(best_ckpt)},
config.best_ckpt)
def train_qpp(config):
with open(config.word_emb_file, "r") as fh:
word_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.pos_emb_file, "r") as fh:
pos_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.ner_emb_file, "r") as fh:
ner_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.label_emb_file, "r") as fh:
label_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.train_eval_file, "r") as fh:
train_eval_file = json.load(fh)
with open(config.dev_eval_file, "r") as fh:
dev_eval_file = json.load(fh)
with open(config.dev_meta, "r") as fh:
meta = json.load(fh)
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with h5py.File(config.embedding_file, 'r') as fin:
embed_weights = fin["embedding"][...]
elmo_word_mat = np.zeros((embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=np.float32)
elmo_word_mat[1:, :] = embed_weights
id2word = {word_dictionary[w]: w for w in word_dictionary}
dev_total = meta["total"]
best_bleu, best_ckpt = 0., 0
print("Building model...")
parser = get_record_parser(config)
graph_qg = tf.Graph()
graph_qqp = tf.Graph()
with graph_qg.as_default():
train_dataset = get_batch_dataset(config.train_record_file, parser, config.batch_size)
dev_dataset = get_dataset(config.dev_record_file, parser, config.batch_size)
train_iterator = train_dataset.make_one_shot_iterator()
dev_iterator = dev_dataset.make_one_shot_iterator()
with graph_qg.as_default() as g:
model_qg = QGRLModel(config, word_mat, elmo_word_mat, label_mat, pos_mat, ner_mat)
model_qg.build_graph()
model_qg.add_train_op()
with graph_qqp.as_default() as g:
model_qqp = QPCModel(config, dev=True, trainable=False, graph=g)
sess_qg = tf.Session(graph=graph_qg)
sess_qqp = tf.Session(graph=graph_qqp)
writer = tf.summary.FileWriter(config.output_dir)
with sess_qg.as_default():
with graph_qg.as_default():
sess_qg.run(tf.global_variables_initializer())
saver_qg = tf.train.Saver(max_to_keep=1000,
var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
if os.path.exists(os.path.join(config.output_dir, "checkpoint")):
print(tf.train.latest_checkpoint(config.output_dir))
saver_qg.restore(sess_qg, tf.train.latest_checkpoint(config.output_dir))
if os.path.exists(config.best_ckpt):
with open(config.best_ckpt, "r") as fh:
best_qg_ckpt = json.load(fh)
best_bleu, best_ckpt = float(best_qg_ckpt["best_bleu"]), int(best_qg_ckpt["best_ckpt"])
with sess_qqp.as_default():
with graph_qqp.as_default():
sess_qqp.run(tf.global_variables_initializer())
saver_qqp = tf.train.Saver()
if os.path.exists(config.best_ckpt_qpc):
with open(config.best_ckpt_qpc, "r") as fh:
best_qpc_ckpt = json.load(fh)
best_ckpt = int(best_qpc_ckpt["best_ckpt"])
print("{}/model_{}.ckpt".format(config.output_dir_qpc, best_ckpt))
saver_qqp.restore(sess_qqp, "{}/model_{}.ckpt".format(config.output_dir_qpc, best_ckpt))
else:
print("NO the best QPC model to load!")
exit()
global_step = max(sess_qg.run(model_qg.global_step), 1)
train_next_element = train_iterator.get_next()
for _ in tqdm(range(global_step, config.num_steps + 1)):
global_step = sess_qg.run(model_qg.global_step) + 1
para, para_unk, para_char, que, que_unk, que_char, labels, pos_tags, ner_tags, \
que_labels, que_pos_tags, que_ner_tags, y1, y2, qa_id = sess_qg.run(train_next_element)
symbols, symbols_rl = sess_qg.run([model_qg.symbols, model_qg.symbols_rl], feed_dict={
model_qg.para: para, model_qg.para_unk: para_unk, model_qg.que: que, model_qg.labels: labels,
model_qg.pos_tags: pos_tags, model_qg.ner_tags: ner_tags, model_qg.qa_id: qa_id
})
# format questions for QPC
que_base, que_unk_base, que_rl, que_unk_rl = \
format_generated_ques_for_qpc(qa_id, symbols, symbols_rl, config.batch_size,
config.ques_limit, id2word)
label = np.zeros((config.batch_size, 2), dtype=np.int32)
# QQP reward
reward_base = sess_qqp.run(model_qqp.pos_prob, feed_dict={
model_qqp.que1: que_unk, model_qqp.que2: que_unk_base,
model_qqp.label: label, model_qqp.qa_id: qa_id,
})
reward_rl = sess_qqp.run(model_qqp.pos_prob, feed_dict={
model_qqp.que1: que_unk, model_qqp.que2: que_unk_rl,
model_qqp.label: label, model_qqp.qa_id: qa_id,
})
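        # The two scores above come from the QPC model: one for the baseline question
        # (que_unk_base) and one for the sampled question (que_unk_rl); their difference
        # below acts as a self-critical-style reward for the policy-gradient update.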
reward = [rr - rb for rr, rb in zip(reward_rl, reward_base)]
# train with rl
loss_ml, _ = sess_qg.run([model_qg.loss_ml, model_qg.train_op], feed_dict={
model_qg.para: para, model_qg.para_unk: para_unk, model_qg.labels: labels,
model_qg.pos_tags: pos_tags, model_qg.ner_tags: ner_tags,
model_qg.dropout: config.dropout, model_qg.qa_id: qa_id,
model_qg.que: que, model_qg.sampled_que: que_rl, model_qg.reward: reward
})
if global_step % config.period == 0:
loss_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/loss", simple_value=loss_ml), ])
writer.add_summary(loss_sum, global_step)
reward_base_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/reward_base", simple_value=np.mean(reward_base)), ])
writer.add_summary(reward_base_sum, global_step)
reward_rl_sum = tf.Summary(value=[tf.Summary.Value(
tag="model/reward_rl", simple_value=np.mean(reward_rl)), ])
writer.add_summary(reward_rl_sum, global_step)
if global_step % config.checkpoint == 0:
filename = os.path.join(
config.output_dir, "model_{}.ckpt".format(global_step))
saver_qg.save(sess_qg, filename)
metrics = evaluate_batch(config, model_qg, config.val_num_batches, train_eval_file, sess_qg,
train_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "train")
metrics = evaluate_batch(config, model_qg, dev_total // config.batch_size + 1, dev_eval_file,
sess_qg, dev_iterator, id2word, evaluate_func=evaluate_simple)
write_metrics(metrics, writer, global_step, "dev")
bleu = metrics["bleu"]
if bleu > best_bleu:
best_bleu, best_ckpt = bleu, global_step
save(config.best_ckpt, {"best_bleu": str(best_bleu), "best_ckpt": str(best_ckpt)},
config.best_ckpt)
def train_qap(config):
with open(config.word_emb_file, "r") as fh:
word_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.char_emb_file, "r") as fh:
char_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.pos_emb_file, "r") as fh:
pos_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.ner_emb_file, "r") as fh:
ner_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.label_emb_file, "r") as fh:
label_mat = np.array(json.load(fh), dtype=np.float32)
with open(config.train_eval_file, "r") as fh:
train_eval_file = json.load(fh)
with open(config.dev_eval_file, "r") as fh:
dev_eval_file = json.load(fh)
with open(config.dev_meta, "r") as fh:
meta = json.load(fh)
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with open(config.char_dictionary, "r") as fh:
char_dictionary = json.load(fh)
with h5py.File(config.embedding_file, 'r') as fin:
embed_weights = fin["embedding"][...]
elmo_word_mat = np.zeros((embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=np.float32)
elmo_word_mat[1:, :] = embed_weights
id2word = {word_dictionary[w]: w for w in word_dictionary}
dev_total = meta["total"]
best_bleu, best_ckpt = 0., 0
print("Building model...")
parser = get_record_parser(config)
graph_qg = tf.Graph()
graph_qa = tf.Graph()
with graph_qg.as_default():
train_dataset = get_batch_dataset(config.train_record_file, parser, config)
dev_dataset = get_dataset(config.dev_record_file, parser, config)
train_iterator = train_dataset.make_one_shot_iterator()
dev_iterator = dev_dataset.make_one_shot_iterator()
with graph_qg.as_default() as g:
model_qg = QGRLModel(config, word_mat, elmo_word_mat, label_mat, pos_mat, ner_mat)
with graph_qa.as_default() as g:
model_qa = BidafQA(config, word_mat, char_mat, dev=True, trainable=False)
sess_qg = tf.Session(graph=graph_qg)
sess_qa = tf.Session(graph=graph_qa)
writer = tf.summary.FileWriter(config.output_dir)
with sess_qg.as_default():
with graph_qg.as_default():
model_qg.build_graph()
model_qg.add_train_op()
sess_qg.run(tf.global_variables_initializer())
saver_qg = tf.train.Saver(max_to_keep=1000,
var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
if os.path.exists(os.path.join(config.output_dir, "checkpoint")):
print(tf.train.latest_checkpoint(config.output_dir))
saver_qg.restore(sess_qg, tf.train.latest_checkpoint(config.output_dir))
if os.path.exists(config.best_ckpt):
with open(config.best_ckpt, "r") as fh:
best_qg_ckpt = json.load(fh)
best_bleu, best_ckpt = float(best_qg_ckpt["best_bleu"]), int(best_qg_ckpt["best_ckpt"])
with sess_qa.as_default():
with graph_qa.as_default():
model_qa.build_graph()
model_qa.add_train_op()
sess_qa.run(tf.global_variables_initializer())
saver_qa = tf.train.Saver(max_to_keep=1000,
var_list=[p for p in tf.global_variables() if "word_mat" not in p.name])
if os.path.exists(config.best_ckpt_qa):
with open(config.best_ckpt_qa, "r") as fh:
best_qpc_ckpt = json.load(fh)
best_ckpt = int(best_qpc_ckpt["best_ckpt"])
print("{}/model_{}.ckpt".format(config.output_dir_qa, best_ckpt))
saver_qa.restore(sess_qa, "{}/model_{}.ckpt".format(config.output_dir_qa, best_ckpt))
else:
print("NO the best QA model to load!")
exit()
global_step = max(sess_qg.run(model_qg.global_step), 1)
train_next_element = train_iterator.get_next()
for _ in tqdm(range(global_step, config.num_steps + 1)):
global_step = sess_qg.run(model_qg.global_step) + 1
para, para_unk, para_char, que, que_unk, que_char, labels, pos_tags, ner_tags, \
que_labels, que_pos_tags, que_ner_tags, y1, y2, qa_id = sess_qg.run(train_next_element)
symbols, symbols_rl = sess_qg.run([model_qg.symbols, model_qg.symbols_rl], feed_dict={
model_qg.para: para, model_qg.para_unk: para_unk, model_qg.que: que, model_qg.labels: labels,
model_qg.pos_tags: pos_tags, model_qg.ner_tags: ner_tags, model_qg.qa_id: qa_id
})
# format questions for QA
que_base, que_unk_base, que_char_base, que_rl, que_unk_rl, que_char_rl = \
format_generated_ques_for_qa(train_eval_file, qa_id, symbols, symbols_rl, config.batch_size,
config.ques_limit, config.char_limit, id2word, char_dictionary)
# QAP reward
base_qa_loss = sess_qa.run(model_qa.batch_loss,
feed_dict={
model_qa.para: para_unk, model_qa.para_char: para_char,
model_qa.que: que_unk_base, model_qa.que_char: que_char_base,
model_qa.y1: y1, model_qa.y2: y2, model_qa.qa_id: qa_id,
})
qa_loss = sess_qa.run(model_qa.batch_loss,
feed_dict={
model_qa.para: para_unk, model_qa.para_char: para_char,
model_qa.que: que_unk_rl, model_qa.que_char: que_char_rl,
model_qa.y1: y1, model_qa.y2: y2, model_qa.qa_id: qa_id,
})
reward_base = list(map(lambda x:
|
np.exp(-x)
|
numpy.exp
|
# -*- coding: utf-8 -*-
"""
Python version of the LODE implementation.
Currently, only the exponent p=1 with the optimal radial basis r^l for each
angular channel l=0,1,2,...,lmax is supported.
"""
import logging
import numpy as np
from scipy.special import gammainc
from scipy.integrate import quad
try:
from tqdm import tqdm
except ImportError:
tqdm = (lambda i, **kwargs: i)
from .kvec_generator import KvectorGenerator
from .radial_basis import RadialBasis
from .spherical_harmonics import evaluate_spherical_harmonics
logger = logging.getLogger(__name__)
def gammainc_upper_numerical(n, zz):
"""
    Compute the upper incomplete Gamma function Gamma(n, z), i.e. the integral
    of x**(n-1) * exp(-x) from z to infinity, by numerical quadrature.
"""
yy = np.zeros_like(zz)
integrand = lambda x: x**(n-1) * np.exp(-x)
for iz, z in enumerate(zz):
yy[iz] = quad(integrand, z, np.inf)[0]
return yy
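# A minimal sanity check (hypothetical helper, not part of the original module):
# for a > 0 the result should equal scipy's regularized upper incomplete Gamma
# function scaled back, i.e. Gamma(a, z) = gammaincc(a, z) * Gamma(a).
def _check_gammainc_upper_numerical(a=3.0, zz=np.array([0.5, 1.0, 2.0])):
    from scipy.special import gammaincc, gamma
    expected = gammaincc(a, zz) * gamma(a)
    assert np.allclose(gammainc_upper_numerical(a, zz), expected)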
class DensityProjectionCalculator():
"""
Compute the spherical expansion coefficients.
Initialize the calculator using the hyperparameters.
All the needed splines that only depend on the hyperparameters
are prepared as well by storing the values.
Parameters
----------
max_radial : int
Number of radial functions
max_angular : int
Number of angular functions
cutoff_radius : float
Environment cutoff (Å)
smearing : float
        Smearing of the Gaussian (Å). Note that computational cost scales
cubically with 1/smearing.
radial_basis : str
The radial basis. Currently implemented are
'GTO_primitive', 'GTO', 'monomial'.
For monomial: Only use one radial basis r^l for each angular
channel l leading to a total of (lmax+1)^2 features.
compute_gradients : bool
Compute gradients
potential_exponent : int
potential exponent: p=0 uses Gaussian densities,
        p=1 is LODE using 1/r (Coulomb) densities
subtract_center_contribution : bool
Subtract contribution from the central atom.
Attributes
----------
features : array
        The computed projection coefficients, stored as an array of dimension:
num_environ x num_chem_species x num_radial x num_lm_coefficients,
where:
num_environ = total number of atoms in the system summed over
all frames
num_chem_species = number of chemical species
num_radial = nmax
num_lm_coefficients = (lmax+1)^2
feature_gradients : array
the gradients of the projection coefficients
The returned array has dimensions:
num_environm_squared x 3 x num_chemical_species x num_radial x num_lm_coefficients,
The factor of 3 corresponds to x,y,z-components.
Otherwise, the specification is almost identical to get_features, except
that the first axis specifying the atomic environment now runs over
all pairs (i,j).
Example: For a structure containing 3 atoms labeled as (0,1,2),
the gradients are stored as
gradients[0] = dV_0/dr_0
gradients[1] = dV_0/dr_1
gradients[2] = dV_0/dr_2
gradients[3] = dV_1/dr_0
gradients[4] = dV_1/dr_1
...
gradients[8] = dV_2/dr_2
If multiple frames are present, all these coefficients are
concatenated along the 0-th axis, as usual e.g. for SOAP vectors
in librascal.
representation_info : array
        Metadata used to interact with atomistic-ml-storage.
"""
def __init__(self,
max_radial,
max_angular,
cutoff_radius,
smearing,
radial_basis,
compute_gradients=False,
potential_exponent=1,
subtract_center_contribution=False,
fast_implementation=True):
# Store the input variables
self.max_radial = max_radial
self.max_angular = max_angular
self.cutoff_radius = cutoff_radius
self.radial_basis = radial_basis.lower()
self.smearing = smearing
self.potential_exponent = potential_exponent
self.compute_gradients = compute_gradients
self.subtract_center_contribution = subtract_center_contribution
self.fast_implementation = fast_implementation
# Make sure that the provided parameters are consistent
if self.potential_exponent not in [0, 1, 2, 3, 4, 5, 6]:
raise ValueError("Potential exponent has to be 0, 1, 2, ..., 6")
if self.radial_basis not in ["monomial", "gto", "gto_primitive"]:
raise ValueError(f"{self.radial_basis} is not an implemented basis"
". Try 'monomial', 'GTO' or GTO_primitive.")
if self.radial_basis == "monomial" and self.max_radial != 1:
raise ValueError("For monomial basis only `max_radial=1` "
"is allowed.")
        # Auxiliary quantity: the actual number of features is this number
# times the number of chemical species
self.num_features_bare = self.max_radial * (self.max_angular + 1)**2
# Initialize radial basis class to precompute the quantities
# only related to the choice of radial basis, namely the
# projections of the spherical Bessel functions j_l(kr) onto the
# radial basis and (if desired) the center contributions
self.radial_proj = RadialBasis(self.max_radial, self.max_angular,
self.cutoff_radius, self.smearing,
self.radial_basis,
potential_exponent,
self.subtract_center_contribution)
self.radial_proj.compute(np.pi/self.smearing)
def transform(self, frames, show_progress=False):
"""
Computes the features and (if compute_gradients == True) gradients
for all the provided frames. The features and gradients are stored in
features and feature_gradients attribute.
Parameters
----------
        frames : list of ase.Atoms
List containing all ase.Atoms structures
show_progress : bool
Show progress bar for frame analysis
Returns
-------
None, but stores the projection coefficients and (if desired)
gradients as arrays as `features` and `features_gradients`.
"""
self.frames = frames
# Check that the provided cells are large enough:
# Roughly speaking, all cell dimensions L need to be at least
# twice the used smearing: L > 2 * smearing
too_small_frames_list = []
length_min = 1e15
for iframe, frame in enumerate(frames):
cell = frame.get_cell()
basis_vector_lengths = np.linalg.norm(cell, axis=1)
            length_min_cell = min(basis_vector_lengths)
if length_min > length_min_cell:
length_min = length_min_cell
if 2*self.smearing >= length_min_cell:
too_small_frames_list.append(iframe)
if self.smearing >= length_min/2:
raise ValueError(f"Given `smearing` ({self.smearing} Å) is too large for "
f"structures {too_small_frames_list}. Smearing must be"
f"smaller than half of the shortest"
f"box length ({length_min} Å)! "
f"Use a smearing > {length_min/2}")
# Generate a dictionary to map atomic species to array indices
# In general, the species are sorted according to atomic number
# and assigned the array indices 0,1,2,...
# Example: for H2O: H is mapped to 0 and O is mapped to 1.
species = set()
for frame in frames:
for atom in frame:
species.add(atom.number)
species = sorted(species)
self.species_dict = {}
for frame in frames:
#Get atomic species in dataset
self.species_dict.update({atom.symbol: species.index(atom.number) for atom in frame})
# Define variables determining size of feature vector coming from frames
self.num_atoms_per_frame = np.array([len(frame) for frame in frames])
num_atoms_total = np.sum(self.num_atoms_per_frame)
num_chem_species = len(self.species_dict)
# Initialize arrays in which to store all features
self.features = np.zeros((num_atoms_total, num_chem_species,
self.max_radial, (self.max_angular+1)**2))
if self.compute_gradients:
num_gradients = np.sum(self.num_atoms_per_frame**2)
self.feature_gradients = np.zeros((num_gradients, 3, num_chem_species,
self.max_radial, (self.max_angular+1)**2))
# For each frame, compute the projection coefficients
current_index = 0
gradient_index = 0
if show_progress:
frame_generator = tqdm(self.frames)
else:
frame_generator = self.frames
for i_frame, frame in enumerate(frame_generator):
number_of_atoms = self.num_atoms_per_frame[i_frame]
results = self._transform_single_frame(frame)
# Returned values are features + gradients
if self.compute_gradients:
features = results[0]
self.feature_gradients[gradient_index:gradient_index+number_of_atoms**2] += results[1]
# Returned values are only the features
else:
features = results
self.features[current_index:current_index+number_of_atoms] += features
current_index += number_of_atoms
gradient_index += number_of_atoms**2
def _transform_single_frame(self, frame):
"""
Compute features for single frame and return to the transform()
method which loops over all structures to obtain the complete
vector for all environments.
"""
###
# Initialization
###
# Define useful shortcuts
lmax = self.max_angular
nmax = self.max_radial
num_lm = (lmax+1)**2
num_atoms = len(frame)
num_chem_species = len(self.species_dict)
iterator_species = np.zeros(num_atoms, dtype=int)
for i, symbol in enumerate(frame.get_chemical_symbols()):
iterator_species[i] = self.species_dict[symbol]
# Initialize arrays in which to store all features
frame_features = np.zeros((num_atoms, num_chem_species,
self.max_radial, (self.max_angular+1)**2))
if self.compute_gradients:
num_gradients = np.sum(num_atoms**2)
frame_gradients = np.zeros((num_gradients, 3, num_chem_species,
self.max_radial, (self.max_angular+1)**2))
# Debug log
logger.debug(f"num_atoms = {num_atoms}")
logger.debug(f"shape frame_features = {frame_features.shape}")
# Extra phase dependent on angular channel for convenience
angular_phases =
|
np.zeros(lmax+1)
|
numpy.zeros
|
"""
analysis.py
Author: <NAME>
Affiliation: McGill University
Created on: Wed 16 Dec 2020 16:16:41 EST
Description:
"""
import pickle
import numpy as np
from .models import BubbleModel
from scipy.ndimage import gaussian_filter
from .inference import tanh_generic, power_law, power_law_max1, \
broken_power_law, broken_power_law_max1, double_power_law, \
extract_params, power_law_lognorm, erf_Q, power_law_Q, lin_Q
from .util import labels, bin_e2c, bin_c2e, get_error_2d
try:
import matplotlib.pyplot as pl
from matplotlib.cm import ScalarMappable
from matplotlib.colors import LogNorm, Normalize
except ImportError:
pass
_default_modes = np.logspace(-1, 0., 21)
_default_colors = ['k', 'b', 'm', 'c', 'r', 'g', 'y', 'orange']
_default_ls = ['-', '--', '-.', ':']
_default_labels = {'Q': r'$Q$', 'R': r'$R$', 'Ts': r'$T_S$',
'sigma': r'$\sigma$', 'gamma': r'$\gamma$', 'Asys': r'$A_{\rm{sys}}$'}
_default_limits = {'Q': (-0.05, 1.05), 'R': (3e-1, 30),
'Ts': (1, 200),
'sigma': (0, 2), 'gamma': (-4, -2), 'Asys': (0.2, 2)}
_default_z = np.arange(5, 20, 0.05)
_default_Q = np.arange(0, 1.01, 0.01)
bbox = dict(edgecolor='k', fc='w',
    boxstyle='round,pad=0.3', alpha=0.9, zorder=1000)
class AnalyzeFit(object): # pragma: no cover
def __init__(self, fn):
self.fn = fn
@property
def data(self):
if not hasattr(self, '_data'):
with open(self.fn, 'rb') as f:
self._data = pickle.load(f)
return self._data
@property
def model(self):
if not hasattr(self, '_model'):
self._model = BubbleModel(**self.data['kwargs'])
return self._model
@property
def custom_labels(self):
if not hasattr(self, '_custom_labels'):
self._custom_labels = {}
return self._custom_labels
@custom_labels.setter
def custom_labels(self, value):
if not hasattr(self, '_custom_labels'):
self._custom_labels = {}
self._custom_labels.update(value)
def get_labels(self, pars, redshifts=None):
labels = []
for i, par in enumerate(pars):
if par in self.custom_labels:
labels.append(self.custom_labels[par])
elif par in _default_labels:
if redshifts is not None:
s = _default_labels[par]
j = s.find('$')
k = s.rfind('$')
l = s[j+1:k]
lab = r'$%s(z=%.2f)$' % (l, redshifts[i])
else:
lab = _default_labels[par]
labels.append(lab)
else:
labels.append(par)
return labels
def check_model_ps(self, z=None, k=None, Ts=(0, np.inf), Q=(0, 1), R=(0, 100),
sigma=(0.5, 2), gamma=(-4, 0), skip=0):
"""
Scroll through models in some specified corner of parameter space
and re-compute the power spectrum and plot it.
"""
pars, redshifts = self.data['pinfo']
fchain = self.data['flatchain']
if np.unique(redshifts).size > 1:
assert z is not None, "Must supply `z` if multi-z fit."
limits = {'Ts': Ts, 'Q': Q, 'R': R, 'sigma': sigma, 'gamma': gamma}
if k is None:
k = np.logspace(-1, 0, 11)
for i in range(fchain.shape[0]):
if i < skip:
continue
kw = {par:fchain[i,j] for j, par in enumerate(pars)}
if self.data['kwargs']['Ts_log10']:
kw['Ts'] = 10**kw['Ts']
plot_it = True
for par in pars:
if limits[par][0] < kw[par] < limits[par][1]:
continue
plot_it = False
if not plot_it:
continue
print('Generating model from chain link {} with kwargs={}'.format(i,
kw))
ps = self.model.get_ps_21cm(redshifts[0], k, **kw)
pl.loglog(k, k**3 * ps / 2. / np.pi**2)
input('<enter> for next model')
            pl.clf()
def plot_triangle(self, fig=1, axes=None, params=None, redshifts=None,
complement=False, bins=20, burn=0, fig_kwargs={}, contours=True,
fill=False, nu=[0.95, 0.68], take_log=False, is_log=False,
skip=None, smooth=None, skip_params=None, show_1d=True,
logL_cut=-np.inf, **kwargs):
"""
"""
has_ax = axes is not None
if not has_ax:
fig = pl.figure(constrained_layout=True, num=fig, **fig_kwargs)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
else:
axes_by_row = axes
all_params, redshifts = self.data['pinfo']
if params is None:
params = all_params
elements = range(len(params))
try:
labels = self.get_labels(params, redshifts)
except IndexError:
labels = [''] * len(params)
Np = len(params)
if type(bins) not in [list, tuple, np.ndarray]:
bins = [bins] * Np
if type(complement) not in [list, tuple, np.ndarray]:
complement = [complement] * Np
if type(is_log) not in [list, tuple, np.ndarray]:
is_log = [is_log] * Np
if type(take_log) not in [list, tuple, np.ndarray]:
take_log = [take_log] * Np
        # Remember, for gridspec, rows are numbered from top to bottom.
if not has_ax:
if show_1d:
gs = fig.add_gridspec(Np, Np)
axes_by_row = [[] for i in range(Np)]
else:
axes_by_row = [fig.add_subplot(111)]
flatchain = self.data['flatchain']
for i, row in enumerate(range(Np)):
for j, col in enumerate(range(Np)):
# Skip elements in upper triangle
if j > i:
continue
if (not show_1d) and i == j:
continue
if skip is not None:
if i in skip:
continue
if j in skip:
continue
# Create axis
if show_1d:
if not has_ax:
_ax = fig.add_subplot(gs[i,j])
axes_by_row[i].append(_ax)
else:
_ax = axes_by_row[i][j]
else:
_ax = axes_by_row[0]
if skip_params is not None:
if params[i] in skip_params:
continue
if params[j] in skip_params:
continue
if params[i] not in all_params:
continue
if params[j] not in all_params:
continue
zsamplesi, samplesi = self.get_samples(params[i], burn)
zsamplesj, samplesj = self.get_samples(params[j], burn)
if zsamplesi.size > 1:
iz = np.argmin(np.abs(redshifts[i] - zsamplesi))
idata = samplesi[:,iz]
else:
idata = samplesi[0,:]
if zsamplesj.size > 1:
jz = np.argmin(np.abs(redshifts[j] - zsamplesj))
jdata = samplesj[:,jz]
else:
jdata = samplesj[0,:]
# Retrieve data to be used in plot
if not is_log[i]:
p1 = 1. - idata if complement[i] else idata
else:
p1 = 10**idata if is_log[i] else idata
if take_log[i]:
p1 = np.log10(p1)
# 2-D PDFs from here on
if not is_log[j]:
p2 = 1. - jdata if complement[j] else jdata
else:
p2 = 10**jdata if is_log[j] else jdata
if take_log[j]:
p2 = np.log10(p2)
# 1-D PDFs
if i == j:
kw = kwargs.copy()
if 'colors' in kw:
del kw['colors']
if 'linestyles' in kw:
del kw['linestyles']
_ax.hist(p2, density=True, bins=bins[j], histtype='step', **kw)
if j > 0:
_ax.set_yticklabels([])
if j == Np - 1:
_ax.set_xlabel(labels[j])
else:
_ax.set_xticklabels([])
else:
_ax.set_ylabel(r'PDF')
ok = np.isfinite(p2)
_ax.set_xlim(p2[ok==1].min(), p2[ok==1].max())
continue
if contours:
hist, be2, be1 = np.histogram2d(p2, p1, [bins[j], bins[i]])
if smooth is not None:
hist = gaussian_filter(hist, smooth)
bc1 = bin_e2c(be1)
bc2 = bin_e2c(be2)
nu, levels = get_error_2d(p2, p1, hist, [bc2, bc1], nu=nu)
# (columns, rows, histogram)
if fill:
_ax.contourf(bc2, bc1, hist.T / hist.max(),
levels, zorder=4, **kwargs)
else:
_ax.contour(bc2, bc1, hist.T / hist.max(),
levels, zorder=4, **kwargs)
else:
h, x, y, img = _ax.hist2d(p2, p1, bins=[bins[j], bins[i]],
cmap='viridis', norm=LogNorm())
# Get rid of labels/ticks on interior panels.
if i < Np - 1:
_ax.set_xticklabels([])
else:
_ax.set_xlabel(labels[j])
if j > 0:
_ax.set_yticklabels([])
else:
_ax.set_ylabel(labels[i])
ok1 = np.isfinite(p1)
ok2 = np.isfinite(p2)
_ax.set_ylim(p1[ok1==1].min(), p1[ok1==1].max())
_ax.set_xlim(p2[ok2==1].min(), p2[ok2==1].max())
# Done
return fig, axes_by_row
def plot_walker_trajectories(self, burn=0, ax=None, fig=1, **kwargs):
params, redshifts = self.data['pinfo']
nrows = len(params)
ncols = 1
_j = 0
if ax is None:
fig, axes = pl.subplots(nrows, ncols, num=fig,
figsize=(ncols * 4, nrows * 4))
steps = np.arange(0, self.data['chain'].shape[1])
ibest = np.argwhere(self.data['lnprob'] == self.data['lnprob'].max())[-1]
# This looks slow/lazy but it's to keep ordering.
punique = []
for i, par in enumerate(params):
if par in punique:
continue
punique.append(par)
zunique = np.unique(redshifts)
zunique = zunique[np.isfinite(zunique)]
ct = 0
for i, par in enumerate(params):
_z_ = redshifts[i]
try:
parname, parnum = par.split('_')
except ValueError:
parname = par
parnum = None
axes[i].annotate(par, (0.05, 0.95), bbox=bbox,
xycoords='axes fraction', ha='left', va='top')
chain = self.data['chain'][:,burn:,i]
axes[i].plot(steps, chain.T, **kwargs)
# Plot best one as horizontal line
axes[i].plot(steps,
chain[ibest[0],ibest[1]] * np.ones_like(steps), color='k',
ls='--', zorder=10, lw=3)
# Put marker at walker/step where best happens
axes[i].scatter(steps[ibest[1]], chain[ibest[0],ibest[1]],
marker='|', s=150, color='k', zorder=10)
if _j == 0:
ylab = par.split('_')[0]
axes[i].set_ylabel(ylab)
return axes
def get_zindex_in_data(self, z, ztol=1e-3):
j = np.argmin(np.abs(z - self.data['zfit']))
assert abs(self.data['zfit'][j] - z) < ztol
return j
def get_ps(self, z=None, ztol=1e-2, burn=0, reshape=True):
burn_per_w = burn // self.data['chain'].shape[0]
data = self.data
sh = data['blobs'].shape
try:
nsteps, nw, nz, nk = sh
except ValueError:
nsteps, nw, nz = sh
nk = 1
if reshape:
_ps = np.reshape(data['blobs'], (nsteps*nw,nz,nk))
else:
_ps = data['blobs']
if z is not None:
i = self.get_zindex_in_data(z, ztol=ztol)
if reshape:
ps = _ps[:,i]
else:
ps = _ps[:,:,i]
return self.data['kblobs'], ps[burn_per_w:]
else:
return self.data['kblobs'], _ps[burn_per_w:]
def get_stuck_walkers(self, logL_min=-np.inf, burn=0):
nw = self.data['chain'].shape[0]
burn_per_w = burn // nw
bad_L = self.data['lnprob'][:,burn_per_w:] < logL_min
bad_w = np.any(bad_L, axis=1)
bad_i = np.argwhere(bad_w).squeeze()
return bad_i
def plot_ps(self, z=None, use_best=True, ax=None, fig=1, ztol=1e-2,
conflevel=0.68, samples=None, show_recovery=True,
marker_kw={}, use_cbar=True, show_cbar=True, show_data=True, cmap='jet',
burn=0, logL_min=-np.inf, logL_max=np.inf, **kwargs):
"""
Plot the power spectrum, either at the maximum likelihood point or
as a shaded region indicative of a given confidence level.
"""
if ax is None:
fig, ax = pl.subplots(1, 1, num=fig)
if use_cbar:
norm = Normalize(vmin=min(self.data['zfit']),
vmax=max(self.data['zfit']))
cmap = ScalarMappable(norm=norm, cmap=cmap)
cmap.set_array([])
sh = self.data['blobs'].shape
try:
nsteps, nw, nz, nk = sh
except ValueError:
nsteps, nw, nz = sh
nk = 1
burn_per_w = burn // nw
ibest = np.argwhere(self.data['lnprob'] == self.data['lnprob'].max())[0]
i = self.get_zindex_in_data(z=z, ztol=ztol)
_z_ = self.data['zfit'][i]
if (logL_min > -np.inf):
bad_i = self.get_stuck_walkers(logL_min, burn=burn)
if bad_i.size == nw:
raise ValueError("No acceptable walkers!")
elif bad_i.size > 0:
_k, _ps_ = self.get_ps(z=_z_, ztol=ztol, burn=burn, reshape=False)
_ps = []
for iw in np.arange(nw):
if iw in bad_i:
continue
_ps.append(_ps_[:,iw,:])
_ps = np.array(_ps)
ps = np.reshape(_ps, (_ps.shape[0] * _ps.shape[1], nk))
else:
_k, ps = self.get_ps(z=_z_, ztol=ztol, burn=burn, reshape=True)
else:
_k, ps = self.get_ps(z=_z_, ztol=ztol, burn=burn, reshape=True)
if use_cbar:
kwargs['color'] = cmap.to_rgba(_z_)
if not show_recovery:
pass
elif use_best:
ax.plot(self.data['kblobs'], self.data['blobs'][ibest[1],
ibest[0],i], **kwargs)
elif samples is not None:
ax.plot(self.data['kblobs'], ps[-samples:,:].T, **kwargs)
else:
_lo = (1. - conflevel) * 100 / 2.
_hi = 100 - _lo
lo, hi = np.nanpercentile(ps, (_lo, _hi), axis=0)
ax.fill_between(self.data['kblobs'], lo, hi, **kwargs)
##
# Overplot data
if ('data' in self.data.keys()) and show_data:
# Use cmap to force match in colors
ydat, yerr = self.data['data'][i]
if use_cbar:
marker_kw['color'] = cmap.to_rgba(_z_)
ax.errorbar(self.data['kblobs'], ydat, yerr.T, **marker_kw)
ax.set_xlabel(labels['k'])
ax.set_ylabel(labels['delta_sq'])
ax.set_xscale('log')
ax.set_yscale('log')
try:
ax.set_ylim(self.data['blobs'].min()*0.5, self.data['blobs'].max() * 2)
except:
ax.set_ylim(1, 1e4)
if use_cbar and show_cbar and show_recovery:
cax = fig.add_axes([0.91, 0.11, 0.015, 0.77])
cb = pl.colorbar(cmap, ax=ax, cax=cax, orientation='vertical')
cb.set_label(r'$z$', fontsize=20)
return fig, ax
def get_par_from_increments(self):
pass
def plot_igm_constraints(self, z=None, use_best=False, conflevel=0.68,
ax=None, fig=1, burn=0, marker_kw={}, scatter=False, zoffset=0,
bins=20, smooth_hist=None, **kwargs):
"""
Plot contours in Q-T_S space. Kind of the whole point of this package.
"""
new_ax = False
if ax is None:
fig, ax = pl.subplots(1, 1, num=fig)
new_ax = True
params, redshifts = self.data['pinfo']
for _z_ in self.data['zfit']:
iT = None
iQ = None
for j, par in enumerate(params):
if (z is not None) and (_z_ != z):
continue
if (par == 'Ts'):
iT = j
if (par == 'Q'):
iQ = j
T = self.data['flatchain'][burn:,iT]
Q = self.data['flatchain'][burn:,iQ]
x = 1. - Q
hist, be1, be2 = np.histogram2d(T, x, bins)
bc1 = bin_e2c(be1)
bc2 = bin_e2c(be2)
if smooth_hist is not None:
hist = gaussian_filter(hist, smooth_hist)
nu, levels = get_error_2d(T, x, hist, [bc1, bc2], nu=conflevel)
ax.contour(bc2, bc1, hist / hist.max(), levels, **kwargs)
if new_ax:
ax.set_xlabel(r'$x_{\mathrm{HI}} \equiv 1 - Q$')
ax.set_ylabel(r'$T_S \ [\mathrm{K}]$')
return ax
def get_samples(self, par, burn=0):
params, redshifts = self.data['pinfo']
chain = self.data['chain']
burn_per_w = burn // self.data['chain'].shape[0]
##
# If par in `params`, it's easy.
if par in params:
z = []
y = []
for i, _z_ in enumerate(redshifts):
if params[i] != par:
continue
z.append(_z_)
y.append(self.data['flatchain'][burn:,i])
return np.array(z), np.array(y)
# May be repeats -- just take first one
ibest = np.argwhere(self.data['lnprob'] == self.data['lnprob'].max())
if ibest.ndim == 2:
ibest = ibest[0]
# First, deal with parametric results if we have them.
for _par_ in ['Q', 'R', 'Ts']:
if (par != _par_):
continue
if (self.data['kwargs']['{}_func'.format(_par_)] is None):
continue
j = 0
p = []
v = []
while '{}_p{}'.format(_par_, j) in params:
pj = params.index('{}_p{}'.format(_par_, j))
vj = chain[:,:,pj]
j += 1
p.append(pj)
v.append(vj)
fname = self.data['kwargs']['{}_func'.format(_par_)]
if fname == 'tanh':
func = tanh_generic
elif fname == 'pl':
if _par_ == 'Q':
func = power_law_max1
else:
func = power_law
elif fname == 'bpl':
if _par_ == 'Q':
func = broken_power_law_max1
else:
func = broken_power_law
elif fname == 'dpl':
assert _par_ == 'Ts'
func = double_power_law
else:
                raise NotImplementedError('No option for {} yet'.format(fname))
# Make Q(z) (for example) for each MCMC sample
v_flat = [self.data['flatchain'][burn:,_p] for _p in p]
_pars_ = np.array([element for element in v_flat])
zplot = self.data['zfit']
assert burn < v[0].size, \
"Provided `burn` exceeds size of chain!"
y = np.zeros((v[0].size-burn, zplot.size))
for i, _z_ in enumerate(zplot):
y[:,i] = func(_z_, _pars_)
return np.array(zplot), y
def plot_evol(self, par, use_best=False, conflevel=0.68,
ax=None, fig=1, burn=0, marker_kw={}, scatter=False, boxplot=False,
zoffset=0, samples=None, **kwargs):
"""
Plot constraints on model parameters vs. redshift.
"""
if ax is None:
fig, ax = pl.subplots(1, 1, num=fig)
params, redshifts = self.data['pinfo']
chain = self.data['chain']
# May be repeats -- just take first one
ibest = np.argwhere(self.data['lnprob'] == self.data['lnprob'].max())
if ibest.ndim == 2:
ibest = ibest[0]
# First, deal with parametric results if we have them.
for _par_ in ['Q', 'R', 'Ts', 'sigma', 'Asys']:
if (par != _par_):
continue
if (self.data['kwargs']['{}_func'.format(_par_)] is None):
continue
j = 0
p = []
v = []
while '{}_p{}'.format(_par_, j) in params:
pj = params.index('{}_p{}'.format(_par_, j))
vj = chain[:,:,pj]
j += 1
p.append(pj)
v.append(vj)
fname = self.data['kwargs']['{}_func'.format(_par_)]
if fname == 'tanh':
func = tanh_generic
elif fname == 'linear':
func = lin_Q
elif fname == 'pl':
if _par_ == 'Q':
func = power_law_max1
elif _par_ == 'Ts':
func = power_law_lognorm
elif _par_ == 'R':
func = power_law_Q
else:
func = power_law
elif fname == 'bpl':
if _par_ == 'Q':
func = broken_power_law_max1
else:
func = broken_power_law
elif fname == 'dpl':
assert _par_ == 'Ts'
func = double_power_law
elif fname == 'erf':
assert _par_ == 'Asys'
func = erf_Q
else:
                raise NotImplementedError('No option for {} yet'.format(fname))
pbest = [element[ibest[0],ibest[1]] for element in v]
# Make Q(z) for each MCMC sample
if use_best:
if par in ['R', 'sigma', 'Asys']:
ybest = func(_default_Q, pbest)
ax.plot(_default_Q, ybest, **kwargs)
else:
ybest = func(_default_z, pbest)
ax.plot(_default_z, ybest, **kwargs)
else:
v_flat = [self.data['flatchain'][burn:,_p] \
for _p in p]
_pars_ = np.array([element for element in v_flat])
if par in ['R', 'Asys']:
ybest = func(_default_Q, pbest)
xvals = _default_Q
else:
if scatter or boxplot:
zplot = self.data['zfit']
else:
zplot = _default_z
ybest = func(zplot, pbest)
xvals = zplot
assert burn < v[0].size, \
"Provided `burn` exceeds size of chain!"
y = np.zeros((v[0].size-burn, xvals.size))
for i, _x_ in enumerate(xvals):
y[:,i] = func(_x_, _pars_)
#zplot, y = self.get_samples
_lo = (1. - conflevel) * 100 / 2.
_hi = 100 - _lo
lo, hi = np.percentile(y, (_lo, _hi), axis=0)
if boxplot:
kw = kwargs.copy()
for i, _x_ in enumerate(xvals):
conf = np.array([[_lo / 100., _hi / 100.]])
data = y[:,i]#np.concatenate((y[:,i], ybest[i]))
ax.boxplot(data, positions=[_x_],
showfliers=False,
manage_ticks=False,
conf_intervals=conf)
#usermedians=[ybest[i]])
#conf_intervals=[[16, 84]])
if 'label' in kw:
del kw['label']
elif scatter:
kw = kwargs.copy()
for i, _x_ in enumerate(zplot):
ax.plot([_x_+zoffset]*2, [lo[i], hi[i]], **kw)
ax.scatter([_x_+zoffset]*2, [ybest[i]]*2, **marker_kw)
if 'label' in kw:
del kw['label']
elif samples is not None:
ax.plot(xvals, y[-samples:,:].T, **kwargs)
else:
ax.fill_between(xvals, lo, hi, **kwargs)
if par in ['Ts', 'R']:
ax.set_yscale('log')
if par in ['R', 'sigma', 'Asys']:
ax.set_xlabel(r'$Q$')
ax.set_xlim(0, 1)
else:
ax.set_xlabel(r'$z$')
ax.set_xlim(min(self.data['zfit'])-1,
max(self.data['zfit'])+1)
ax.set_ylabel(_default_labels[_par_])
ax.set_ylim(*_default_limits[_par_])
return ax
##
# Non-parametric results
for i, _par_ in enumerate(params):
z = redshifts[i]
if (_par_ != par):
continue
#if (_par_ == 'R') and (self.data['kwargs']['Rxdelta'] is not None):
# continue
#else:
best = chain[ibest[0], ibest[1],i]
_chain = self.data['flatchain'][burn:,i]
# Get band
_lo = (1. - conflevel) * 100 / 2.
_hi = 100 - _lo
lo, hi =
|
np.percentile(_chain, (_lo, _hi), axis=0)
|
numpy.percentile
|
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
# Package imports
from pypso.optimizers import BPSO, CPSO
from pypso.utils import plot_history
# Globally load data from sklearn
X, y = load_breast_cancer(return_X_y=True)
X = (X - X.mean()) / X.std()
# Define linear model for comparison with PSO
clf = LogisticRegression(solver="lbfgs")
clf.fit(X, y)
auc_sk = np.round(roc_auc_score(y, clf.predict_proba(X)[:, -1]), 4)
# Define objective functions
def fobj_lr(w):
"""Optimize logistic regression weights using AUC metric.
Parameters
----------
w : 1d array-like
Weights for logistic regression.
Returns
-------
float
AUC metric.
"""
# Linear combo and create probabilities
z = w[0] + np.sum(w[1:]*X, axis=1)
p = 1 / (1 + np.exp(-z))
# Want to minimize objective so take 1 - AUC
return 1 - roc_auc_score(y, p)
def fobj_lr_constraint(w):
"""Set arbitrary constraint for logistic regression weights.
Parameters
----------
w : 1d array-like
Weights for logistic regression.
Returns
-------
1d array-like
Boolean vector indicating if element satisfies condition.
"""
return abs(w) < 2
def fobj_fs(b):
"""Optimize feature selection using AUC metric.
Parameters
----------
b : 1d array-like
Array indicating if feature is selected (1=yes, 0=no).
Returns
-------
float
AUC metric.
"""
b = b.astype(bool)
try:
clf = LogisticRegression(solver="lbfgs").fit(X[:, b], y)
return 1 - roc_auc_score(y, clf.predict_proba(X[:, b])[:, -1])
except Exception as e:
return np.inf
def fobj_fs_constraint(b):
"""Set arbitrary constraint for feature selection to use 50% or less of the
total feature space.
Parameters
----------
b : 1d array-like
Array indicating if feature is selected (1=yes, 0=no).
Returns
-------
1d array-like
Boolean vector indicating if element satisfies condition.
"""
return np.sum(b) <= 10
def main():
"""Runs toy examples demonstrating pypso.
"""
# Kwargs for PSO algorithms
kwarg_params = {
'n_particles' : 300,
'n_jobs' : 4,
'verbose' : True,
'random_state' : 1718
}
#######################
# CONTINUOUS EXAMPLES #
#######################
n_dimensions = X.shape[1] + 1 # +1 for bias term
lb = [-4] * n_dimensions
ub = [4] * n_dimensions
"""Example 1. Continuous PSO without constraint"""
print(f"{'-'*50}\nEXAMPLE 1 - CONTINUOUS PSO WITHOUT CONSTRAINT\n{'-'*50}")
pso = CPSO(n_dimensions=n_dimensions, **kwarg_params)
w_opt, o_opt = pso.optimize(fobj=fobj_lr, lb=lb, ub=ub, fcons=None)
plot_history(pso)
# Print solution
names = [f"f{i}" for i in range(1, X.shape[1] + 1)]
weights = np.round(w_opt, 2).tolist()
sol = f"{weights[0]} + "
for i in range(X.shape[1]):
if (i + 1) % 5 == 0: sol += "\n"
sol += f"{weights[i+1]}*{names[i]}"
if i < X.shape[1]-1: sol += " + "
print(f"\nLinear solution:\n{sol}")
# Sanity check
print("\nSanity check:")
status = np.all((np.array(lb) < w_opt) & (w_opt < np.array(ub)))
print(f"\tall weights within bounds? {status}\n")
# Compare to sklearn logistic regression
auc_pso = np.round(1 - o_opt, 4)
print("Comparison to sklearn:")
print(f"\tsklearn logistic regression AUC = {auc_sk}")
print(f"\tPSO logistic regression AUC = {auc_pso}\n")
"""Example 2. Continuous PSO with constraint"""
print(f"{'-'*50}\nEXAMPLE 2 - CONTINUOUS PSO WITH CONSTRAINT\n{'-'*50}")
pso = CPSO(n_dimensions=n_dimensions, **kwarg_params)
w_opt, o_opt = pso.optimize(fobj=fobj_lr, lb=lb, ub=ub, fcons=fobj_lr_constraint)
plot_history(pso)
# Print solution
names = [f"f{i}" for i in range(1, X.shape[1] + 1)]
weights = np.round(w_opt, 2).tolist()
sol = f"{weights[0]} + "
for i in range(X.shape[1]):
if (i + 1) % 5 == 0: sol += "\n"
sol += f"{weights[i+1]}*{names[i]}"
if i < X.shape[1]-1: sol += " + "
print(f"\nLinear solution:\n{sol}")
print("\nSanity check:")
status = np.all((np.array(lb) < w_opt) & (w_opt < np.array(ub)))
print(f"\tall weights within bounds? {status}")
status = np.all(fobj_lr_constraint(w_opt))
print(f"\tall weights satisfy constraint? {status}\n")
# Compare to sklearn logistic regression
auc_pso = np.round(1 - o_opt, 4)
print("Comparison to sklearn:")
print(f"\tsklearn logistic regression AUC = {auc_sk}")
print(f"\tPSO logistic regression AUC = {auc_pso}\n")
###################
# BINARY EXAMPLES #
###################
n_dimensions = X.shape[1]
lb = [0] * X.shape[1]
ub = [1] * X.shape[1]
"""Example 3. Binary PSO without constraint"""
print(f"{'-'*50}\nEXAMPLE 3 - BINARY PSO WITHOUT CONSTRAINT\n{'-'*50}")
pso = BPSO(n_dimensions=n_dimensions, **kwarg_params)
b_opt, o_opt = pso.optimize(fobj=fobj_fs,
lb=lb,
ub=ub,
fcons=None,
max_iter=5)
plot_history(pso)
# Features selected
print("\nFeatures selected:")
print(f"\tn = {int(np.sum(b_opt))}")
print(f"\tids = {np.where(b_opt)[0]}")
# Compare to performance without feature selection
auc_pso =
|
np.round(1 - o_opt, 4)
|
numpy.round
|
import re
import numpy as np
from lumicks import pylake
import pytest
from lumicks.pylake.kymotracker.detail.calibrated_images import CalibratedKymographChannel
from lumicks.pylake.kymo import EmptyKymo
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from .data.mock_confocal import generate_kymo
def test_kymo_properties(h5_file):
f = pylake.File.from_h5py(h5_file)
if f.format_version == 2:
kymo = f.kymos["Kymo1"]
reference_timestamps = np.array([[2.006250e+10, 2.109375e+10, 2.206250e+10, 2.309375e+10],
[2.025000e+10, 2.128125e+10, 2.225000e+10, 2.328125e+10],
[2.043750e+10, 2.146875e+10, 2.243750e+10, 2.346875e+10],
[2.062500e+10, 2.165625e+10, 2.262500e+10, 2.365625e+10],
[2.084375e+10, 2.187500e+10, 2.284375e+10, 2.387500e+10]], np.int64)
assert repr(kymo) == "Kymo(pixels=5)"
with pytest.deprecated_call():
kymo.json
with pytest.deprecated_call():
assert kymo.has_fluorescence
with pytest.deprecated_call():
assert not kymo.has_force
assert kymo.pixels_per_line == 5
assert len(kymo.infowave) == 64
assert kymo.rgb_image.shape == (5, 4, 3)
assert kymo.red_image.shape == (5, 4)
assert kymo.blue_image.shape == (5, 4)
assert kymo.green_image.shape == (5, 4)
np.testing.assert_allclose(kymo.timestamps, reference_timestamps)
assert kymo.fast_axis == "X"
np.testing.assert_allclose(kymo.pixelsize_um, 10/1000)
np.testing.assert_allclose(kymo.line_time_seconds, 1.03125)
np.testing.assert_allclose(kymo.center_point_um["x"], 58.075877109272604)
np.testing.assert_allclose(kymo.center_point_um["y"], 31.978375270573267)
np.testing.assert_allclose(kymo.center_point_um["z"], 0)
np.testing.assert_allclose(kymo.size_um, [0.050])
def test_kymo_slicing(h5_file):
f = pylake.File.from_h5py(h5_file)
if f.format_version == 2:
kymo = f.kymos["Kymo1"]
kymo_reference = np.transpose([[2, 0, 0, 0, 2], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]])
assert kymo.red_image.shape == (5, 4)
|
np.testing.assert_allclose(kymo.red_image.data, kymo_reference)
|
numpy.testing.assert_allclose
|
import os
from shutil import *
import random, math
import scipy.misc
import numpy as np
import tensorflow as tf
def clear_duplicated_layers(layers):
layers0 = [layers[0]]
for layer in layers:
if layer.name != layers0[-1].name:
layers0.append(layer)
return layers0
def allocate_gpu(gpu_id=-1, maxLoad=0.1, maxMem=0.5, order='memory'):
if gpu_id == -1:
try:
import common.GPUtil as GPUtil
gpu_id = GPUtil.getFirstAvailable(order=order, maxLoad=maxLoad, maxMemory=maxMem)[0]
except:
gpu_id = 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
return gpu_id
def ini_model(sess):
sess.run(tf.global_variables_initializer())
def save_model(saver, sess, checkpoint_dir, step=None):
makedirs(checkpoint_dir)
model_name = "model"
saver.save(sess, checkpoint_dir + model_name, global_step=step)
def load_model(saver, sess, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
return True
else:
return False
from functools import reduce
import operator
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def sigmoid(x):
return (1 / (1 + np.exp(-x)))
def mean(x):
try:
return np.mean(x).__float__()
except:
return 0.
def std(x):
try:
return np.std(x).__float__()
except:
return 0.
def copydir(src, dst):
if os.path.exists(dst):
removedirs(dst)
copytree(src, dst)
def remove(path):
if os.path.exists(path):
os.remove(path)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def removedirs(path):
if os.path.exists(path):
rmtree(path)
def str_flags(flags):
p = ''
for key in np.sort(list(flags.keys())):
p += str(key) + ':' + str(flags.get(key)._value) + '\n'
return p
def rampup(step, rampup_length):
p = tf.minimum(1.0, tf.cast(step, tf.float32) / rampup_length)
return tf.nn.sigmoid(10.0*(p-0.5)) / sigmoid(5.0)
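# rampup rises smoothly from sigmoid(-5)/sigmoid(5) (about 0.007) at step 0
# up to exactly 1.0 once step >= rampup_length.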
def save_images(images, size, path):
if images.shape[3] == 1:
images = np.concatenate([images, images, images], 3)
images = np.clip(images, -1.0, 1.0)
return scipy.misc.toimage(merge(images, size), cmin=-1, cmax=1).save(path)
def imread(path, is_grayscale=False):
if (is_grayscale):
return scipy.misc.imread(path, flatten=True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def imresize(image, resize=1):
h, w = image.shape[0], image.shape[1]
img = np.zeros((h * resize, w * resize, image.shape[2]))
for i in range(h * resize):
for j in range(w * resize):
img[i, j] = image[i // resize, j // resize]
return img
def merge(images, size, resize=3):
h, w = images.shape[1] * resize, images.shape[2] * resize
img = np.zeros((h * size[0], w * size[1], images.shape[3]))
assert size[0] * size[1] == images.shape[0]
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = imresize(image, resize)
return img
def center_crop(x, crop_h, crop_w=None, resize_w=64):
h, w = x.shape[:2]
if crop_w is None:
crop_w = crop_h
if crop_h == 0:
crop_h = crop_w = min(h, w)
j = int(round((h - crop_h) / 2.))
i = int(round((w - crop_w) / 2.))
return scipy.misc.imresize(x[j:j + crop_h, i:i + crop_w], [resize_w, resize_w])
def batch_resize(images, newHeight, newWidth):
images_resized = np.zeros([images.shape[0], newHeight, newWidth, 3])
for idx, image in enumerate(images):
if (images.shape[3] == 1):
image = np.concatenate([image, image, image], 2)
images_resized[idx] = scipy.misc.imresize(image, [newHeight, newWidth], 'bilinear')
return images_resized
def clip_truncated_normal(mean, stddev, shape, minval=None, maxval=None):
    if minval is None:
        minval = mean - 2 * stddev
    if maxval is None:
        maxval = mean + 2 * stddev
return np.clip(np.random.normal(mean, stddev, shape), minval, maxval)
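# Note: samples are drawn from a normal and clipped to [mean - 2*stddev, mean + 2*stddev]
# by default, which only approximates a truly truncated normal (clipping, not re-drawing).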
def collect(X, x, len):
if isinstance(x, np.ndarray):
if x.shape.__len__() == 1:
x = x.reshape((1,) + x.shape)
return x if X is None else np.concatenate([X, x], 0)[-len:]
else:
return [x] if X is None else (X + [x])[-len:]
def get_name(layer_name, cts):
if not layer_name in cts:
cts[layer_name] = 0
name = layer_name + '_' + str(cts[layer_name])
cts[layer_name] += 1
return name
def shuffle_datas(datas):
rand_indexes = np.random.permutation(datas.shape[0])
shuffled_images = datas[rand_indexes]
return shuffled_images
def shuffle_datas_and_labels(datas, labels):
rand_indexes = np.random.permutation(datas.shape[0])
shuffled_images = datas[rand_indexes]
shuffled_labels = labels[rand_indexes]
return shuffled_images, shuffled_labels
def data_gen_random(data, num_sample):
while True:
num_data = len(data)
data_index = np.random.choice(num_data, num_sample, replace=True, p=num_data * [1 / num_data])
yield data[data_index]
def data_gen_epoch(datas, batch_size, func=None, epoch=None):
cur_epoch = 0
while len(datas) < 100 * batch_size:
datas = np.concatenate([datas, datas], axis=0)
while True:
|
np.random.shuffle(datas)
|
numpy.random.shuffle
|
import numpy as np
from eqsig import sdof, functions
from eqsig.exceptions import deprecation
def calc_significant_duration(motion, dt, start=0.05, end=0.95):
"""
Deprecated. Use calc_sig_dur_vals
Parameters
----------
motion
dt
start
end
Returns
-------
"""
deprecation("Use calc_sig_dur_vals()")
return calc_sig_dur_vals(motion, dt, start=start, end=end)
def calc_sig_dur_vals(motion, dt, start=0.05, end=0.95, se=False):
"""
    Computes the significant duration using cumulative squared acceleration according to Trifunac and Brady (1975).
Parameters
----------
motion: array-like
acceleration time series
dt: float
time step
start: float, default=0.05
threshold to start the duration
end: float, default=0.95
threshold to end the duration
se: bool, default=False
If true then return the start and end times
Returns
-------
    float or tuple
        duration in seconds, or (start_time, end_time) if se=True
"""
cum_acc2 = np.cumsum(motion ** 2)
ind2 = np.where((cum_acc2 > start * cum_acc2[-1]) & (cum_acc2 < end * cum_acc2[-1]))
start_time = ind2[0][0] * dt
end_time = ind2[0][-1] * dt
if se:
return start_time, end_time
return end_time - start_time
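# Minimal usage sketch (synthetic record; not part of the original test suite):
def _example_sig_dur_vals():
    dt = 0.01
    t = np.arange(0, 20, dt)
    acc = np.sin(2 * np.pi * t) * np.exp(-0.5 * (t - 10) ** 2)  # pulse-like motion
    d_5_95 = calc_sig_dur_vals(acc, dt)                  # 5-95% duration in seconds
    t_start, t_end = calc_sig_dur_vals(acc, dt, se=True)
    return d_5_95, (t_start, t_end)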
def calc_sig_dur(asig, start=0.05, end=0.95, im=None, se=False):
"""
Computes the significant duration according to Trifunac and Brady (1975).
Parameters
----------
asig: eqsig.AccSignal
acceleration time series object
start: float, default=0.05
threshold to start the duration
end: float, default=0.95
threshold to end the duration
im: function or None (default=None)
A function that calculates a cumulative intensity measure, if =None, then use eqsig.im.calc_arias_intensity
se: bool, default=False
If true then return the start and end times
Returns
-------
    float or tuple
        duration in seconds, or (start_time, end_time) if se=True
"""
if im is None:
im_vals = calc_arias_intensity(asig)
else:
im_vals = im(asig)
ind2 = np.where((im_vals > start * im_vals[-1]) & (im_vals < end * im_vals[-1]))
start_time = ind2[0][0] * asig.dt
end_time = ind2[0][-1] * asig.dt
if se:
return start_time, end_time
return end_time - start_time
def calculate_peak(motion):
"""Calculates the peak absolute response"""
deprecation("Use calc_peak instead of calculate_peak")
return max(abs(min(motion)), max(motion))
def calc_peak(motion):
"""Calculates the peak absolute response"""
return max(abs(min(motion)), max(motion))
def calc_sir(acc_sig):
"""
Calculates the shaking intensity rate
ref:
<NAME>., <NAME>., <NAME>., <NAME>., and <NAME>., 2010. Centrifuge testing to
evaluate and mitigate liquefaction-induced building settlement mechanisms,
ASCE Journal of Geotechnical and Geoenv. Eng. 136, 918-929
Parameters
----------
acc_sig: eqsig.AccSignal
acceleration signal
Returns
-------
float
"""
ai_total = acc_sig.arias_intensity
t5, t75 = calc_significant_duration(acc_sig.values, acc_sig.dt)
return 0.7 * ai_total / (t75 - t5)
def _raw_calc_arias_intensity(acc, dt):
from scipy.integrate import cumtrapz
return np.pi / (2 * 9.81) * cumtrapz(acc ** 2, dx=dt, initial=0)
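# Arias intensity build-up implemented above: I_A(t) = pi / (2 g) * integral of a(tau)^2
# from 0 to t, with acceleration in m/s^2 and g = 9.81 m/s^2.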
def calc_arias_intensity(acc_sig):
"""
Calculates the Arias Intensity
Parameters
----------
acc_sig: eqsig.AccSignal
Returns
-------
array_like
A time series of the build up of Arias Intensity
"""
return _raw_calc_arias_intensity(acc_sig.values, acc_sig.dt)
def calc_cav(acc_sig):
"""
Calculates the Cumulative Absolute velocity
ref:
Electrical Power Research Institute. Standardization of the Cumulative
Absolute Velocity. 1991. EPRI TR-100082-1'2, Palo Alto, California.
"""
from scipy.integrate import cumtrapz
abs_acc = np.abs(acc_sig.values)
return cumtrapz(abs_acc, dx=acc_sig.dt, initial=0)
def calc_cav_dp(asig):
"""
Calculates standardized cumulative absolute velocity
ref:
<NAME>, <NAME>. Predictive equations for the horizontal component of standardized
cumulative absolute velocity as adapted for use in the shutdown of U.S. nuclear power plants.
Nucl Eng Des 2011;241:2558-69.
    :param asig: eqsig.AccSignal
    :return: array_like, time series of standardized CAV
"""
from scipy.integrate import trapz
start = 0
pga_max = 0
cav_dp = 0
points_per_sec = (int(1 / asig.dt))
total_seconds = int(asig.time[-1])
cav_dp_1_series = []
acc_in_g = asig.values / 9.81
for i in range(0, total_seconds):
end = start + points_per_sec
interval_total_time = (start * asig.dt) + 1
interval_time = np.arange(start * asig.dt, interval_total_time, asig.dt)
acc_interval = []
for j in range(start, end + 1):
acc_interval.append(acc_in_g[j])
acc_interval = np.array(acc_interval)
abs_acc_interval = abs(acc_interval)
x_lower = start * asig.dt # the lower limit of x
x_upper = end * asig.dt # the upper limit of x
x_int = interval_time[np.where((x_lower <= interval_time) * (interval_time <= x_upper))]
y_int = np.abs(np.array(abs_acc_interval)[np.where((x_lower <= interval_time) * (interval_time <= x_upper))])
int_acc = trapz(y_int, x_int)
# calculation of pga (g)
pga = (max(abs_acc_interval))
if pga > pga_max:
pga_max = pga
if (pga - 0.025) < 0:
h = 0
elif (pga - 0.025) >= 0:
h = 1
else:
raise ValueError("cannot evaluate pga: {0}".format(pga))
cav_dp = cav_dp + (h * int_acc)
cav_dp_1_series.append(cav_dp)
start = end
t1s = np.arange(total_seconds)
cav_dp_time_series = np.interp(asig.time, t1s, cav_dp_1_series)
return cav_dp_time_series
def calc_isv(acc_sig):
"""
Calculates the integral of the squared velocity
See Kokusho (2013)
:return:
"""
from scipy.integrate import cumtrapz
return cumtrapz(acc_sig.velocity ** 2, dx=acc_sig.dt, initial=0)
def cumulative_response_spectra(acc_signal, fun_name, periods=None, xi=None):
if periods is None:
periods = acc_signal.response_times
else:
periods = np.array(periods)
if xi is None:
xi = 0.05
resp_u, resp_v, resp_a = sdof.response_series(acc_signal.values, acc_signal.dt, periods, xi)
if fun_name == "arias_intensity":
rs = _raw_calc_arias_intensity(resp_a, acc_signal.dt)
else:
raise ValueError
return rs
def calc_max_velocity_period(asig):
from eqsig import AccSignal
periods = np.logspace(-1, 0.3, 100)
new_sig = AccSignal(asig.values, asig.dt)
new_sig.generate_response_spectrum(response_times=periods, xi=0.15)
max_index = np.argmax(new_sig.s_v)
max_period = periods[max_index]
return max_period
def max_acceleration_period(asig):
from eqsig import AccSignal
periods = np.logspace(-1, 1, 100)
new_sig = AccSignal(asig.values, asig.dt)
new_sig.generate_response_spectrum(response_times=periods, xi=0)
max_index = np.argmax(new_sig.s_a)
max_period = periods[max_index]
return max_period
def max_fa_period(asig):
"""Calculates the period corresponding to the maximum value in the Fourier amplitude spectrum"""
max_index = np.argmax(asig.fa_spectrum)
max_period = 1. / asig.fa_frequencies[max_index]
return max_period
def calc_bandwidth_freqs(asig, ratio=0.707):
"""
Lower and upper frequencies of smooth Fourier spectrum bandwidth
Parameters
----------
asig: eqsig.AccSignal
Acceleration time series object
ratio:
ratio of maximum value where bandwidth should be computed
Returns
-------
tuple:
(lower, upper)
"""
fas1_smooth = asig.smooth_fa_spectrum
max_fas1 = max(fas1_smooth)
lim_fas = max_fas1 * ratio
ind2 = np.where(fas1_smooth > lim_fas)
min_freq = asig.smooth_fa_frequencies[ind2[0][0]]
max_freq = asig.smooth_fa_frequencies[ind2[0][-1]]
return min_freq, max_freq
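# The default ratio=0.707 (1/sqrt(2)) corresponds to the usual half-power (-3 dB) bandwidth.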
def calc_bandwidth_f_min(asig, ratio=0.707):
"""
Lower frequency of smooth Fourier spectrum bandwidth
Parameters
----------
asig: eqsig.AccSignal
Acceleration time series object
ratio: float
ratio of maximum value where bandwidth should be computed
Returns
-------
float
"""
fas1_smooth = asig.smooth_fa_spectrum
max_fas1 = max(fas1_smooth)
lim_fas = max_fas1 * ratio
ind2 = np.where(fas1_smooth > lim_fas)
min_freq = asig.smooth_fa_frequencies[ind2[0][0]]
return min_freq
def calc_bandwidth_f_max(asig, ratio=0.707):
"""
Upper frequency of smooth Fourier spectrum bandwidth
Parameters
----------
asig: eqsig.AccSignal
Acceleration time series object
ratio: float
ratio of maximum value where bandwidth should be computed
Returns
-------
float
"""
fas1_smooth = asig.smooth_fa_spectrum
max_fas1 = max(fas1_smooth)
lim_fas = max_fas1 * ratio
ind2 = np.where(fas1_smooth > lim_fas)
max_freq = asig.smooth_fa_frequencies[ind2[0][-1]]
return max_freq
def calc_bracketed_duration(asig, threshold):
"""DEPRECATED: Use calc_brac_dur"""
deprecation("Use calc_brac_dur")
return calc_brac_dur(asig, threshold)
def calc_brac_dur(asig, threshold, se=False):
"""
Calculates the Bracketed duration based on some threshold
Parameters
----------
asig: eqsig.AccSignal
Acceleration time series object
threshold: float
acceleration threshold to calculation duration start and end
se: bool, default=False
If true then return the start and end times
Returns
-------
float
"""
abs_motion = abs(asig.values)
time = np.arange(asig.npts) * asig.dt
# Bracketed duration
ind01 = np.where(abs_motion > threshold)
time2 = time[ind01]
try:
if se:
return time2[0], time2[-1]
return time2[-1] - time2[0]
except IndexError:
if se:
return None, None
return 0
def calc_acc_rms(asig, threshold):
"""Root mean squared acceleration"""
abs_motion = abs(asig.values)
# Bracketed duration
ind01 = np.where(abs_motion > threshold)
try:
# rms acceleration in m/s/s
a_rms01 = np.sqrt(1 / asig.t_b01 * np.trapz((asig.values[ind01[0][0]:ind01[0][-1]]) ** 2, dx=asig.dt))
except IndexError:
a_rms01 = 0
return a_rms01
def calc_a_rms(asig, threshold):
"""DEPRECATED"""
raise ValueError('calc_a_rms has been removed, use calc_acc_rms note that threshold changed to m/s2')
def calc_integral_of_abs_velocity(asig):
"""Integral of absolute velocity - identical to cumulative absolute displacement"""
abs_vel = abs(asig.velocity)
vel_int = np.cumsum(abs_vel * asig.dt)
return vel_int
def calc_cumulative_abs_displacement(asig):
"""Cumulative absolute displacement - identical to integral of absolute velocity"""
return calc_integral_of_abs_velocity(asig)
def calc_integral_of_abs_acceleration(asig):
"""Integral of absolute acceleration"""
abs_acc = abs(asig.values)
acc_int = np.cumsum(abs_acc * asig.dt)
return acc_int
def calc_n_cyc_array_w_power_law(values, a_ref, b, cut_off=0.01):
"""
Calculates the equivalent number of uniform amplitude cycles using a power law
Parameters
----------
values: array_like
Time series of values
a_ref: float
Reference uniform amplitude
b: float or array_like
Power law factor
cut_off: float
Low amplitude cutoff value
Returns
-------
array_like
"""
from scipy.interpolate import interp1d
peak_indices = functions.get_switched_peak_array_indices(values)
csr_peaks = np.abs(np.take(values, peak_indices))
csr_peaks = np.where(csr_peaks < cut_off * np.max(abs(values)), 1.0e-14, csr_peaks)
n_ref = 1
perc = 0.5 / (n_ref * (a_ref / csr_peaks)[:, np.newaxis] ** (1 / b))
n_eq = np.cumsum(perc, axis=0)
n_eq = np.insert(n_eq, 0, 0, axis=0)
peak_indices = np.insert(peak_indices, 0, 0, axis=0)
n_eq = np.insert(n_eq, len(n_eq)-1, n_eq[-1], axis=0)
peak_indices = np.insert(peak_indices, len(n_eq)-1, len(values), axis=0)
f = interp1d(peak_indices, n_eq, kind='previous', axis=0)
n_series = f(np.arange(len(values)))
return n_series
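# Illustrative sketch of the power-law weighting used above (all numbers hypothetical):
# with n_ref = 1, each switched peak of amplitude a_i contributes half a cycle scaled by
# (a_i / a_ref) ** (1 / b), and the contributions are accumulated over the record.
def _example_n_cyc_power_law():
    a_ref = 0.15                                      # assumed reference amplitude
    b = 0.34                                          # assumed power-law factor
    peak_amps = np.array([0.05, 0.12, 0.20, 0.08])    # hypothetical peak amplitudes
    per_half_cycle = 0.5 * (peak_amps / a_ref) ** (1. / b)
    return np.cumsum(per_half_cycle)                  # equivalent cycles after each peak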
def calc_cyc_amp_array_w_power_law(values, n_cyc, b):
"""
Calculate the equivalent uniform loading for a given number of cycles and power coefficient (b)
:param values: array_like
Time series of values
:param n_cyc: Number of cycles
:param b: Power law factor
:return: array_like
"""
a1_peak_inds_end = functions.get_switched_peak_array_indices(values)
a1_csr_peaks_end = np.abs(np.take(values, a1_peak_inds_end))
csr_peaks_s1 = np.zeros_like(values)
np.put(csr_peaks_s1, a1_peak_inds_end, a1_csr_peaks_end)
csr_n15_series1 = np.cumsum((np.abs(csr_peaks_s1)[:, np.newaxis] ** (1. / b)) / 2 / n_cyc, axis=0) ** b
if not hasattr(b, '__len__'):
return np.reshape(csr_n15_series1, len(values))
return csr_n15_series1
def calc_cyc_amp_gm_arrays_w_power_law(values0, values1, n_cyc, b):
"""
Calculate the geometric mean equivalent uniform amplitude for a given number of cycles and power coefficient (b)
:param values0: array_like
Time series of values
:param values1: array_like
Time series of values in orthogonal direction to values0
:param n_cyc: Number of cycles
:param b: Power law factor
:return: array_like
"""
csr_n_series0 = calc_cyc_amp_array_w_power_law(values0, n_cyc=n_cyc, b=b)
csr_n_series1 = calc_cyc_amp_array_w_power_law(values1, n_cyc=n_cyc, b=b)
csr_n_series = np.sqrt(csr_n_series0 * csr_n_series1)
return csr_n_series
def calc_cyc_amp_combined_arrays_w_power_law(values0, values1, n_cyc, b):
"""
Computes the equivalent cyclic amplitude.
For a set number of cycles using a power law and assuming both components act equally
Parameters
----------
values0: array_like
Time series of values
values1: array_like
Time series of values in orthogonal direction to values0
n_cyc: int or float
Number of cycles
b: float
Power law factor
Returns
-------
array_like
"""
peak_inds_a0 = functions.get_switched_peak_array_indices(values0)
csr_peaks_a0 = np.abs(np.take(values0, peak_inds_a0))
peak_inds_a1 = functions.get_switched_peak_array_indices(values1)
csr_peaks_a1 = np.abs(np.take(values1, peak_inds_a1))
csr_peaks_s0 = np.zeros_like(values0)
|
np.put(csr_peaks_s0, peak_inds_a0, csr_peaks_a0)
|
numpy.put
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
from matplotlib import rcParams
#rcParams['font.family'] = ['Nimbus Sans L']
import matplotlib.pyplot as plt
import glob
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import tushare as ts
import datetime
import argparse
import math
import ta
import os, sys, random
import smtplib
import imghdr
from email.message import EmailMessage
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
import subprocess
import mplfinance as mpf
stock_index = ['000001.SH']
code2name = dict()
predict_days = 5
api = ts.pro_api(token='<PASSWORD> t<PASSWORD> token')
trading_note = """
This email is sent automatically by a program. Please do not reply. Thank you!
### Conclusions
After this period of testing, the following indicators were selected (close is the closing price):
- rsi2: the 2-day RSI. The 2-period RSI is quite sensitive and fairly accurate for short-term, staged buy points, but it is not reliable on its own; relying only on the 2-period RSI makes it easy to sell too early and miss the move, and it can draw down heavily in a poor market
- boll_wband20: 20-period Bollinger band width. Very accurate for judging short-term trends; when the price line crosses above boll_wband20 and trends with it, that is a strong uptrend
- vwap30: 30-period volume weighted average price. When the price line crosses above vwap30 and trends with it, that is a strong uptrend
- kc_wband15: 15-period Keltner channel width. When the price line crosses above kc_wband15 and trends with it, that is a strong uptrend
- macd: fast line 5 periods, slow line 15 periods, signal line 7 periods. When the MACD line crosses above the signal line it is an uptrend, though with some lag
- adx15: 15-period average directional movement index. When +DMI > -DMI, the trend is up
- trix2: 2-period TRIX. When trix2 crosses above the price line and trends with it, that is a strong uptrend
- mi: mass index. When the price line crosses above mi and trends with it, that is a strong uptrend
- cci5: 5-period commodity channel index. Very sensitive; when cci5 > close and there is no clear downtrend, the trend is up
- kst: KST oscillator. When kst crosses above its signal line and trends with it, that is a strong uptrend; false signals do occur
- psar: parabolic stop and reverse. Every time the price line crosses above psar is a buy point
- tsi: true strength index. When tsi crosses above the price line, that is a strong uptrend
- wr15: 15-period Williams %R. When wr15 crosses above the price line and stays above it, the trend is up
- roc15: 15-period rate of change. When roc15 crosses above the price line and stays above it, the trend is up
- kama: Kaufman's adaptive moving average. When the price line crosses above kama, the trend is up
(A small code sketch of this "price crosses above an indicator" rule follows right after this note.)
"""
def check_stock_data(name):
files = glob.glob(name)
return (len(files) != 0)
def get_stock_data(name, weekly):
data = pd.DataFrame()
end_date = api.daily().iloc[0]['trade_date']
while True:
if weekly:
tmp = api.weekly(ts_code=name, end_date=end_date)
else:
tmp = ts.pro_bar(ts_code=name, api=api, end_date=end_date, adj='qfq')
print("get data length: %d, end_date: %s last trade day: %s" % (len(tmp), end_date, tmp.iloc[0].trade_date))
end_date = datetime.datetime.strptime(str(tmp.iloc[-1].trade_date), '%Y%m%d')
delta = datetime.timedelta(days=1)
end_date = (end_date - delta).strftime("%Y%m%d")
data = data.append(tmp)
if len(tmp) < 5000:
break
return data
def get_index_data(name, weekly):
today = datetime.date.today().strftime("%Y%m%d")
data = api.index_daily(ts_code=name)
if str(data.iloc[0].trade_date) != today:
print("today's index data is not ready, last trading day is %s" % data.iloc[0].trade_date)
return data
def get_stock_candidates():
today = datetime.date.today().strftime("%Y%m%d")
last_trading_day = api.daily().iloc[0]['trade_date']
if today != last_trading_day:
print("today's stock data is not ready, get stock candidates of %s" % last_trading_day)
df = api.daily_basic(trade_date=last_trading_day)
# select stocks with trading volume above 200k and price between 5 and 50
candidates = df[(df.float_share * df.turnover_rate_f > 200000.) & (df.close > 5.) & (df.close < 50.)]["ts_code"].tolist()
return candidates
def get_code_name_map():
global code2name
global api
df = api.stock_basic()
for code, name in zip(df['ts_code'].to_list(), df['name'].to_list()):
code2name[code] = name
df = api.index_basic()
for code, name in zip(df['ts_code'].to_list(), df['name'].to_list()):
code2name[code] = name
def calculate_index(days, K):
global code2name
# days: number of trading days, most recent first
last_day = api.daily().iloc[0]['trade_date']
print('last trade day: %s' % last_day)
open_cal = api.trade_cal(is_open='1', end_date=last_day)['cal_date'].to_list()[-1::-1][:days+20]
data = pd.DataFrame()
trade_date_ = []
open_ = []
high_ = []
low_ = []
close_ = []
vol_ = []
amount_ = []
top_K = []
r_top_K = []
w_top_K = []
for day in open_cal:
df = api.daily(trade_date=day)
df2 = api.daily_basic(trade_date=day)
df = df[df.ts_code.isin(df2.ts_code.tolist())]
df = df.sort_values('ts_code').reset_index()
df2 = df2.sort_values('ts_code').reset_index()
df['circ_mv'] = df2['circ_mv']
amount = df.circ_mv.sum()
df['weight'] = df['circ_mv'] / amount * 100
df['open'] = df['open'] * df['weight']
df['high'] = df['high'] * df['weight']
df['low'] = df['low'] * df['weight']
df['close'] = df['close'] * df['weight']
trade_date_.append(day)
open_.append(df.open.sum())
high_.append(df.high.sum())
low_.append(df.low.sum())
close_.append(df.close.sum())
vol_.append(df.vol.sum() / 10000.)
amount_.append(df.amount.sum() / 100000.)
cand = df.sort_values('weight', ascending=False).iloc[:K][['ts_code', 'weight']].to_numpy()
top_ = ["%s%+.3f%%" % (code2name[item[0]], item[1]) for item in cand]
w_top_K.append(top_)
cand = df.sort_values('close', ascending=False).iloc[:K][['ts_code', 'pct_chg']].to_numpy()
top_ = ["%s%+.2f%%" % (code2name[item[0]], item[1]) for item in cand]
top_K.append(top_)
cand = df.sort_values('close', ascending=True)[['ts_code', 'pct_chg']].to_numpy()
temp = []
count = 0
for item in cand:
if item[0] in code2name:
temp.append("%s%+.2f%%" %(code2name[item[0]], item[1]))
count += 1
if count >= K:
break
r_top_K.append(temp)
#time.sleep(0.5)
data['Date'] = trade_date_[-1::-1]
data['Open'] = open_[-1::-1]
data['High'] = high_[-1::-1]
data['Low'] = low_[-1::-1]
data['Close'] = close_[-1::-1]
data['Volume'] = vol_[-1::-1]
data['Amount'] = amount_[-1::-1]
bb = ta.volatility.BollingerBands(close=data['Close'], n=20, ndev=2)
data['BollHBand'] = bb.bollinger_hband()
data['BollLBand'] = bb.bollinger_lband()
data['BollMAvg'] = bb.bollinger_mavg()
return data.iloc[20:], (top_K, r_top_K, w_top_K)
def plot_index(df, top_K, savefile):
df['Date'] = df['Date'].astype('datetime64[ns]')
df = df.set_index('Date')
mc = mpf.make_marketcolors(up='r', down='g', ohlc='white')
style = mpf.make_mpf_style(base_mpf_style='nightclouds', marketcolors=mc)
wconfig = dict()
apdict = mpf.make_addplot(df[['BollHBand', 'BollLBand', 'BollMAvg']])
mpf.plot(df, type='ohlc', volume=True, style=style, title='Stock A Index', return_width_config=wconfig, ylabel='Index', figscale=1.5, tight_layout=True, addplot=apdict, scale_width_adjustment=dict(lines=0.7))
print(wconfig)
plt.savefig(savefile)
plt.close('all')
today = datetime.date.today().strftime("%Y%m%d")
trade_date = api.trade_cal(end_date=today, is_open='1')
print('trade date: %s' % trade_date.iloc[-1]['cal_date'])
print('open: %.2f' % df.iloc[-1]['Open'])
print('high: %.2f' % df.iloc[-1]['High'])
print('low: %.2f' % df.iloc[-1]['Low'])
print('close: %.2f' % df.iloc[-1]['Close'])
print('volume: %.2f (x10,000 lots)' % df.iloc[-1]['Volume'])
print('amount: %.2f (x100 million CNY)' % df.iloc[-1]['Amount'])
print('percent change: %+.2f%%' % ((df.iloc[-1]['Close'] - df.iloc[-2]['Close']) / df.iloc[-2]['Close'] * 100.))
print("权重占比前十: %s" % ' '.join(top_K[2][0]))
print('指数占比前十: %s' % ' '.join(top_K[0][0]))
print('指数占比倒数前十: %s' % ' '.join(top_K[1][0]))
def add_preday_info(data):
new_data = data.reset_index(drop=True)
extend = pd.DataFrame()
pre_open = []
pre_high = []
pre_low = []
pre_change = []
pre_pct_chg = []
pre_vol = []
pre_amount = []
for idx in range(len(new_data) - 1):
pre_open.append(new_data.iloc[idx + 1].open)
pre_high.append(new_data.iloc[idx + 1].high)
pre_low.append(new_data.iloc[idx + 1].low)
pre_change.append(new_data.iloc[idx + 1].change)
pre_pct_chg.append(new_data.iloc[idx + 1].pct_chg)
pre_vol.append(new_data.iloc[idx + 1].vol)
pre_amount.append(new_data.iloc[idx + 1].amount)
pre_open.append(0.)
pre_high.append(0.)
pre_low.append(0.)
pre_change.append(0.)
pre_pct_chg.append(0.)
pre_vol.append(0.)
pre_amount.append(0.)
new_data['pre_open'] = pre_open
new_data['pre_high'] = pre_high
new_data['pre_low'] = pre_low
new_data['pre_change'] = pre_change
new_data['pre_pct_chg'] = pre_pct_chg
new_data['pre_vol'] = pre_vol
new_data['pre_amount'] = pre_amount
# fill predicting target
days = [[] for i in range(predict_days)]
for idx in range(predict_days - 1, len(new_data)):
for i in range(len(days)):
days[i].append(new_data.iloc[idx - i].pct_chg)
# fill invalid days with 0.
for i in range(len(days)):
for idx in range(predict_days - 1):
days[i].insert(0, 0.)
# extend pandas frame
for i in range(len(days)):
col = "pct_chg%d" % (i + 1)
new_data[col] = days[i]
return new_data
def add_ma_info(data):
new_data = data.reset_index(drop=True)
days = [5, 10, 15, 20, 30, 50, 100, 200]
# add simple ma info
cols = ["sma%d" % d for d in days]
for day, col in zip(days, cols):
new_data[col] = ta.utils.sma(new_data.iloc[-1::-1].close, periods=day)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
# add exponential ma info
# scaling = s / (1 + d), where s is the smoothing factor (typically 2) and d is the MA period in days
# ema(t) = v * scaling + ema(t - 1) * (1 - scaling), where v is the price at time t
# (a standalone sketch of this recurrence follows right after this function)
cols = ["ema%d" % d for d in days]
for day, col in zip(days, cols):
new_data[col] = ta.utils.ema(new_data.iloc[-1::-1].close, periods=day)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
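# Illustrative sketch of the EMA recurrence noted in add_ma_info (assumes smoothing s=2 and
# seeds with the first price; ta.utils.ema seeds differently, so values will not match exactly).
def _example_ema(prices, d, s=2.0):
    prices = np.asarray(prices, dtype=float)
    alpha = s / (1.0 + d)
    ema = np.empty_like(prices)
    ema[0] = prices[0]
    for t in range(1, len(prices)):
        ema[t] = prices[t] * alpha + ema[t - 1] * (1.0 - alpha)
    return ema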
def add_rsi_info(data):
new_data = data.reset_index(drop=True)
'''
RSI = 100 - 100 / (1 + RS)
RS = average up / average down
average up = sum(up moves) / N
average down = sum(down moves) / N
(a standalone sketch of this simple-average formula follows right after this function)
'''
# calculate ups and downs
N = [2,3,4,5,6]
cols = ["rsi%d" % n for n in N]
for n, col in zip(N, cols):
new_data[col] = ta.momentum.rsi(new_data.iloc[-1::-1].close, n=n)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
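# Illustrative sketch of the simple-average RSI formula quoted in add_rsi_info
# (ta.momentum.rsi uses a smoothed/Wilder average, so its values will differ slightly).
def _example_rsi(prices, n):
    prices = np.asarray(prices, dtype=float)
    moves = np.diff(prices)[-n:]
    avg_up = np.clip(moves, 0.0, None).mean()
    avg_down = np.clip(-moves, 0.0, None).mean()
    if avg_down == 0.0:
        return 100.0
    return 100.0 - 100.0 / (1.0 + avg_up / avg_down)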
def add_crossover_info(data):
# this project is for short-swing trading, so I just
# track 5-day period ema crossover with 10-day, 15-day, 20-day,
# 30-day, 50-day, 100-day, 200-day,
# -1 for breakdowns, 0 for normal, 1 for breakouts
new_data = data.reset_index(drop=True)
tracking_day = 'ema5'
cross_day = ['ema10', 'ema15', 'ema20', 'ema30', 'ema50', 'ema100', 'ema200']
cross_cols = ['cross5-10', 'cross5-15', 'cross5-20', 'cross5-30', 'cross5-50', 'cross5-100', 'cross5-200']
for ema, cross_col in zip(cross_day, cross_cols):
prestatus = 0
if new_data.iloc[-2][tracking_day] >= new_data.iloc[-2][ema]:
prestatus = 1
else:
prestatus = -1
crossover = []
crossover.append(prestatus)
for idx in range(len(new_data) - 2, -1, -1):
if prestatus == -1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(1)
prestatus = 1
else:
crossover.append(0)
elif prestatus == 1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(0)
else:
crossover.append(-1)
prestatus = -1
new_data[cross_col] = crossover[-1::-1]
precross_cols = ['pre_cross5-10', 'pre_cross5-15', 'pre_cross5-20', 'pre_cross5-30', 'pre_cross5-50', 'pre_cross5-100', 'pre_cross5-200']
for cross_col, precross_col in zip(cross_cols, precross_cols):
vals = new_data.iloc[1:][cross_col].tolist()
vals.append(0)
new_data[precross_col] = vals
return new_data
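# Illustrative sketch of the -1/0/1 breakdown/breakout encoding built by add_crossover_info,
# written for plain chronological arrays (oldest first) rather than the reversed DataFrames above.
def _example_crossover_encoding(fast_ema, slow_ema):
    fast_ema = np.asarray(fast_ema, dtype=float)
    slow_ema = np.asarray(slow_ema, dtype=float)
    above = fast_ema >= slow_ema
    codes = np.zeros(len(fast_ema), dtype=int)
    codes[0] = 1 if above[0] else -1                       # seed with the initial side
    codes[1:] = np.where(above[1:] & ~above[:-1], 1,       # 1: crossed above (breakout)
                np.where(~above[1:] & above[:-1], -1, 0))  # -1: crossed below (breakdown)
    return codes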
def add_long_crossover_info(data):
# add 50-day 100-day crossover info, I think
# it is not important for short-swing trading,
# but sometimes it happens, just add this feature
new_data = data.reset_index(drop=True)
tracking_day = 'ema50'
cross_day = ['ema100']
cross_cols = ['longcross']
for ema, cross_col in zip(cross_day, cross_cols):
prestatus = 0
if new_data.iloc[-2][tracking_day] >= new_data.iloc[-2][ema]:
prestatus = 1
else:
prestatus = -1
crossover = []
crossover.append(prestatus)
for idx in range(len(new_data) - 2, -1, -1):
if prestatus == -1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(1)
prestatus = 1
else:
crossover.append(0)
elif prestatus == 1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(0)
else:
crossover.append(-1)
prestatus = -1
new_data[cross_col] = crossover[-1::-1]
precross_cols = ['pre_longcross']
for cross_col, precross_col in zip(cross_cols, precross_cols):
vals = new_data.iloc[1:][cross_col].tolist()
vals.append(0)
new_data[precross_col] = vals
return new_data
def add_bollinger_band_info(data):
new_data = data.reset_index(drop=True)
#N = [20, 14, 12, 10, 5, 4, 3, 2]
N = [20, 10]
for n in N:
bb = ta.volatility.BollingerBands(close=new_data.iloc[-1::-1].close, n=n, ndev=2)
col = 'boll_hband%d' % n
new_data[col] = bb.bollinger_hband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_lband%d' % n
new_data[col] = bb.bollinger_lband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_hband_ind%d' % n
new_data[col] = bb.bollinger_hband_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_lband_ind%d' % n
new_data[col] = bb.bollinger_lband_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_mavg%d' % n
new_data[col] = bb.bollinger_mavg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_pband%d' % n
new_data[col] = bb.bollinger_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_wband%d' % n
new_data[col] = bb.bollinger_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
return new_data
def add_obv_info(data):
new_data = data.reset_index(drop=True)
obv = ta.volume.OnBalanceVolumeIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
new_data['obv'] = obv.on_balance_volume()
temp = new_data.iloc[1:]['obv'].tolist()
temp.append(np.nan)
new_data['pre_obv'] = temp
return new_data
def add_adi_info(data):
new_data = data.reset_index(drop=True)
adi = ta.volume.AccDistIndexIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
new_data['adi'] = adi.acc_dist_index()
temp = new_data.iloc[1:]['adi'].tolist()
temp.append(np.nan)
new_data['pre_adi'] = temp
return new_data
def add_cmf_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
cmf = ta.volume.ChaikinMoneyFlowIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=day)
col = "cmf%d" % day
new_data[col] = cmf.chaikin_money_flow()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_fi_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
fi = ta.volume.ForceIndexIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=day)
col = "fi%d" % day
new_data[col] = fi.force_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_eom_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
eom = ta.volume.EaseOfMovementIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, volume=new_data.iloc[-1::-1].vol, n=day)
col = "eom%d" % day
new_data[col] = eom.ease_of_movement()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "sma_eom%d" % day
new_data[col] = eom.sma_ease_of_movement()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vpt_info(data):
new_data = data.reset_index(drop=True)
vpt = ta.volume.VolumePriceTrendIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
col = "vpt"
new_data[col] = vpt.volume_price_trend()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_nvi_info(data):
new_data = data.reset_index(drop=True)
nvi = ta.volume.NegativeVolumeIndexIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
col = "nvi"
new_data[col] = nvi.negative_volume_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vwap_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
vwap = ta.volume.VolumeWeightedAveragePrice(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, volume=new_data.iloc[-1::-1].vol, close=new_data.iloc[-1::-1].close, n=day)
col = "vwap%d" % day
new_data[col] = vwap.volume_weighted_average_price()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_atr_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30,14]
for day in days:
atr = ta.volatility.AverageTrueRange(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "atr%d" % day
new_data[col] = atr.average_true_range()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
kc = ta.volatility.KeltnerChannel(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "kc_mband%d" % day
new_data[col] = kc.keltner_channel_mband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kc_pband%d" % day
new_data[col] = kc.keltner_channel_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kc_wband%d" % day
new_data[col] = kc.keltner_channel_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
dc = ta.volatility.DonchianChannel(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "dc_mband%d" % day
new_data[col] = dc.donchian_channel_mband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "dc_pband%d" % day
new_data[col] = dc.donchian_channel_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "dc_wband%d" % day
new_data[col] = dc.donchian_channel_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_macd_info(data):
new_data = data.reset_index(drop=True)
macd = ta.trend.MACD(close=new_data.iloc[-1::-1].close, n_slow=15, n_fast=5, n_sign=7)
col = "macd"
new_data[col] = macd.macd()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "macd_diff"
new_data[col] = macd.macd_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "macd_signal"
new_data[col] = macd.macd_signal()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_adx_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30,14,10]
for day in days:
adx = ta.trend.ADXIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "adx%d" % day
new_data[col] = adx.adx()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "adx_neg%d" % day
new_data[col] = adx.adx_neg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "adx_pos%d" % day
new_data[col] = adx.adx_pos()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vi_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
vi = ta.trend.VortexIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "vi_diff%d" % day
new_data[col] = vi.vortex_indicator_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "vi_neg%d" % day
new_data[col] = vi.vortex_indicator_neg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "vi_pos%d" % day
new_data[col] = vi.vortex_indicator_pos()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_trix_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
trix = ta.trend.TRIXIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "trix%d" % day
new_data[col] = trix.trix()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_mi_info(data):
new_data = data.reset_index(drop=True)
mi = ta.trend.MassIndex(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "mi"
new_data[col] = mi.mass_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_cci_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
cci = ta.trend.CCIIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "cci%d" % day
new_data[col] = cci.cci()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dpo_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
dpo = ta.trend.DPOIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "dpo%d" % day
new_data[col] = dpo.dpo()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kst_info(data):
new_data = data.reset_index(drop=True)
kst = ta.trend.KSTIndicator(close=new_data.iloc[-1::-1].close)
col = "kst"
new_data[col] = kst.kst()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kst_diff"
new_data[col] = kst.kst_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kst_sig"
new_data[col] = kst.kst_sig()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_ichimoku_info(data):
new_data = data.reset_index(drop=True)
ichimoku = ta.trend.IchimokuIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "ichimoku_a"
new_data[col] = ichimoku.ichimoku_a()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_b"
new_data[col] = ichimoku.ichimoku_b()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_base"
new_data[col] = ichimoku.ichimoku_base_line()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_conv"
new_data[col] = ichimoku.ichimoku_conversion_line()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_psar_info(data):
new_data = data.reset_index(drop=True)
psar = ta.trend.PSARIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close)
col = "psar"
new_data[col] = psar.psar()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_down"
new_data[col] = psar.psar_down()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_down_idc"
new_data[col] = psar.psar_down_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_up"
new_data[col] = psar.psar_up()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_up_idc"
new_data[col] = psar.psar_up_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_tsi_info(data):
new_data = data.reset_index(drop=True)
tsi = ta.momentum.TSIIndicator(close=new_data.iloc[-1::-1].close)
col = "tsi"
new_data[col] = tsi.tsi()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_uo_info(data):
new_data = data.reset_index(drop=True)
uo = ta.momentum.UltimateOscillator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close)
col = "uo"
new_data[col] = uo.uo()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_so_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
so = ta.momentum.StochasticOscillator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "stoch%d" % day
new_data[col] = so.stoch()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "stoch_signal%d" % day
new_data[col] = so.stoch_signal()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_wr_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
wr = ta.momentum.WilliamsRIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, lbp=day)
col = "wr%d" % day
new_data[col] = wr.wr()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_ao_info(data):
new_data = data.reset_index(drop=True)
ao = ta.momentum.AwesomeOscillatorIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "ao"
new_data[col] = ao.ao()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kama_info(data):
new_data = data.reset_index(drop=True)
kama = ta.momentum.KAMAIndicator(close=new_data.iloc[-1::-1].close)
col = "kama"
new_data[col] = kama.kama()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_roc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
roc = ta.momentum.ROCIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "roc%d" % day
new_data[col] = roc.roc()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dr_info(data):
new_data = data.reset_index(drop=True)
dr = ta.others.DailyReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "dr"
new_data[col] = dr.daily_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dlr_info(data):
new_data = data.reset_index(drop=True)
dlr = ta.others.DailyLogReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "dlr"
new_data[col] = dlr.daily_log_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_cr_info(data):
new_data = data.reset_index(drop=True)
cr = ta.others.CumulativeReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "cr"
new_data[col] = cr.cumulative_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_mfi_info(data):
new_data = data.reset_index(drop=True)
mfi = ta.volume.MFIIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=5)
col = "mfi"
new_data[col] = mfi.money_flow_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
# support and resistance
def add_sr_info(data):
new_data = data.reset_index(drop=True)
v = new_data['vol'].to_numpy()
p = new_data['close'].to_numpy()
h = new_data['high'].to_numpy()
l = new_data['low'].to_numpy()
avg = (p + h + l) / 3.
sr = [v * p for v, p in zip(v, avg)]
new_data['sup_res'] = sr
boll = ta.volatility.BollingerBands(close=new_data.iloc[-1::-1]['sup_res'])
new_data['sup_res_h'] = boll.bollinger_hband()
new_data['sup_res_l'] = boll.bollinger_lband()
new_data['sup_res_mavg'] = boll.bollinger_mavg()
new_data['sup_res_p'] = boll.bollinger_pband()
return new_data
def add_features(data):
new_data = data.reset_index(drop=True)
# previous day info
#new_data = add_preday_info(new_data)
# moving average info
new_data = add_ma_info(new_data)
# rsi info
new_data = add_rsi_info(new_data)
# crossover of moving average
#new_data = add_crossover_info(new_data)
# long crossover of moving average
#new_data = add_long_crossover_info(new_data)
# bollinger bands
new_data = add_bollinger_band_info(new_data)
# on-balance volume
#new_data = add_obv_info(new_data)
# accumulation/distribution index
#new_data = add_adi_info(new_data)
# chaikin money flow
#new_data = add_cmf_info(new_data)
# force index
#new_data = add_fi_info(new_data)
# ease of movement
#new_data = add_eom_info(new_data)
# volume price trend
#new_data = add_vpt_info(new_data)
# negative volume index
#new_data = add_nvi_info(new_data)
# volume weighted average price
#new_data = add_vwap_info(new_data)
# average true range
#new_data = add_atr_info(new_data)
# keltner channel
#new_data = add_kc_info(new_data)
# donchian channel
#new_data = add_dc_info(new_data)
# moving average convergence divergence
#new_data = add_macd_info(new_data)
# average directional movement index
new_data = add_adx_info(new_data)
# vortex indicator
#new_data = add_vi_info(new_data)
# trix indicator
#new_data = add_trix_info(new_data)
# mass index
#new_data = add_mi_info(new_data)
# commodity channel index
#new_data = add_cci_info(new_data)
# detrended price oscillator
#new_data = add_dpo_info(new_data)
# kst oscillator
#new_data = add_kst_info(new_data)
# ichimoku kinko hyo
#new_data = add_ichimoku_info(new_data)
# parabolic stop and reverse
new_data = add_psar_info(new_data)
# true strength index
#new_data = add_tsi_info(new_data)
# ultimate oscillator
#new_data = add_uo_info(new_data)
# stochastic oscillator
#new_data = add_so_info(new_data)
# williams %R
#new_data = add_wr_info(new_data)
# awesome oscillator
#new_data = add_ao_info(new_data)
# kaufman's adaptive moving average
#new_data = add_kama_info(new_data)
# rate of change
#new_data = add_roc_info(new_data)
# daily return
#new_data = add_dr_info(new_data)
# daily log return
#new_data = add_dlr_info(new_data)
# cumulative return
#new_data = add_cr_info(new_data)
# money flow index
#new_data = add_mfi_info(new_data)
# support and resistance
new_data = add_sr_info(new_data)
return new_data
def plot_data(data, days, close, cols, filename, stock):
x = [i for i in range(days)]
count = 0
plt.figure()
fig, ax = plt.subplots(len(cols), figsize=[6.4 * 3, 4 * len(cols)])
if not isinstance(ax, np.ndarray):
ax = [ax]
for col in cols:
if 'ema' in col or 'boll_band' in col or 'pct_chg' in col:
vals2 = data.iloc[0:days].iloc[-1::-1][close].to_numpy()
vals3 = data.iloc[0:days].iloc[-1::-1]['ema5'].to_numpy()
sns.lineplot(x=x, y=vals3, ax=ax[count])
sns.lineplot(x=x, y=vals2, ax=ax[count])
elif 'vol' in col:
vals = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
elif 'gap' in col:
vals2 = data.iloc[0:days].iloc[-1::-1][close].to_numpy()
vals2 = StandardScaler().fit_transform(vals2.reshape(-1, 1)).flatten()
sns.lineplot(x=x, y=vals2, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
vals = StandardScaler().fit_transform(vals.reshape(-1, 1)).flatten()
sns.lineplot(x=x, y=vals, ax=ax[count])
max_ = max([np.amax(vals2), np.amax(vals)])
min_ = min([np.amin(vals2),
|
np.amin(vals)
|
numpy.amin
|
import numpy as np
import cv2
try:
from picamera import PiCamera
from picamera.array import PiRGBArray
CAMERA_ACTIVE = True
except ModuleNotFoundError:
CAMERA_ACTIVE = False
import pickle
import glob
import os
import time
import peakutils.peak
from IPython.display import Image, display
def show_image(img):
cv2.imwrite('testimage.png',img)
display(Image(filename="testimage.png"))
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
class robo_camera:
def __init__(self,resolution="1920x1080",shutter_speed=2000,camera_matrix="camera_matrix.pckl"):
self.mtx = None
self.dist = None
self.cam_calibrated = False
if(camera_matrix is not None):
pickfle = open(camera_matrix, "rb")
mtx,dist = pickle.load(pickfle)
self.mtx = mtx
self.dist = dist
self.cam_calibrated = True
if(CAMERA_ACTIVE):
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.shutter_speed = shutter_speed
def init_blob_detector(self,mint=130,maxt=500,mina=27,maxa=130,mincir=0.9,mincon=0.95,minin=.8):
#################BLOB DETECTOR###############
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = mint
params.maxThreshold = maxt
# Filter by Area.
params.filterByArea = True
params.minArea = mina
params.maxArea = maxa
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = mincir
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = mincon
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = minin
# Create a detector with the parameters
detector = cv2.SimpleBlobDetector_create(params)
return detector
def detect_colonies(self,impath,image_transform,vector_transform,display_image=False,save_image=True,\
mint=130,maxt=500,mina=27,maxa=130,mincir=0.9,mincon=0.95,minin=.8):
img = cv2.imread(os.path.join('.','pictures',impath))
x = time.localtime()
tstamp = str(x.tm_year)[2:]+str(x.tm_mon)+str(x.tm_mday)+str(x.tm_hour)+str(x.tm_min)
colonypick_path = os.path.join('.','pictures',str(tstamp+'_picked_'+impath))
#this next part warps the image so that it is rectified and in the middle of the frame
transform = image_transform
warp = cv2.warpPerspective(img,transform,(500,550)) #transforming the calibrated image
warp_gray = cv2.cvtColor(warp,cv2.COLOR_BGR2GRAY)
#making a mask for colony finding
warp_empty = np.zeros(shape=[550, 500, 1], dtype=np.uint8)
center_coordinates = (230, 250)
axesLength = (170, 140)
warp_colony_mask = cv2.ellipse(warp_empty,center_coordinates,axesLength,0,0,360,255,-1)
warp_gray_inv = 255-warp_gray
warp_blur = cv2.GaussianBlur(warp_gray_inv,(101,101),50,borderType=cv2.BORDER_DEFAULT)
#background subtraction
colony_nobg = cv2.subtract(warp_gray_inv,warp_blur)
colony_crop = (255-cv2.bitwise_and(colony_nobg, colony_nobg, mask=warp_colony_mask))*4
#colony_crop = colony_crop.convertTo(colony_crop,cv2.CV_8U)
maxval = np.max(colony_crop) #maximum pixel value in the image
minval = np.min(colony_crop) #minimum pixel value in the image
gaus = cv2.adaptiveThreshold(colony_crop, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 91, 12)
gaus_eroded = cv2.dilate(gaus,np.ones([3,3]),iterations=1)
gaus_eroded = cv2.erode(gaus_eroded,np.ones([3,3]),iterations=1)
#gaus_eroded = gaus
#cv2.imwrite('testimage.png',gaus_eroded)
#display(Image('testimage.png'))
detector = self.init_blob_detector(mint=mint,maxt=maxt,mina=mina,maxa=maxa,mincir=mincir,mincon=mincon,minin=minin)
#blob detection
keypoints = detector.detect(gaus_eroded)
#print(keypoints)
print(len(keypoints))
warp_with_keypoints = cv2.drawKeypoints(warp, keypoints,\
np.array([]), 255, cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
if(save_image):
cv2.imwrite(colonypick_path, warp_with_keypoints)
if(display_image):
display(Image(filename=colonypick_path))
elif(display_image):
cv2.imwrite("testimage.png", warp_with_keypoints)
display(Image(filename="testimage.png"))
imstripe = warp[30:110, 215:235]
imstripe_gray = cv2.cvtColor(imstripe, cv2.COLOR_BGR2GRAY)
im_trace = np.mean(imstripe_gray, axis=1)
im_deriv = smooth(-np.gradient(im_trace, 10), 4)
im_base = peakutils.baseline(im_deriv, 2)
im_deriv_debased = im_deriv-im_base
highs = peakutils.peak.indexes(
im_deriv_debased,
thres=1/max(im_deriv_debased), min_dist=20
)
filteredhighs = []
for high in highs:
if(high >15 and high < 70):
filteredhighs += [high]
high_data = []
for high in filteredhighs:
yval = im_deriv_debased[high]
high_data += [[yval,high]]
#plt.plot([a[1] for a in high_data],[a[0] for a in high_data],'o')
if(len(filteredhighs) != 2):
agar_to_rim_distance = 34 #default assumption
else:
agar_to_rim_distance = filteredhighs[1]-filteredhighs[0]
#print("agar to rim distance is "+str(agar_to_rim_distance))
return keypoints
def capture(self,savepath=None):
if(not CAMERA_ACTIVE):
return None
rawCapture = PiRGBArray(self.camera)
self.camera.capture(rawCapture,format="bgr")
img = rawCapture.array
if(self.cam_calibrated):
#undistort the camera if it is calibrated
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(self.mtx,self.dist,\
(w,h),1,(w,h))
dst = cv2.undistort(img, self.mtx, self.dist, None, newcameramtx)
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
img = dst
if(savepath is not None):
#save it if that is desired
cv2.imwrite(savepath,img)
return img
def mask_lit_background(self,bg_image = None):
if(bg_image is None):
bg_image = self.capture()
bgimg_gray = cv2.cvtColor(bg_image,cv2.COLOR_BGR2GRAY)
#bg_subtractor = cv2.createBackgroundSubtractorMOG2()
#fgmask = bg_subtractor.apply(bgimg_gray) #the first image is the background
#kernel = np.ones((5,5), np.uint8)
kernel = np.array([[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0]], dtype=np.uint8)
ret,thresh = cv2.threshold(bgimg_gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
thresh = cv2.erode(thresh,kernel,iterations=1) #get rid of noise
thresh = cv2.dilate(thresh,kernel,iterations=2) #fill up the holes
thresh = cv2.erode(thresh,kernel,iterations=10) #make it smaller
return thresh, bgimg_gray #bg_subtractor
def detect_needle(self,bg_image=None,thresh=None,bg_subtractor=None,needle_image=None):
"""detects a needle on top of a lit backdrop"""
#setup background subtraction
kernel = np.ones((3,3), np.uint8)
if(bg_image is None):
assert((thresh is not None) and (bg_subtractor is not None))
else:
thresh,bg_subtractor = self.mask_lit_background(bg_image)
#show_image(bg_subtractor)
#cv2.imshow("",bg_subtractor)
#now the mask which represents the table is called "thresh"
if(needle_image is None):
needle_image = self.capture()
gray = 255-cv2.cvtColor(needle_image,cv2.COLOR_BGR2GRAY)
bgmask = cv2.subtract(gray,255-bg_subtractor)
#show_image(bgmask)
bgmask = cv2.erode(bgmask,kernel,iterations=1) #erode a bit to get rid of junk
#bgmask = cv2.dilate(bgmask,kernel,iterations=1) #erode a bit to get rid of junk
ret,binary_needlepic = cv2.threshold(bgmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
outputvalues = cv2.findContours(binary_needlepic, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#following is for compatibility with the raspberry pi version of opencv
if(len(outputvalues)== 3):
contours = outputvalues[1]
elif(len(outputvalues)==2):
contours = outputvalues[0]
biggest_contour = np.zeros((binary_needlepic.shape[0],binary_needlepic.shape[1],1),np.uint8)
newcontours = []
area_thresh = 100
for contour in contours:
carea = cv2.contourArea(contour)
if(carea > area_thresh):
newcontours += [contour]
cv2.drawContours(biggest_contour, newcontours, -1, (255, 255, 255), -1)
result = cv2.bitwise_and(thresh,biggest_contour)
corners = cv2.goodFeaturesToTrack(biggest_contour, 1, .1, 10,mask=thresh,useHarrisDetector=True)
corners = np.int0(corners)
assert(len(corners[0])==1)
return corners[0][0]
def needle_jog_calibration(self,bgimg_path,needlepath_dict):
"""performs the calibration that is required for rectifying an image given four pictures of the colony picking needle in different positions..
bgimg_path: path to an image of the background with no needle
needlepath_dict: a dictionary containing a needle image path as the key with the corresponding x,y,z coordinates as the value."""
bgimg = cv2.imread(bgimg_path)
thresh,bg_subtractor = self.mask_lit_background(bgimg)
needlepoints = {}
needle_positions = []
for needle_image in needlepath_dict:
needle_pos = tuple(needlepath_dict[needle_image])
needle_positions += [needle_pos]
img = cv2.imread(needle_image)
npoint = self.detect_needle(thresh = thresh,\
bg_subtractor=bg_subtractor,needle_image=img)
needlepoints[needle_pos]= npoint
needle_xs = [a[0] for a in needle_positions]
needle_ys = [a[1] for a in needle_positions]
#needle_zs = [a[2] for a in needle_positions]
sorted_positions = [
(max(needle_xs),min(needle_ys)),
(max(needle_xs),max(needle_ys)),
(min(needle_xs),max(needle_ys)),
(min(needle_xs),min(needle_ys)),
]
sorted_needlepoint = []
for sorted_pos in sorted_positions:
sorted_needlepoint += [needlepoints[sorted_pos]]
squarelist = [[0,200],[150,200],[150,50],[0,50]]
square = np.array(squarelist,np.float32)+150
trapezoid = np.array(sorted_needlepoint,np.float32)
transform = cv2.getPerspectiveTransform(trapezoid,square) #the transformation matrix
square_2d = square
#robo_x = rm.colonyPicker.load_posfile(posfilename="robot_positions.csv")["needle_pos"]["backlit_plate"]["X"]
#robo_y = rm.colonyPicker.load_posfile(posfilename="robot_positions.csv")["needle_pos"]["backlit_plate"]["Y"]
robot_square =
|
np.array(needle_positions,np.float32)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""Testing :mod:`~discO.utils.random`."""
__all__ = [
"Test_NumpyRNGContext",
]
##############################################################################
# IMPORTS
# THIRD PARTY
import numpy as np
import pytest
# PROJECT-SPECIFIC
from discO.tests.helper import ObjectTest
from discO.utils import random
##############################################################################
# PARAMETERS
##############################################################################
# TESTS
##############################################################################
class Test_NumpyRNGContext(ObjectTest, obj=random.NumpyRNGContext):
#######################################################
# Method tests
def test___init__(self):
"""Test method ``__init__``."""
# seed None
obj = self.obj(None)
assert obj.seed is None
# int
obj = self.obj(2)
assert obj.seed == 2
# RandomState
obj = self.obj(np.random.RandomState(3))
name1, state1, *rest1 = obj.seed.get_state()
name2, state2, *rest2 = np.random.RandomState(3).get_state()
assert name1 == name2
assert all(np.equal(state1, state2))
assert rest1 == rest2
# Generator
obj = self.obj(np.random.default_rng(3))
assert (
obj.seed.__getstate__() == np.random.default_rng(3).__getstate__()
)
# /def
def test___enter__(self):
"""Test method ``__enter__``."""
# seed None
with self.obj(None):
ns = np.random.rand(5)
assert len(ns) == 5
# int
with self.obj(2):
ns = np.random.rand(5)
assert np.allclose(
ns,
np.array(
[0.4359949, 0.02592623, 0.54966248, 0.43532239, 0.4203678],
),
)
# RandomState
with self.obj(np.random.RandomState(3)):
ns = np.random.rand(5)
assert np.allclose(
ns,
np.array(
[
0.5507979,
0.70814782,
0.29090474,
0.51082761,
0.89294695,
],
),
)
# Generator
with pytest.warns(UserWarning):
with self.obj(
|
np.random.default_rng(3)
|
numpy.random.default_rng
|
from dataiku.customrecipe import *
import dl_image_toolbox_utils as utils
from sklearn.model_selection import train_test_split
from keras import optimizers, initializers, metrics, regularizers
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Dropout, Dense
from keras.models import Model
from keras.utils.training_utils import multi_gpu_model
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import pandas as pd
import constants
import math
import shutil
import numpy as np
import dataiku
###################################################################################################################
## LOADING ALL REQUIRED INFO AND
## SETTING VARIABLES
###################################################################################################################
# Recipe config
recipe_config = get_recipe_config()
should_use_gpu = recipe_config.get('should_use_gpu', False)
list_gpu = recipe_config["list_gpu"]
gpu_allocation = recipe_config["gpu_allocation"]
train_ratio = float(recipe_config["train_ratio"])
input_shape = (int(recipe_config["image_width"]),int(recipe_config["image_height"]),3)
batch_size = int(recipe_config["batch_size"])
optimizer = recipe_config["model_optimizer"]
learning_rate = recipe_config["model_learning_rate"]
custom_params_opti = recipe_config.get("model_custom_params_opti", [])
nb_epochs = int(recipe_config["nb_epochs"])
nb_steps_per_epoch = int(recipe_config["nb_steps_per_epoch"])
nb_validation_steps = int(recipe_config["nb_validation_steps"])
data_augmentation = recipe_config["data_augmentation"]
n_augmentation = int(recipe_config["n_augmentation"])
custom_params_data_augment = recipe_config.get("model_custom_params_data_augmentation", [])
tensorboard = recipe_config["tensorboard"]
random_seed = int(recipe_config["random_seed"])
# gpu
gpu_options = utils.load_gpu_options(should_use_gpu, list_gpu, gpu_allocation)
n_gpu = gpu_options.get("n_gpu", 0)
# Folders
image_folder_input_name = get_input_names_for_role('image_folder')[0]
image_folder = dataiku.Folder(image_folder_input_name)
utils.check_managed_folder_filesystem(image_folder)
image_folder_path = image_folder.get_path()
model_folder_input_name = get_input_names_for_role('model_folder')[0]
model_folder = dataiku.Folder(model_folder_input_name)
utils.check_managed_folder_filesystem(model_folder)
model_folder_path = model_folder.get_path()
output_model_folder_name = get_output_names_for_role('model_output')[0]
output_model_folder = dataiku.Folder(output_model_folder_name)
utils.check_managed_folder_filesystem(output_model_folder)
output_model_folder_path = output_model_folder.get_path()
# Label Dataset : Keeping only the two relevant columns
label_dataset_input_name = get_input_names_for_role('label_dataset')[0]
label_dataset = dataiku.Dataset(label_dataset_input_name)
renaming_mapping = {
recipe_config["col_filename"]: constants.FILENAME,
recipe_config["col_label"]: constants.LABEL
}
label_df = label_dataset.get_dataframe().rename(columns=renaming_mapping)[renaming_mapping.values()]
# Model config
model_config = utils.get_config(model_folder_path)
###################################################################################################################
## BUILD TRAIN/TEST SETS
###################################################################################################################
df_train, df_test = train_test_split(label_df, stratify=label_df[constants.LABEL], train_size=train_ratio, random_state=random_seed)
labels = list(np.unique(label_df[constants.LABEL]))
n_classes = len(labels)
###################################################################################################################
## LOAD MODEL
###################################################################################################################
# Loading pre-trained model
def load_model_and_apply_recipe_params(model_folder_path, input_shape, n_classes, recipe_config):
pooling = recipe_config["model_pooling"]
reg = recipe_config["model_reg"]
dropout = float(recipe_config["model_dropout"])
model_and_pp = utils.load_instantiate_keras_model_preprocessing(model_folder_path, goal=constants.RETRAINING,
input_shape=input_shape,
pooling=pooling,
reg=reg,
dropout=dropout,
n_classes=n_classes)
model = model_and_pp["model"]
preprocessing = model_and_pp["preprocessing"]
model_params = model_and_pp["model_params"]
# CHOOSING LAYER TO RETRAIN
layer_to_retrain = recipe_config["layer_to_retrain"]
print("Will Retrain layer(s) with mode: {}".format(layer_to_retrain))
if layer_to_retrain == "all" :
for lay in model.layers :
lay.trainable = True
elif layer_to_retrain == "last" :
for lay in model.layers[:-1] :
lay.trainable = False
lay = model.layers[-1]
lay.trainable = True
elif layer_to_retrain == "n_last" :
n_last = int(recipe_config["layer_to_retrain_n"])
for lay in model.layers[:-n_last] :
lay.trainable = False
for lay in model.layers[-n_last:] :
lay.trainable = True
model.summary()
return model, preprocessing, model_params
if should_use_gpu and n_gpu > 1:
with tf.device('/cpu:0'):
base_model, preprocessing, model_params = load_model_and_apply_recipe_params(model_folder_path, input_shape, n_classes, recipe_config)
model = multi_gpu_model(base_model, n_gpu)
else:
model, preprocessing, model_params = load_model_and_apply_recipe_params(model_folder_path, input_shape, n_classes, recipe_config)
###################################################################################################################
## BUILD GENERATORS
## Info: Generators must loop infinitely, each loop yielding the batches of preprocessed data.
## It will be used at each epoch, hence the infinite loop.
###################################################################################################################
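# Minimal sketch of the infinite-generator contract described above (hypothetical inputs):
# Keras-style training pulls steps_per_epoch batches per epoch, so the generator must never
# raise StopIteration -- it simply keeps looping over the data.
def _example_infinite_batches(X, y, batch_size):
    n = len(X)
    while True:                          # one pass per epoch, repeated forever
        for start in range(0, n, batch_size):
            yield X[start:start + batch_size], y[start:start + batch_size]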
@utils.threadsafe_generator
def augmentation_generator(df_imgs, image_folder_path, batch_size, n_augmentation, input_shape, labels, preprocessing, TrainImageGen):
nb_imgs = df_imgs.shape[0]
batch_size_adapted = int( batch_size / n_augmentation )
nb_batch = int(math.ceil( nb_imgs * 1.0 / batch_size_adapted ))
while True:
for num_batch in range(nb_batch):
df_imgs_batch = df_imgs.iloc[num_batch * batch_size_adapted : (num_batch + 1) * batch_size_adapted, :]
nb_imgs_batch = df_imgs_batch.shape[0]
X_batch_list = []
y_batch_list = []
for num_img in range(nb_imgs_batch):
row = df_imgs_batch.iloc[num_img, :]
img_filename = row[constants.FILENAME]
img_path = utils.get_file_path(image_folder_path, img_filename)
label = row[constants.LABEL]
label_index = labels.index(label)
try:
x = utils.preprocess_img(img_path, input_shape, preprocessing)
x =
|
np.tile(x, (n_augmentation, 1, 1, 1))
|
numpy.tile
|
import numpy as np
from dl.nn import Module
from dl.graph import Variable, batchNorm
class BatchNormLayer(Module):
"""
BatchNorm Layer object.
"""
def __init__(self, dim: int, eps=1e-5, momentum=0.1):
"""
BatchNorm Layer object.
Parameters
----------
dim:
input dimension.
eps:
epsilon to avoid divide by zero
momentum:
momentum used to compute moving average of mean and stddev.
"""
super().__init__()
self.dim = dim
self.gamma = Variable(np.ones((dim, 1)))
self.beta = Variable(
|
np.zeros((dim, 1))
|
numpy.zeros
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def pcol( x, y, data, **kwargs):
"""function h=pcol(x,y,v)
function h=pcol(x,y,v, projection = mp )
plots 2D scalar fields v on the MITgcm cubed sphere grid with pcolor.
x,y are really 'xg', and 'yg', that is, they should be the coordinates
of the points one half grid cell to the left and bottom, that is
vorticity points for tracers, etc.
The optional flag 'sphere' results in a 3D visualization on the sphere
without any specific projection. Good for debugging.
If present, 'projection' (a basemap instance) is used to transform
coordinates. Unfortunatly, cylindrical and conic maps are limited to
the [-180 180] range.
projection = 'sphere' results in a 3D visualization on the sphere
without any specific projection. Good for debugging.
Example script to use pcol.py:
from mpl_toolkits.basemap import Basemap
import MITgcmutils as mit
import matplotlib.pyplot as plt
from sq import sq
x=mit.rdmds('XG'); y=mit.rdmds('YG'); e=mit.rdmds('Eta',np.Inf)
fig = plt.figure();
mp = Basemap(projection='moll',lon_0 = 0.,
resolution = 'l', area_thresh = 1000.)
plt.clf()
h = mit.cs.pcol(x,y,sq(e), projection = mp)
mp.fillcontinents(color = 'grey')
mp.drawmapboundary()
mp.drawmeridians(np.arange(0, 360, 30))
mp.drawparallels(np.arange(-90, 90, 30))
plt.show()
"""
# pcol first divides the 2D cs-field(6*n,n) into six faces. Then for
# each face, an extra row and colum is added from the neighboring faces in
# order to fool pcolor into drawing the entire field and not just
# (n-1,m-1) data points. There are two corner points that have no explicit
# coordinates so that they have to be found by
# interpolation/averaging. Then each face is divided into 4 tiles,
# assuming cs-geometry, and each tile is plotted individually in
# order to avoid problems due to ambigous longitude values (the jump
# between -180 and 180, or 360 and 0 degrees). As long as the poles
# are at the centers of the north and south faces and the first tile is
# symmetric about its center this should work.
# get the figure handle
fig=plt.gcf()
mapit = 0
if 'projection' in kwargs:
mp = kwargs['projection']
if mp=='sphere': mapit=-1
else: mapit = 1
# convert to [-180 180[ representation
x = np.where(x>180,x-360.,x)
ny,nx = data.shape
# determine range for color range
cax = [data.min(),data.max()]
if cax[1]-cax[0]==0: cax = [cax[0]-1,cax[1]+1]
if mapit == -1:
# set up 3D plot
if len(fig.axes)>0:
# if present, remove and replace the last axis of fig
geom=fig.axes[-1].get_geometry()
plt.delaxes(fig.axes[-1])
else:
# otherwise use full figure
geom = ((1,1,1))
ax = fig.add_subplot(geom[0],geom[1],geom[2],projection = '3d',
axisbg='None')
# define color range
tmp = data - data.min()
N = tmp/tmp.max()
# use this colormap
colmap = cm.jet
colmap.set_bad('w',1.0)
mycolmap = colmap(N) #cm.jet(N)
ph=np.array([])
jc=x.shape[0]//2  # integer division so the array shapes below are ints
xxf=np.empty((jc+1,jc+1,4))
yyf=xxf
ffld=np.empty((jc,jc,4))
xff=[]
yff=[]
fldf=[]
for k in range(0,6):
ix = np.arange(0,ny) + k*ny
xff.append(x[0:ny,ix])
yff.append(y[0:ny,ix])
fldf.append(data[0:ny,ix])
# find the missing corners by interpolation (one in the North Atlantic)
xfodd = (xff[0][-1,0]+xff[2][-1,0]+xff[4][-1,0])/3.
yfodd = (yff[0][-1,0]+yff[2][-1,0]+yff[4][-1,0])/3.
# and one south of Australia
xfeven= (xff[1][0,-1]+xff[3][0,-1]+xff[5][0,-1])/3.
yfeven= (yff[1][0,-1]+yff[3][0,-1]+yff[5][0,-1])/3.
# loop over tiles
for k in range(0,6):
kodd = 2*(k//2)   # integer division: kodd is used below as an index offset
kodd2 = kodd
if kodd==4: kodd2=kodd-6
keven = 2*(k//2)
keven2 = keven
if keven==4: keven2=keven-6
fld = fldf[k]
if np.mod(k+1,2):
xf = np.vstack( [ np.column_stack( [xff[k],xff[1+kodd][:,0]] ),
np.flipud(np.append(xff[2+kodd2][:,0],xfodd))] )
yf = np.vstack( [ np.column_stack( [yff[k],yff[1+kodd][:,0]] ),
np.flipud(np.append(yff[2+kodd2][:,0],yfodd))] )
else:
xf = np.column_stack( [np.vstack( [xff[k],xff[2+keven2][0,:]] ),
np.flipud(np.append(xff[3+keven2][0,:],
xfeven))] )
yf = np.column_stack( [np.vstack( [yff[k],yff[2+keven2][0,:]] ),
np.flipud(np.append(yff[3+keven2][0,:],
yfeven))] )
if mapit==-1:
ix = np.arange(0,ny) + k*ny
# no projection at all (projection argument is 'sphere'),
# just convert to cartesian coordinates and plot a 3D sphere
deg2rad=np.pi/180.
xcart,ycart,zcart = sph2cart( xf*deg2rad, yf*deg2rad )
ax.plot_surface(xcart,ycart,zcart,rstride=1,cstride=1,
facecolors=mycolmap[0:ny,ix],
linewidth=2,shade=False)
ph =
|
np.append(ph, ax)
|
numpy.append
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from datetime import datetime
from typing import Any, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_consts import (
AnomalyResponse,
ChangePointInterval,
ConfidenceBand,
PercentageChange,
)
from kats.utils.decomposition import TimeSeriesDecomposition
"""Statistical Significance Detector Module
This module contains simple detectors that apply a t-test over a rolling window to
check whether there is a statistically significant increase or decrease between the control and test
time periods. In addition to the univariate version of this test, this module includes a
multivariate version that uses a false discovery rate (FDR) controlling procedure to reduce noise.
"""
class StatSigDetectorModel(DetectorModel):
"""
StatSigDetectorModel is a simple detector, which compares a control and test period.
    The detector assumes that the time series data comes from an i.i.d. normal distribution,
and applies a t-test to check if the means between the control and test period are
significantly different.
We start with the history data, and then as for the current data, we apply a rolling
window, adding one data point at a time from the current data, and detecting significant
change. We return the t-statistic as a score, which reflects the severity of the
change.
We suggest using n_control >= 30 to get good estimates
Attributes:
        n_control: number of data points (or time units) of history to compare with
        n_test: number of data points (or time units) to compare the history with
        serialized_model: serialized json containing the parameters
        time_unit: unit of time used to measure the intervals. If not provided
            we infer it from the provided data.
        rem_season: bool, default False. Whether to remove seasonality from the data
            (and the historical data) before detection.
        seasonal_period: str, default value is 'weekly'. Other possible values: 'daily', 'biweekly', 'monthly', 'yearly'
        use_corrected_scores: bool, default True. Whether to use the corrected t-scores
            instead of the original ones.
        max_split_ts_length: int, default value is 500. If the given TS (excluding the historical part) is longer
            than max_split_ts_length, we transform the long univariate TS into a multivariate TS and then use the
            multivariate StatSig detector, which is faster.
>>> # Example usage:
>>> # history and ts_pt are TimeSeriesData objects and history is larger
>>> # than (n_control + n_test) so that we have sufficient history to
>>> # run the detector
>>> n_control = 28
>>> n_test = 7
>>> import random
>>> control_time = pd.date_range(start='2018-01-01', freq='D', periods=(n_control + n_test))
>>> test_time = pd.date_range(start='2018-02-05', freq='D', periods=n_test)
>>> control_val = [random.normalvariate(100,10) for _ in range(n_control + n_test)]
>>> test_val = [random.normalvariate(120,10) for _ in range(n_test)]
>>> hist_ts = TimeSeriesData(time=control_time, value=pd.Series(control_val))
>>> data_ts = TimeSeriesData(time=test_time, value=pd.Series(test_val))
>>> ss_detect = StatSigDetectorModel(n_control=n_control, n_test=n_test)
>>> anom = ss_detect.fit_predict(data=data_ts, historical_data=hist_ts)
"""
data: Optional[TimeSeriesData] = None
def __init__(
self,
n_control: Optional[int] = None,
n_test: Optional[int] = None,
serialized_model: Optional[bytes] = None,
time_unit: Optional[str] = None,
rem_season: bool = False,
seasonal_period: str = "weekly",
use_corrected_scores: bool = True,
max_split_ts_length: int = 500,
) -> None:
if serialized_model:
model_dict = json.loads(serialized_model)
self.n_test: int = model_dict["n_test"]
self.n_control: int = model_dict["n_control"]
self.time_unit: str = model_dict["time_unit"]
# for seasonality
self.rem_season: bool = model_dict.get("rem_season", rem_season)
self.seasonal_period: str = model_dict.get(
"seasonal_period", seasonal_period
)
# for big data and correct t-scores
self.use_corrected_scores: bool = model_dict.get(
"use_corrected_scores", use_corrected_scores
)
# threshold for splitting long TS
self.max_split_ts_length: int = model_dict.get(
"max_split_ts_length", max_split_ts_length
)
else:
self.n_test: Optional[int] = n_test
self.n_control: Optional[int] = n_control
self.time_unit: Optional[str] = time_unit
# for seasonality
self.rem_season: bool = rem_season
self.seasonal_period: str = seasonal_period
# big data and t-scores
self.use_corrected_scores: bool = use_corrected_scores
# threshold for splitting long TS
self.max_split_ts_length: int = max_split_ts_length
if (self.n_control is None) or (self.n_test is None):
raise ValueError(
"You must either provide serialized model or values for control "
"and test intervals."
)
self.control_interval: Optional[ChangePointInterval] = None
self.test_interval: Optional[ChangePointInterval] = None
self.response: Optional[AnomalyResponse] = None
self.is_initialized = False # flag on whether initialized or not
self.last_N = 0 # this is the size of the last chunk of data we saw
self.data_history: Optional[TimeSeriesData] = None
# for seasonality
self.data_season: Optional[TimeSeriesData] = None
# big data strategy
self.bigdata_trans_flag: Optional[bool] = None
self.remaining: Optional[int] = None
def serialize(self) -> bytes:
"""
Serializes by putting model parameters in a json
"""
model_dict = {
"n_control": self.n_control,
"n_test": self.n_test,
"time_unit": self.time_unit,
"rem_season": self.rem_season,
"seasonal_period": self.seasonal_period,
"use_corrected_scores": self.use_corrected_scores,
"max_split_ts_length": self.max_split_ts_length,
}
return json.dumps(model_dict).encode("utf-8")
def fit_predict(
self,
data: TimeSeriesData,
historical_data: Optional[TimeSeriesData] = None,
**kwargs: Any,
) -> AnomalyResponse:
"""
This is the main working function.
The function returns an AnomalyResponse object of length
equal to the length of the data.
We require len(historical_data) > (n_control + n_test).
Args:
data: TimeSeriesData, A univariate TimeSeriesData for which we are running the StatSigDetectorModel
historical_data: Optional[TimeSeriesData] Historical data used to do detection for initial points in data
"""
if not data.is_univariate():
msg = "Input is multivariate but StatSigDetector expected univariate input."
logging.error(msg)
raise ValueError(msg)
self._set_time_unit(data=data, historical_data=historical_data)
self.last_N = len(data)
# this ensures we start with a default response of
# the size of the data
self._init_response(data)
response = self.response
assert response is not None
# when there is no need to update
# just return the initial response of zeros
if not self._should_update(data=data, historical_data=historical_data):
return response
# handle cases where there is either no historical data, or
# not enough historical data
data, historical_data = self._handle_not_enough_history(
data=data,
historical_data=historical_data,
)
# remove seasonality
if self.rem_season:
sh_data = SeasonalityHandler(
data=data, seasonal_period=self.seasonal_period
)
self.data_season = sh_data.get_seasonality()
data = sh_data.remove_seasonality()
if historical_data:
sh_hist_data = SeasonalityHandler(
data=historical_data,
seasonal_period=self.seasonal_period,
)
historical_data = sh_hist_data.remove_seasonality()
self.data = data
# first initialize this with the historical data
self._init_data(historical_data)
# if using new t-scores
if self.use_corrected_scores:
if (
len(data) > self.max_split_ts_length
# pyre-ignore[16]: `Optional` has no attribute `time`.
and pd.infer_freq(historical_data.time) == pd.infer_freq(data.time)
):
self.bigdata_trans_flag = True
else:
self.bigdata_trans_flag = False
else:
self.bigdata_trans_flag = False
# if need trans to multi-TS
if self.bigdata_trans_flag:
new_data_ts = self._reorganize_big_data(self.max_split_ts_length)
ss_detect = MultiStatSigDetectorModel(
n_control=self.n_control,
n_test=self.n_test,
time_unit=self.time_unit,
rem_season=False,
seasonal_period=self.seasonal_period,
skip_rescaling=True,
use_corrected_scores=self.use_corrected_scores,
)
anom = ss_detect.fit_predict(data=new_data_ts)
self._reorganize_back(anom)
else:
self._init_control_test(
data if historical_data is None else historical_data
)
# set the flag to true
self.is_initialized = True
# now run through the data to get the prediction
for i in range(len(data)):
current_time = data.time.iloc[i]
ts_pt = TimeSeriesData(
time=pd.Series(current_time, copy=False),
value=pd.Series(data.value.iloc[i], copy=False),
)
self._update_data(ts_pt)
self._update_control_test(ts_pt)
self._update_response(ts_pt.time.iloc[0])
# add seasonality back
if self.rem_season:
data_season = self.data_season
confidence_band = response.confidence_band
predicted_ts = response.predicted_ts
assert data_season is not None
assert confidence_band is not None
assert predicted_ts is not None
start_idx = len(response.scores) - len(data_season)
datatime = response.scores.time
self.response = AnomalyResponse(
scores=response.scores,
confidence_band=ConfidenceBand(
upper=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
confidence_band.upper.value.values[:start_idx],
copy=False,
),
pd.Series(
np.asarray(
confidence_band.upper.value.values[start_idx:]
)
+ np.asarray(data_season.value.values),
copy=False,
),
],
copy=False,
),
),
lower=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
confidence_band.lower.value.values[:start_idx],
copy=False,
),
pd.Series(
np.asarray(
confidence_band.lower.value.values[start_idx:]
)
+ np.asarray(data_season.value.values),
copy=False,
),
],
copy=False,
),
),
),
predicted_ts=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
predicted_ts.value.values[:start_idx], copy=False
),
pd.Series(
|
np.asarray(predicted_ts.value.values[start_idx:])
|
numpy.asarray
|
from __future__ import division # in case this script is used in python 2
import h5py as h5
import numpy as np
import string
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.gridspec as gridspec
# import matplotlib
# for e.g., minor ticks
from matplotlib.ticker import (FormatStrFormatter,
AutoMinorLocator)
#Set latex environment for plots/labels
import matplotlib
matplotlib.rc('font', **{'family': 'sans-serif'})#, 'sans-serif': ['Helvetica']})
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath']
from matplotlib.offsetbox import AnchoredText
from matplotlib import rc
from matplotlib import rcParams
import seaborn as sns
from astropy import units as u
from astropy import constants as const
from scipy.spatial.distance import cdist
rc('font', family='serif', weight = 'bold')
rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}', r'\boldmath']  # combined: the second assignment used to overwrite the first
rc('axes', linewidth=2)
matplotlib.rcParams['xtick.major.size'] = 12
matplotlib.rcParams['ytick.major.size'] = 12
matplotlib.rcParams['xtick.minor.size'] = 8
matplotlib.rcParams['ytick.minor.size'] = 8
matplotlib.rcParams['font.weight']= 'bold'
matplotlib.rcParams.update({'font.weight': 'bold'})
fs = 24 # fontsize for plots
rc('axes', linewidth=2)
def layoutAxes(ax, nameX='', nameY='', \
labelSizeMajor = 10, fontsize = 25, second=False, labelpad=None, setMinor=True):
"""
Tiny code to do the layout for axes in matplotlib
"""
tickLengthMajor = 10
tickLengthMinor = 5
tickWidthMajor = 1.5
tickWidthMinor = 1.5
#rc('axes', linewidth=2)
#label1 always refers to first axis not the twin
if not second:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
if second:
for tick in ax.xaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1.2)
ax.tick_params(length=tickLengthMajor, width=tickWidthMajor, which='major')
ax.tick_params(length=tickLengthMinor, width=tickWidthMinor, which='minor')
ax.set_xlabel(nameX, fontsize=fontsize,labelpad=labelpad)#,fontweight='bold')
ax.set_ylabel(nameY, fontsize=fontsize,labelpad=labelpad)#, fontweight='bold')
if setMinor==True:
# add minor ticks:
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
return ax
def layoutAxesNoXandYlabel(ax, nameX='', nameY='', \
labelSizeMajor = 10, fontsize = 25, second=False, labelpad=None, setMinor=True):
"""
Tiny code to do the layout for axes in matplotlib
"""
tickLengthMajor = 10
tickLengthMinor = 5
tickWidthMajor = 1.5
tickWidthMinor = 1.5
#rc('axes', linewidth=2)
#label1 always refers to first axis not the twin
if not second:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
if second:
for tick in ax.xaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1.2)
ax.tick_params(length=tickLengthMajor, width=tickWidthMajor, which='major')
ax.tick_params(length=tickLengthMinor, width=tickWidthMinor, which='minor')
# ax.set_xlabel(nameX, fontsize=fontsize,labelpad=labelpad)#,fontweight='bold')
# ax.set_ylabel(nameY, fontsize=fontsize,labelpad=labelpad)#, fontweight='bold')
if setMinor==True:
# add minor ticks:
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
return ax
def layoutAxesNoXlabel(ax, nameX='', nameY='', \
labelSizeMajor = 10, fontsize = 25, second=False, labelpad=None, setMinor=True, rotation=90):
"""
Tiny code to do the layout for axes in matplotlib
"""
tickLengthMajor = 10
tickLengthMinor = 5
tickWidthMajor = 1.5
tickWidthMinor = 1.5
#rc('axes', linewidth=2)
#label1 always refers to first axis not the twin
if not second:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
if second:
for tick in ax.xaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1.2)
ax.tick_params(length=tickLengthMajor, width=tickWidthMajor, which='major')
ax.tick_params(length=tickLengthMinor, width=tickWidthMinor, which='minor')
# ax.set_xlabel(nameX, fontsize=fontsize,labelpad=labelpad)#,fontweight='bold')
ax.set_ylabel(nameY, fontsize=fontsize,labelpad=labelpad, rotation=rotation, va="center")#, fontweight='bold')
if setMinor==True:
# add minor ticks:
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
return ax
def layoutAxesNoYlabel(ax, nameX='', nameY='', \
labelSizeMajor = 10, fontsize = 25, second=False, labelpad=None, setMinor=True, rotation=0):
"""
Tiny code to do the layout for axes in matplotlib
"""
tickLengthMajor = 10
tickLengthMinor = 5
tickWidthMajor = 1.5
tickWidthMinor = 1.5
#rc('axes', linewidth=2)
#label1 always refers to first axis not the twin
if not second:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
# for tick in ax.yaxis.get_major_ticks():
# tick.label1.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
if second:
for tick in ax.xaxis.get_major_ticks():
tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
# for tick in ax.yaxis.get_major_ticks():
# tick.label2.set_fontsize(fontsize)
#tick.label1.set_fontweight('bold')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1.2)
ax.tick_params(length=tickLengthMajor, width=tickWidthMajor, which='major')
ax.tick_params(length=tickLengthMinor, width=tickWidthMinor, which='minor')
ax.set_xlabel(nameX, fontsize=fontsize,labelpad=labelpad, rotation=rotation, va="center")#,fontweight='bold')
# ax.set_ylabel(nameY, fontsize=fontsize,labelpad=labelpad)#, fontweight='bold')
if setMinor==True:
# add minor ticks:
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
return ax
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.75) # for box around text in plot
dictChannelsBHNSListBolt = [r'\textbf{case A}', r'\textbf{case B}', r'\textbf{case C}',\
r'\textbf{case B only stable}',\
r'\textbf{case B immediate CE}',
r'\textbf{case C immediate} \textbf{CE}',\
r'\textbf{double-core CE}', r'\textbf{other}']
zorderlist = { 'stable B':10, 'stable B no CEE':13, \
'case B immediate CE':12,'stable C':15,\
r'case C immediate CE':17,
'stable A':14, \
r'double-core CE':11, 'other':16\
}
dictChannelsBHNSListBolt = [r'\textbf{(I) Classic}', \
r'\textbf{(II) Only stable mass transfer}',\
r'\textbf{(III) Single-core CE as first mass transfer}',\
r'\textbf{(IV) Double-core CE as first mass transfer}', r'\textbf{(V) Other}']
dictChannelsBHNSList = ['classic', \
'stable B no CEE', \
'immediate CE',\
r'double-core CE', 'other']
zorderlist = { 'classic':10, 'stable B no CEE':13, \
'immediate CE':12,\
r'double-core CE':11, 'other':16\
}
# default settings for labels and names of BPS models
DCOname_dict = {'BHNS':'BHNS', 'BBH':'BHBH', 'BNS':'NSNS'}
nModels=20 #
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75', \
'unstableCaseBB', 'unstableCaseBB','alpha0_1', 'alpha0_5', 'alpha2_0', 'alpha10', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick', 'wolf_rayet_multiplier_0_1', 'wolf_rayet_multiplier_5']
alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
BPScolors = sns.color_palette("husl", nModels)
colorDirDict = {BPSnameslist[i]: BPScolors[i] for i in range(len(BPSnameslist))}
# physicalNamesBPSmodels = [r'\textbf{fiducial}',\
# r'$\beta=0.25$', r'$\beta=0.5$', r'$\beta=0.75$',r'\textbf{unstable case BB}',r'\textbf{unstable case BB + optimistic CE}',\
# r'$\alpha_{\rm{CE}}=0.1$', r'$\alpha_{\rm{CE}}=0.5$', r'$\alpha_{\rm{CE}}=2$', r'$\alpha_{\rm{CE}}=10$', r'\textbf{optimistic CE}',\
# r'\textbf{rapid SN}', r'$\rm{max} \ m_{\rm{NS}}=2.0\,\rm{M}_{\odot}$', r'$\rm{max} \ m_{\rm{NS}}=3.0\,\rm{M}_{\odot}$',\
# r'\textbf{no PISN}', r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}=100\,\rm{km}\,\rm{s}^{-1}$',r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}=30\,\rm{km}\,\rm{s}^{-1}$',\
# r'\textbf{SN} '+ r'$v_{\rm{k,BH}}=0\,\rm{km}\,\rm{s}^{-1}$', r'$\rm{f}_{\rm{WR}} = 0.1$', r'$\rm{f}_{\rm{WR}} = 5$' ]
physicalNamesBPSmodels = [r'\textbf{fiducial}',\
r'$\beta=0.25$', r'$\beta=0.5$', r'$\beta=0.75$',r'\textbf{unstable case BB}',r'\textbf{E + K}',\
r'$\alpha_{\rm{CE}}=0.1$', r'$\alpha_{\rm{CE}}=0.5$', r'$\alpha_{\rm{CE}}=2$', r'$\alpha_{\rm{CE}}=10$', r'\textbf{optimistic CE}',\
r'\textbf{rapid SN}', r'$\rm{max} \ m_{\rm{NS}}=2.0\,\rm{M}_{\odot}$', r'$\rm{max} \ m_{\rm{NS}}=3.0\,\rm{M}_{\odot}$',\
r'\textbf{no PISN}', r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}=100\,\rm{km}\,\rm{s}^{-1}$',r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}=30\,\rm{km}\,\rm{s}^{-1}$',\
r'\textbf{SN} '+ r'$v_{\rm{k,BH}}=0\,\rm{km}\,\rm{s}^{-1}$', r'$\rm{f}_{\rm{WR}} = 0.1$', r'$\rm{f}_{\rm{WR}} = 5$' ]
DCOtypeColorsDict = {'BHNS':'#66c2a5', 'BHBH':'#8da0cb', 'BBH':'#8da0cb', 'NSNS':'#fc8d62', 'BNS':'#fc8d62'}
alphabetPhysicalNameDict = {BPSnameslist[i]: physicalNamesBPSmodels[i] for i in range(len(BPSnameslist))}
physicalNamesBPSmodelsWithEnter = [r'\textbf{fiducial}',\
r'$\beta=0.25$', r'$\beta=0.5$', r'$\beta=0.75$',r'\textbf{unstable}' + '\n'+ r'\textbf{case BB}', r'\textbf{E + K}',\
r'$\alpha_{\rm{CE}}=0.1$', r'$\alpha_{\rm{CE}}=0.5$', r'$\alpha_{\rm{CE}}=2$', r'$\alpha_{\rm{CE}}=10$', r'\textbf{optimistic}' +'\n' + r'\textbf{CE}',\
r'\textbf{rapid SN}', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$2.0\,\rm{M}_{\odot}$', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$3.0\,\rm{M}_{\odot}$',\
r'\textbf{no PISN}', r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$100\,\rm{km}\,\rm{s}^{-1}$',r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$30\,\rm{km}\,\rm{s}^{-1}$',\
r'\textbf{SN} '+ r'$v_{\rm{k,BH}}$' +'\n' + r'$0\,\rm{km}\,\rm{s}^{-1}$' , r'$\rm{f}_{\rm{WR}} = 0.1$', r'$\rm{f}_{\rm{WR}} = 5$']
alphabetPhysicalNameDictWithEnter = {BPSnameslist[i]: physicalNamesBPSmodelsWithEnter[i] for i in range(len(BPSnameslist))}
# physicalNamesBPSmodelsWithEnter = [r'\textbf{fiducial}',\
# r'$\beta=0.25$', r'$\beta=0.5$', r'$\beta=0.75$',r'\textbf{unstable}' + '\n'+ r'\textbf{case BB}',\
# r'$\alpha_{\rm{CE}}=0.5$', r'$\alpha_{\rm{CE}}=2$', r'\textbf{optimistic CE}',\
# r'\textbf{rapid SN}', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$2.0\,\rm{M}_{\odot}$', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$3.0\,\rm{M}_{\odot}$',\
# r'\textbf{no PISN}', r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$100\,\rm{km}\,\rm{s}^{-1}$',r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$30\,\rm{km}\,\rm{s}^{-1}$',\
# r'\textbf{SN} '+ r'$v_{\rm{k,BH}}$' +'\n' + r'$0\,\rm{km}\,\rm{s}^{-1}$' , r'$\rm{f}_{\rm{WR}} = 0.1$', r'$\rm{f}_{\rm{WR}} = 5$']
# alphabetPhysicalNameDictWithEnter = {BPSnameslist[i]: physicalNamesBPSmodelsWithEnter[i] for i in range(len(BPSnameslist))}
colorlist = [ '#118AB2', '#EF476F', '#FFD166', '#073B4C', 'gray']
GWTC_indexDict = {'Mass1':0, 'Mass2':1, 'Mtot':2, 'Mchirp':3, 'q':4}
def obtainDataSTROOPWAFEL(param, pathToDirectory):
"""returns for STROOPWAFEL (AIS) simulation the data of wanted variable
combines the data from AIS_oratory and AIS_sampling
param = [xparam, fxparam] , are the name of the variable and hdf5 keyname where it is in
e.g. param = ['M1', 'doubleCompactObjects'] (see also: print(list(f.keys())))
pathToDirectory is pathname to Directory where AIS_oratory & AIS_sampling directories are
"""
xparam, fxparam = param
pathAIS = pathToDirectory +'/COMPASOutput.h5'
    fAIS = h5.File(pathAIS, 'r')  # open read-only (avoids h5py's old default append mode)
##### get parameter from two directories and combine them ############
xvalues = fAIS[fxparam][xparam][...].squeeze()
return xvalues
def maskTargetDCOsSTROOPWAFEL(DCOtype, boolDCOmask, f, otherSelection, otherparam):
"""returns mask of DCOs of interest
fxparam is hdf5 keyname of file where variable for which you want to mask DCOs is in
DCOtype = 'BBH' / 'ALL' / 'BHNS' or 'BNS'
    boolDCOmask = [Hubble, RLOF, Pessimistic] # boolean values: whether to mask mergers in a Hubble time,
    binaries that have RLOFSecondaryAfterCEE = True, and Pessimistic binaries (i.e. optimisticCEFlag == 0)
pathToDirectory is pathname to Directory where _oratory & _sampling directories are
"""
Hubble, RLOF, Pessimistic = boolDCOmask
fDCO = f['doubleCompactObjects']
# mask binaries of given DCO type
if DCOtype == 'BNS':
mask0 = ((fDCO['stellarType1'][...] == 13) & (fDCO['stellarType2'][...] == 13))
elif (DCOtype == 'BHNS') | (DCOtype == 'NSBH'):
mask0 = ((fDCO['stellarType1'][...] == 13) & (fDCO['stellarType2'][...] == 14)) | \
((fDCO['stellarType1'][...] == 14) & (fDCO['stellarType2'][...] == 13) )
elif DCOtype == 'BBH':
mask0 = ((fDCO['stellarType1'][...] == 14) & (fDCO['stellarType2'][...] == 14))
elif (DCOtype == 'all') | (DCOtype == 'ALL') :
mask0 = ((fDCO['stellarType1'][...] == 14) | (fDCO['stellarType1'][...] == 13))
else:
print('error: DCO type not known')
# Hubble mask
if Hubble:
mask1 = (fDCO['mergesInHubbleTimeFlag'][...]==True)
elif not Hubble:
mask1 = (fDCO['mergesInHubbleTimeFlag'][...]==True) | (fDCO['mergesInHubbleTimeFlag'][...]==False)
# RLOF mask
if RLOF:
mask2 = (fDCO['RLOFSecondaryAfterCEE'][...]==False)
elif not RLOF:
mask2 = (fDCO['RLOFSecondaryAfterCEE'][...]==False) | (fDCO['RLOFSecondaryAfterCEE'][...]==True)
# Pessimistic mask : if True mask systems that have optimistic CE flag ==1
if Pessimistic:
mask3 = np.logical_not(fDCO["optimisticCEFlag"][...] == 1)
elif not Pessimistic:
mask3 = np.logical_not(fDCO["optimisticCEFlag"][...] == 1) + \
np.logical_not(fDCO["optimisticCEFlag"][...] == 0)
# combine the different masks and the oratory and refinement masks
combinedmask = mask0 * mask1 * mask2 * mask3
combinedmask = combinedmask.squeeze()
if otherSelection =='UFD':
KpcToKM = 3.086 * 10**(16) # kpc to km
MyrToYr = 1E6 # yrs
YrToSec = 3.154 *1E7 #sec
UFD_epsilon = otherparam[0]
UFD_Rvir = otherparam[1]
Xbh1 = otherparam[2]
Rns = otherparam[3]
fSN = f['supernovae']
        seedsOfInterest = fDCO['seed'][...].squeeze()
        seedsSN = fSN['randomSeed'][...].squeeze()
        bools = np.in1d(seedsSN, seedsOfInterest)
tc = fDCO['tc'][...].squeeze()
vsys = fSN['systemicVelocity'][...].squeeze()[bools]
vsysSN2 = vsys[1:][::2]
traveldistance = tc * vsysSN2 * MyrToYr * YrToSec
radiusUFDgalaxy = UFD_epsilon * UFD_Rvir * KpcToKM
maskCandidatesUFD = (traveldistance <= radiusUFDgalaxy) | ((vsysSN2 <= 44) & (tc * MyrToYr *YrToSec<= radiusUFDgalaxy))
combinedmask = maskCandidatesUFD*combinedmask
return combinedmask
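# Hedged usage sketch for the helper above (file name and hdf5 layout as assumed
# by these functions): pessimistic BHNS mergers within a Hubble time and without
# post-CEE RLOF would be selected via
#   f = h5.File(pathToDirectory + '/COMPASOutput.h5', 'r')
#   mask = maskTargetDCOsSTROOPWAFEL('BHNS', [True, True, True], f, None, None)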
def obtainweightsSTROOPWAFEL(pathToDirectory):
"""returns weights for all DCOs and all systems for STROOPWAFEL
pathToDirectory is pathname to Directory where AIS_oratory & AIS_sampling directories are
"""
pathAIS = pathToDirectory +'/COMPASOutput.h5' # '/home/floor/Data_Thesis/bdMC/Z0_002'
    fAIS = h5.File(pathAIS, 'r')  # open read-only (avoids h5py's old default append mode)
##### get the DCO and all system weights ############
DCOsweights = fAIS['doubleCompactObjects']['weight'][...].squeeze()
systemsweights = fAIS['systems']['weight'][...].squeeze()
return DCOsweights, systemsweights
def chirpmass(m1, m2):
numer = (m1*m2)**(3./5)
denom = (m1+m2)**(1./5)
return numer/denom
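# Quick, illustrative sanity check of chirpmass(): for an equal-mass binary
# (m1 = m2 = m) the chirp mass reduces to m * 2**(-1/5), e.g. ~1.22 Msun for a
# 1.4 + 1.4 Msun neutron-star pair.
assert np.isclose(chirpmass(1.4, 1.4), 1.4 * 2. ** (-1. / 5.))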
def obtainM1BHandM2BHassymetric(m1, m2):
m1bh, m2bh = np.zeros_like(m1), np.zeros_like(m1)
maskm1heavier = ( m1 >= m2)
maskm2heavier = (m1 < m2)
m1bh[maskm1heavier] = m1[maskm1heavier]
m1bh[maskm2heavier] = m2[maskm2heavier]
m2bh[maskm1heavier] = m2[maskm1heavier]
m2bh[maskm2heavier] = m1[maskm2heavier]
return m1bh, m2bh # m1bh has all the heaviest systems
def getMaskBHNS(m1bh, m2bh):
    # add later on the 2nd explodes first
    maskBHNS = m1bh >= m2bh  # primary (m1bh) holds the more massive compact object
    return maskBHNS
def below3Msun(m1bh):
    # mask systems whose primary compact object is at most 3 Msun
    mask = m1bh <= 3
    return mask
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, shape (n, ), optional, default: None
An array of weights, of the same shape as `x`. Each value in `x`
only contributes its associated weight towards the bin count
(instead of 1).
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : float
Effective sample size using Kish's approximation.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] <NAME>, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] <NAME>, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] <NAME>, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] <NAME> and <NAME>, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
>>> "Measurement model, return two coupled measurements."
>>> m1 = np.random.normal(size=n)
>>> m2 = np.random.normal(scale=0.5, size=n)
>>> return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = np.atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self.weights = weights / np.sum(weights)
else:
self.weights = np.ones(self.n) / self.n
# Compute the effective sample size
# http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size
self.neff = 1.0 / np.sum(self.weights ** 2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = np.reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
# compute the normalised residuals
chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2
# compute the pdf
result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor
return result
__call__ = evaluate
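    # Hedged usage sketch -- the `weights` argument is what this local copy adds
    # over older scipy.stats.gaussian_kde versions:
    #   kde = gaussian_kde(np.vstack([m1, m2]), weights=w)  # w: per-sample weights
    #   density = kde(np.vstack([X.ravel(), Y.ravel()]))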
def scotts_factor(self):
return np.power(self.neff, -1./(self.d+4))
def silverman_factor(self):
return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method): # and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
# Compute the mean and residuals
_mean = np.sum(self.weights * self.dataset, axis=1)
_residual = (self.dataset - _mean[:, None])
# Compute the biased covariance
self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))
# Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)
self._data_covariance /= (1 -
|
np.sum(self.weights ** 2)
|
numpy.sum
|
import numpy as np
import onnxruntime
import pytest
from dnnv.nn.converters.onnx import *
from dnnv.nn.operations import *
def test_Mul():
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = x * y # expected output [4., 10., 18.]
op = Mul(x, y)
onnx_model = convert(OperationGraph([op]))
results = onnxruntime.backend.run(onnx_model, [])
assert len(results) == 1
result = results[0]
assert np.allclose(result, z)
op = Mul(Input((3,), np.dtype(np.float32)), Input((3,),
|
np.dtype(np.float32)
|
numpy.dtype
|
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
######################
# TicTacToeEnv class
######################
class TicTacToeEnv(gym.Env):
metadata = {'render.modes': ['human', 'ansi']}
BOARD_ROWS = 3
BOARD_COLS = 3
def __init__(self):
super().__init__()
board = Board(self.BOARD_ROWS, self.BOARD_COLS)
self.action_space = spaces.Discrete(board.board.size)
self.observation_space = spaces.Box(
low=0,
high=len(board.STONE_TYPES),
shape=board.board.shape
)
p1 = HumanPlayer(is_p1=True)
p2 = RandomPlayer(is_p1=False)
#p2 = AIPlayer(is_p1=False, rows=self.BOARD_ROWS, cols=self.BOARD_COLS)
self.env = TicTacToe(board, p1, p2)
self.reward_range = [-100., 1.]
self.np_random = None
def step(self, action):
x, y = divmod(action, 3)
x, y, symbol = self.env.p1.act(self.env.board.board, x=x, y=y)
self.env.board.put(x, y, symbol)
result = self.env.board.judge()
done = np.any(result == True)
if done:
if result[0]:
# p1 won
reward = 1
elif result[1]:
# p2 won
reward = 0
else:
# draw
reward = 0.5
info = {}
return self.env.board.board, reward, done, info
x, y, symbol = self.env.p2.act(self.env.board.board)
self.env.board.put(x, y, symbol)
result = self.env.board.judge()
done = np.any(result == True)
if result[0]:
# p1 won
reward = 1
elif result[1]:
# p2 won
reward = 0
else:
# draw
reward = 0.5
info = {}
return self.env.board.board, reward, done, info
def reset(self):
#self.env.p2.backup()
#self.env.p2.reset()
self.env.board.reset()
return self.env.board.board
def render(self, mode='human', close=False):
if mode == 'human':
self.env.board.show()
elif mode == 'ansi':
return self.env.board.hash()
else:
super().render(mode=mode)
def close(self):
pass
def seed(self, seed=None):
return self.env.p2.seed(seed)
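# Hedged usage sketch (not part of the original environment): play one episode
# with random actions. It assumes the HumanPlayer/RandomPlayer/TicTacToe helpers
# referenced in __init__ are defined in this module; the step cap guards against
# draws, since judge() only reports wins.
def demo_random_episode():
    env = TicTacToeEnv()
    env.reset()
    reward = 0.
    for _ in range(env.BOARD_ROWS * env.BOARD_COLS):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            break
    env.render()
    return reward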
###############
# Board class
###############
class Board:
STONE_TYPES = [
'.', # 0: empty
'x', # 1: stone 1
'o', # 2: stone 2
]
def __init__(self, nrows, ncols):
self.nrows = nrows
self.ncols = ncols
self.board = np.zeros((nrows, ncols), dtype=np.int32)
def reset(self):
self.board = np.zeros((self.nrows, self.ncols), dtype=np.int32)
def show(self):
print('------')
for i in range(self.nrows):
for j in range(self.ncols):
print(self.STONE_TYPES[self.board[i, j]], end=' ')
print()
print('------')
def hash(self):
return str(self.board)
def is_end(self):
return np.any(self.judge() == True)
def check(self, x, y):
if x < 0 or x >= self.nrows:
#print('invalid x')
return False
if y < 0 or y >= self.ncols:
#print('invalid y')
return False
if self.board[x, y] != 0:
#print('cannot put onto ({}, {})'.format(x, y))
return False
return True
def put(self, x, y, symbol):
if not self.check(x, y):
return False
self.board[x, y] = symbol
return True
def next_state(self, x, y, symbol):
new_board = self.new_board()
new_board.board[x, y] = symbol
return new_board
def new_board(self):
new_board = Board(self.nrows, self.ncols)
new_board.board = np.copy(self.board)
return new_board
def judge(self):
p1_won = False
p2_won = False
# check vertical
p1_won = np.any(np.all(self.board == 1, axis=0))
p2_won = np.any(np.all(self.board == 2, axis=0))
# check horizontal
p1_won = np.any(np.all(self.board == 1, axis=1)) or p1_won
p2_won = np.any(np.all(self.board == 2, axis=1)) or p2_won
# check diagonal
p1_won = np.all(np.diag(self.board) == 1) or p1_won
p2_won = np.all(np.diag(self.board) == 2) or p2_won
p1_won = np.all(np.diag(
|
np.fliplr(self.board)
|
numpy.fliplr
|
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# eea_int.py - This file is part of the PySptools package.
#
"""
PPI, NFINDR, ATGP, FIPPI classes
"""
import numpy as np
from . import eea
from .inval import *
from .docstring import *
def _normalize(M):
"""
Normalizes M to be in range [0, 1].
Parameters:
M: `numpy array`
1D, 2D or 3D data.
Returns: `numpy array`
Normalized data.
"""
minVal = np.min(M)
maxVal =
|
np.max(M)
|
numpy.max
|
import numpy as np
import pickle
import os
# import scipy.io as sio
import utils
from PIL import Image
from Libs.Datasets.Places365Dataset import Places365Dataset
# Define dataset
dataset_name = 'places365_standard'
# Paths
dataset_dir = os.path.join('Data', 'Datasets', dataset_name)
StatisticsPath = os.path.join('Attribution Results', dataset_name)
MatricesPath = os.path.join(StatisticsPath, 'Occlusion Matrices')
ResultsPath = os.path.join(StatisticsPath, 'Statistics')
# Initialize Dataset
dataset = Places365Dataset(dataset_dir, "val")
# Files names
dataset_filenames = dataset.filenames
# Scene Classes
scene_classes = dataset.classes
n_scenes = len(scene_classes)
# Semantic Classes
sem_classes = utils.readSemanticClasses(dataset_dir)
n_semantic = len(sem_classes)
# Ground Truth
labels = dataset.labels
labels_index = dataset.labelsindex
labels_index = np.asarray(labels_index)
# Predictions
with open(os.path.join(StatisticsPath, 'Validation_Predictions.pkl'), 'rb') as f:
predictions = pickle.load(f)
predictions = np.squeeze(np.asarray(predictions))
for scene_index, scene in enumerate(scene_classes):
print('Extracting statistics from scene {} ({}/{})'.format(scene, scene_index, n_scenes - 1))
# Create folder for scene
scene_folder = os.path.join(ResultsPath, scene)
if not os.path.isdir(scene_folder):
os.makedirs(scene_folder)
# Index which detections are predicted as scene index
Det = np.squeeze(np.argwhere(predictions == scene_index))
n_det = len(Det)
# Index those GT frames that are scene index
GT = np.squeeze(np.argwhere(labels_index == scene_index))
n_gt = len(GT)
# Compute True Positives and False Negatives
tp = np.asarray([x for x in Det if x in GT])
fn = np.asarray([x for x in GT if x not in Det])
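    # (equivalent, and typically faster for large arrays, since Det and GT are
    #  already sorted and unique: tp = np.intersect1d(Det, GT); fn = np.setdiff1d(GT, Det))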
# Number of correct and non correct predictions
n_correct_predictions = tp.shape[0]
n_error_predictions = fn.shape[0]
# Define matrices
dec_aciertos = {'Score Deviation': [], 'Sample': [], 'Pred': [], 'GT': []}
dec_distractors = {'Score Deviation': [], 'Sample': [], 'Pred': [], 'GT': []}
dec_predictions = {'Score Deviation': [], 'Sample': [], 'Pred': [], 'GT': []}
sem_hist_gt = []
sem_hist_pred = []
# Global statistics for ground truth. Only correct images for Score Deviation Maps
for sample_GT in GT:
# Load Matrix of predictions.
with open(os.path.join(MatricesPath, 'RGB_matrix_pred_' + str(sample_GT+1).zfill(5) + '.pkl'), 'rb') as f:
sample_mat = pickle.load(f)
# Read Semantic Image and compute histogram of labels
sem_path = os.path.join(dataset_dir, 'noisy_annotations_RGB', 'val', labels[sample_GT], dataset_filenames[sample_GT].split('.')[0] + '.png')
sem = np.asarray(Image.open(sem_path))
# Top@1 Semantic Labels are encoded in the last channel
sem = sem[:, :, 2]
# Histogram
sem_hist_gt.append(np.histogram(sem, bins=np.arange(n_semantic + 1))[0])
if sample_GT in tp:
# GT sample is in True Positives
            # Subtraction of the original probability against the one obtained by occluding each semantic class
dec_aciertos['Score Deviation'].append(sample_mat[0, scene_index] - sample_mat[1:, scene_index])
dec_aciertos['Sample'].append(sample_GT)
dec_aciertos['Pred'].append(predictions[sample_GT])
dec_aciertos['GT'].append(labels_index[sample_GT])
else:
# GT Sample is False negative
# Get which semantic class inhibition gets the max scene value
row, col = np.where(sample_mat[1:, :] == np.max(sample_mat[1:, :]))
# If is the same as scene_index is a distractor
if scene_index in col:
dec_distractors['Score Deviation'].append(row)
dec_distractors['Sample'].append(sample_GT)
dec_distractors['Pred'].append(predictions[sample_GT])
dec_distractors['GT'].append(labels_index[sample_GT])
# Global statistics for detections. Correct and non correct images
for sample_det in Det:
# Load Matrix of predictions.
with open(os.path.join(MatricesPath, 'RGB_matrix_pred_' + str(sample_det+1).zfill(5) + '.pkl'), 'rb') as f:
sample_mat = pickle.load(f)
# Read Semantic Image and compute histogram of labels
sem_path = os.path.join(dataset_dir, 'noisy_annotations_RGB', 'val', labels[sample_det], dataset_filenames[sample_det].split('.')[0] + '.png')
sem = np.asarray(Image.open(sem_path))
# Top@1 Semantic Labels are encoded in the last channel
sem = sem[:, :, 2]
# Histogram
sem_hist_pred.append(np.histogram(sem, bins=np.arange(n_semantic + 1))[0])
        # Subtraction of the original probability against the one obtained by occluding each semantic class
dec_predictions['Score Deviation'].append(sample_mat[0, scene_index] - sample_mat[1:, scene_index])
dec_predictions['Sample'].append(sample_det)
dec_predictions['Pred'].append(predictions[sample_det])
dec_predictions['GT'].append(labels_index[sample_det])
# Aggregate histograms of semantic masks and obtain probability distributions
sem_hist_gt = np.sum(np.asarray(sem_hist_gt), axis=0)
sem_hist_gt = sem_hist_gt /
|
np.sum(sem_hist_gt, axis=0)
|
numpy.sum
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
df = pd.read_pickle('forces/PROJ9712_RUN3_CLONE18_310.pkl')
df = df.loc[df['resName'] == 'K+']
qs = np.array([np.array(i) for i in df['q']])
x, y, z = qs.T
f_nb = np.array([
|
np.array(i)
|
numpy.array
|
from __future__ import division
import gc
import numpy as np
"""
Class for encapsulating a single Multiple Sequence Alignment(MSA)
"""
class Alignment:
def __init__(self):
self.desc = '' #not used for now
# chrom has identifier like 'cluster123', which mark a division on the core-genome, which is a conserved region found cross all reference genomes
self.chrom = ''
self.nseqs = 0
self.ncols = 0 # number of sites
self.seqs = [] # actual sequence for each sample
self.sample_ids = []
"""
attributes below were described in function update()
"""
self.char_mat = []
self.local_pos = []
self.count_mat = []
self.freq_mat = []
self.ref_alleles = []
self.alt_alleles = []
self.third_alleles = []
self.forth_alleles = []
self.ref_prob_mat = []
self.alt_prob_mat = []
self.third_prob_mat = []
self.forth_prob_mat = []
self.sample_presence = []
self.ref_freqs = []
self.alt_freqs = []
self.third_freqs = []
self.forth_freqs = []
self.prevalence = []
self.aligned_pctg = []
def update(self):
assert len(self.seqs) > 1
self.nseqs = len(self.seqs)
self.ncols = len(self.seqs[0].seq)
self.sample_ids = [seq.id for seq in self.seqs]
"""
generate char matrix from the aligned sequences
example:
sample1: ATCG
sample2: ATGG
sample3: ATGC
the char matrix is the transpose of [[A, T, G, C],[A, T, G, G],[A, T, G, C]]
"""
self.char_mat = np.array([np.fromstring(seq.seq, dtype='c') for seq in self.seqs])
#print self.char_mat.shape
#print self.char_mat.nbytes
"""
count A, T, G, C, N and - for each site on the sequences
local_pos stores all local positions of each site on this core-genome division (or alignment)
"""
As = np.sum(self.char_mat == b'A', axis=0)
Ts = np.sum(self.char_mat == b'T', axis=0)
Gs = np.sum(self.char_mat == b'G', axis=0)
Cs = np.sum(self.char_mat == b'C', axis=0)
Ns = np.sum(self.char_mat == b'N', axis=0)
Gaps =
|
np.sum(self.char_mat == b'-', axis=0)
|
numpy.sum
|
import numpy as np
import pandas as pd
from typing import Tuple, Union
from sklearn.covariance import EmpiricalCovariance
from modules.filters.dml.map_representation import from_map_representation_to_xy
from modules.features.segment_feature import SegmentFeature
from modules.features.landmark_feature import LandmarkFeature
from modules.features.global_positioning_feature import GlobalPositioningFeature
from modules.features.feature import Feature
# TODO LIST:
# * Remove weights from the state. They are only used during update.
# * Rename from 'DMLMCL' to 'DMMCL'
class DMLMCL:
"""
Implements the 'Digital Map-based Monte Carlo Localization' method.
"""
def __init__(self):
self.routes = {}
self.particles = None
self.n_particles = 0
return
# STATE
# ==========================
def check_is_localized(self) -> bool:
"""
Indicates whether the estimation is reliable (localized) or not.
Returns
====
bool.
True if the estimation is localized (one hypothesis on the set of hypotheses). False otherwise.
"""
p_routes = self.particles[:,1]
unique_routes = set(p_routes)
if len(unique_routes) > 1:
return False
return True
def get_mean(self) -> np.array:
"""
Get the state mean in x,y coordinates.
Warning: it does not check if the method is localized!
Returns
=========
mean: numpy.array.
The mean (x,y) position of the estimation.
"""
pointcloud = self.get_particles_as_pointcloud()
        mean = np.mean( pointcloud, axis=0 )
        return mean
def get_covariance(self) -> np.array:
"""
Get the covariance of the estimation in x,y coordinates.
Warning: it does not check if the method is localized!
Returns
=========
covariance: numpy.array.
The covariance of the estimation.
"""
pointcloud = self.get_particles_as_pointcloud()
covariance = EmpiricalCovariance().fit(pointcloud).covariance_
return covariance
    def get_mean_and_covariance(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the mean and covariance in x,y coordinates.
Warning: it does not check if the method is localized!
Returns
=========
mean: numpy.array.
The mean (x,y) position of the estimation.
covariance: numpy.array.
The covariance of the estimation.
"""
pointcloud = self.get_particles_as_pointcloud()
mean = np.mean( pointcloud, axis=0 )
covariance = EmpiricalCovariance().fit(pointcloud).covariance_
return mean, covariance
# ==========================
# MAP
# ==========================
def add_route(self, route_id : int, ways : pd.DataFrame):
"""
Add a route to the set of routes.
Parameters
===========
route_id: int.
The identifier of the route.
ways: pandas.DataFrame.
The DataFrame of the information of ways contained in the route.
"""
self.routes[route_id] = ways
return
def get_particles_as_pointcloud(self) -> np.array:
"""
Provide the particles as a 2D array of their x,y positions.
Returns
==========
coords_array: numpy.array.
(n_particles,2) array of the xy positions.
"""
coords_array = np.empty((self.n_particles,2))
for row_id in range(self.n_particles):
x, r, w = self.particles[row_id,:]
p_coords = from_map_representation_to_xy(x, self.routes[int(r)])
coords_array[row_id, :] = p_coords
return coords_array
# ==========================
# PARTICLES' MANAGEMENT
# ==========================
def sample_on_route(self, mean : float, std : float, route_id : int, n_particles : int):
"""
Sample particles from a gaussian distribution
Parameters
==========
mean: float.
Mean of the gaussian distribution.
std: float.
Standard deviation of the gaussian distribution.
        route_id: int.
            The route index to be assigned to each particle.
        n_particles: int.
            The number of particles to sample.
"""
assert route_id in self.routes, "Error: route not initialized yet!"
particles_x = np.random.normal(loc=mean, scale=std, size=(n_particles,1))
particles_r = np.full((n_particles,1), fill_value=route_id ,dtype="uint32")
particles_w = np.ones((n_particles,1),dtype=float)
particles = np.hstack((particles_x, particles_r, particles_w))
if( self.particles is None ):
self.particles = particles
else:
self.particles = np.vstack((self.particles, particles))
self.n_particles = self.particles.shape[0]
return
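    # Hedged usage sketch (ways_df stands in for a ways DataFrame as described
    # in add_route):
    #   mcl = DMLMCL()
    #   mcl.add_route(0, ways_df)
    #   mcl.sample_on_route(mean=10.0, std=2.0, route_id=0, n_particles=500)
    #   xy = mcl.get_particles_as_pointcloud()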
def copy_to_route(self, from_idx : int, to_idx : int):
"""
Copies particles from a route to another route.
Parameters
=======
from_idx: int.
The identifier of the route from which the hypothesis's
distribution is going to be copied.
to_idx: int.
The identifier of the route to which the hypothesis's
distribution is going to be copied.
"""
ids_copy = np.where( self.particles[:,1] == from_idx )
particles_copy = np.copy(self.particles[ids_copy,:]).reshape(-1,3)
particles_copy[:,1] = to_idx
stack =
|
np.vstack([self.particles, particles_copy])
|
numpy.vstack
|
# <Source: https://github.com/rtqichen/ffjord/blob/master/lib/toy_data.py >
import numpy as np
import sklearn
import torch
import sklearn.datasets
from PIL import Image
import os
# Dataset iterator
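# Hedged usage sketch: draw one batch from a named toy distribution, e.g.
#   rng = np.random.RandomState(0)
#   batch = inf_train_gen("moons", rng=rng, batch_size=256)   # -> (256, 2) float32 array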
def inf_train_gen(data, rng=None, batch_size=200):
if rng is None:
rng = np.random.RandomState()
#rng = np.random.RandomState(42)
#print(rng)
if data == "1Normal":
z_dist = torch.distributions.Normal(loc=2, scale=.5)
z = z_dist.sample((batch_size, 1))
return z
if data == "2spirals-8gaussians":
data1 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
return np.concatenate([data1, data2], axis=1)
if data == "4-2spirals-8gaussians":
data1 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
data3 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data4 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
return np.concatenate([data1, data2, data3, data4], axis=1)
if data == "8-2spirals-8gaussians":
data1 = inf_train_gen("4-2spirals-8gaussians", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("4-2spirals-8gaussians", rng=rng, batch_size=batch_size)
return np.concatenate([data1, data2], axis=1)
if data == "3-MIX_DEP":
data1 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
data3 = inf_train_gen("circles", rng=rng, batch_size=batch_size)
p = rng.randint(0, 2, batch_size).reshape(-1, 1)
p = np.concatenate([p, p], axis=1)
dist = 2*((data1[:, 0]**2 + data1[:, 1]**2)**.5).reshape(-1, 1) - .5
data3 = np.concatenate([dist, dist], axis=1) * data2*.5 + rng.randn(batch_size, 2) * .1
return np.concatenate([data1, data2, data3], axis=1).astype(data2.dtype)
if data == "7-pyramid":
data1 = inf_train_gen("circles", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
data3 = rng.randn(batch_size, 1) + data1.max(1).reshape(-1, 1)
data4 = rng.randn(batch_size, 1) + data2.min(1).reshape(-1, 1)
p = rng.randint(0, 2, batch_size).reshape(-1, 1)
data5 = p * np.sin(data3 + data4) + (1 - p) * np.cos(data3 + data4) + rng.randn(batch_size, 1)
return np.concatenate([data1, data2, data3, data4, data5], axis=1).astype(data2.dtype)
if data == "8-MIX":
data1 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
data3 = inf_train_gen("swissroll", rng=rng, batch_size=batch_size)
data4 = inf_train_gen("circles", rng=rng, batch_size=batch_size)
data8 = inf_train_gen("moons", rng=rng, batch_size=batch_size)
data6 = inf_train_gen("pinwheel", rng=rng, batch_size=batch_size)
data7 = inf_train_gen("checkerboard", rng=rng, batch_size=batch_size)
data5 = inf_train_gen("line", rng=rng, batch_size=batch_size)
std = np.array([1.604934 , 1.584863 , 2.0310535, 2.0305095, 1.337718 , 1.4043778, 1.6944685, 1.6935346,
1.7434783, 1.0092416, 1.4860426, 1.485661 , 2.3067558, 2.311637 , 1.4430547, 1.4430547], dtype=np.float32)
data = np.concatenate([data1, data2, data3, data4, data5, data6, data7, data8], axis=1)
return data/std
if data == "7-MIX":
data1 = inf_train_gen("2spirals", rng=rng, batch_size=batch_size)
data2 = inf_train_gen("8gaussians", rng=rng, batch_size=batch_size)
data3 = inf_train_gen("swissroll", rng=rng, batch_size=batch_size)
data4 = inf_train_gen("circles", rng=rng, batch_size=batch_size)
data5 = inf_train_gen("moons", rng=rng, batch_size=batch_size)
data6 = inf_train_gen("pinwheel", rng=rng, batch_size=batch_size)
data7 = inf_train_gen("checkerboard", rng=rng, batch_size=batch_size)
std = np.array([1.604934 , 1.584863 , 2.0310535, 2.0305095, 1.337718 , 1.4043778, 1.6944685, 1.6935346,
1.7434783, 1.0092416, 1.4860426, 1.485661 , 2.3067558, 2.311637], dtype=np.float32)
data = np.concatenate([data1, data2, data3, data4, data5, data6, data7], axis=1)
return data/std
if data == "swissroll":
data = sklearn.datasets.make_swiss_roll(n_samples=batch_size, noise=1.0)[0]
data = data.astype("float32")[:, [0, 2]]
data /= 5
return data
elif data == "circles":
data = sklearn.datasets.make_circles(n_samples=batch_size, factor=.5, noise=0.08)[0]
data = data.astype("float32")
data *= 3
return data
elif data == "moons":
data = sklearn.datasets.make_moons(n_samples=batch_size, noise=0.1)[0]
data = data.astype("float32")
data = data * 2 + np.array([-1, -0.2])
data = data.astype("float32")
return data
elif data == "8gaussians":
scale = 4.
centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)), (-1. / np.sqrt(2),
1. / np.sqrt(2)), (-1. / np.sqrt(2), -1. / np.sqrt(2))]
centers = [(scale * x, scale * y) for x, y in centers]
dataset = []
for i in range(batch_size):
point = rng.randn(2) * 0.5
idx = rng.randint(8)
center = centers[idx]
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype="float32")
dataset /= 1.414
return dataset
elif data == "2gaussians":
scale = 4.
centers = [(.5, -.5), (-.5, .5)]
centers = [(scale * x, scale * y) for x, y in centers]
dataset = []
for i in range(batch_size):
point = rng.randn(2) * .75
idx = rng.randint(2)
center = centers[idx]
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype="float32")
#dataset /= 1.414
return dataset
elif data == "4gaussians":
scale = 4.
centers = [(.5, -.5), (-.5, .5), (.5, .5), (-.5, -.5)]
centers = [(scale * x, scale * y) for x, y in centers]
dataset = []
for i in range(batch_size):
point = rng.randn(2) * .75
idx = rng.randint(4)
center = centers[idx]
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype="float32")
# dataset /= 1.414
return dataset
elif data == "2igaussians":
scale = 4.
centers = [(.5, 0.), (-.5, .0)]
centers = [(scale * x, scale * y) for x, y in centers]
dataset = []
for i in range(batch_size):
point = rng.randn(2) * .75
idx = rng.randint(2)
center = centers[idx]
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype="float32")
# dataset /= 1.414
return dataset
elif data == "conditionnal8gaussians":
scale = 4.
centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)), (-1. / np.sqrt(2),
1. / np.sqrt(2)), (-1. / np.sqrt(2), -1. / np.sqrt(2))]
centers = [(scale * x, scale * y) for x, y in centers]
dataset = []
context = np.zeros((batch_size, 8))
for i in range(batch_size):
point = rng.randn(2) * 0.5
idx = rng.randint(8)
context[i, idx] = 1
center = centers[idx]
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype="float32")
dataset /= 1.414
return dataset, context
elif data == "pinwheel":
radial_std = 0.3
tangential_std = 0.1
num_classes = 5
num_per_class = batch_size // 5
rate = 0.25
rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
features = rng.randn(num_classes*num_per_class, 2) \
* np.array([radial_std, tangential_std])
features[:, 0] += 1.
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate *
|
np.exp(features[:, 0])
|
numpy.exp
|
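A hedged sketch of the numpy.exp call this pinwheel row completes, with small made-up stand-ins for the feature and angle arrays.
# Illustrative only: radially warp pinwheel angles with np.exp, as in the row's completion
import numpy as np
rng = np.random.RandomState(0)
features = rng.randn(10, 2) * np.array([0.3, 0.1])
rads = np.linspace(0, 2 * np.pi, 5, endpoint=False)
labels = np.repeat(np.arange(5), 2)
angles = rads[labels] + 0.25 * np.exp(features[:, 0])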
import supereeg as se
import numpy as np
import scipy
import sys
import pycwt as wavelet
import matplotlib.pyplot as plt
import sklearn
if __name__ == "__main__":
fname = sys.argv[1]
# try:
bo = se.load(fname)
freqs = np.logspace(
|
np.log10(2)
|
numpy.log10
|
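A brief sketch of the numpy.log10 usage inside np.logspace that this row targets; the 2-100 Hz bounds and the count of 50 are assumptions, since the original call is truncated.
# Illustrative only: log-spaced frequencies between assumed bounds of 2 Hz and 100 Hz
import numpy as np
freqs = np.logspace(np.log10(2), np.log10(100), num=50)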
'''
Tests the model
'''
import numpy as np
import sys
import os
import json
from tqdm import tqdm
from keras.models import load_model
from keras.models import model_from_json
from keras import backend as K
import argparse
import pickle
def dist(x,y):
s = 0
z = x-y
for element in z:
s += element**2
return np.sqrt(s)
def closest(dictionary, vec):
min_dist = 1000000000000
for key,val in dictionary.items():
v = np.array(val)[0]
d = dist(v, vec)
if d < min_dist:
min_dist = d
closest = key
closest_vec = val
return closest, np.array(closest_vec)
if __name__ == '__main__':
########## PARSE ARGUMENTS ##########
parser = argparse.ArgumentParser(description="Arguments for training.")
parser.add_argument('--include_grammar', required=True, action='store', dest='INCLUDE_POS',
default="Y", help='Y to include the parts of speech in training,\
N to exclude parts of speech in training.')
parser = parser.parse_args()
########## SET DIRECTORIES ##########
DATA_DIR = os.path.join("data", "train", "cleaned")
MAPPING_FILE = os.path.join("utils", "mapping.pkl")
if parser.INCLUDE_POS.lower() == 'y':
INCLUDE_POS = True
RNN_MODEL = os.path.join("models", "rnn_model_pos.hdf5")
elif parser.INCLUDE_POS.lower() == 'n':
INCLUDE_POS = False
RNN_MODEL = os.path.join("models", "rnn_model_no_pos.hdf5")
else:
print("Invalid argument for \"--include_grammar\"")
sys.exit()
NUM_POS_TAGS = 47
########## IMPORT DATA ##########
with open(MAPPING_FILE, 'rb') as f:
mapping = pickle.load(f)
print("**** Data Loaded ****")
########## LOAD MODEL ##########
model = load_model(RNN_MODEL)
print("**** Models Loaded ****")
########## GENERATE ##########
print("**** Generating Sentences ****")
# set up start token
token = mapping['ST']
token = np.array(token)
token = np.reshape(token, (1,) + token.shape)
if INCLUDE_POS:
final_shape = token.shape[-1] + NUM_POS_TAGS
else:
final_shape = token.shape[-1]
tmp = np.zeros(shape=(1,1,final_shape))
tmp[0,0,:len(token[0,0])] = token[0,0,:]
token = tmp
noise = np.random.rand(token.shape[0], token.shape[1], token.shape[2])
noise /= 10 #small amount of noise
print(token.shape)
print(noise.shape)
en_count = 0
words = []
words.append('ST')
# generate words
while en_count <= 50:
out = model.predict([token, noise])
# snap the network's prediction to the closest real word, and also
# snap the network's prediction to the closest vector in our space
# so that it predicts with real words as previous values
closest_word, closest_vec = closest(mapping, out[0,0,:])
token = np.zeros(shape=out.shape)
token[0,0,:] = closest_vec
# fix shapes
tmp =
|
np.zeros(shape=(1,1,final_shape))
|
numpy.zeros
|
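A short sketch of the numpy.zeros padding step this row completes, using a hypothetical embedding size; only the shapes matter here.
# Illustrative only: pad a (1, 1, k) token vector into a wider (1, 1, final_shape) buffer
import numpy as np
token = np.random.rand(1, 1, 300)    # hypothetical 300-dim word vector
final_shape = 300 + 47               # plus an assumed 47 part-of-speech tags
tmp = np.zeros(shape=(1, 1, final_shape))
tmp[0, 0, :token.shape[-1]] = token[0, 0, :]
token = tmp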
"""
Scales have guides and these are what help users make sense of
the data mapped onto the scale. Common examples of guides include
the x-axis, the y-axis, the keyed legend and a colorbar legend.
The guides have demarcations(breaks), some of which must be labelled.
The `*_format` functions below create functions that convert data
values as understood by a specific scale and return string
representations of those values. Manipulating the string
representation of a value helps improve readability of the guide.
"""
import re
from bisect import bisect_right
from warnings import warn
import numpy as np
from matplotlib.dates import DateFormatter, date2num
from matplotlib.ticker import ScalarFormatter
from .breaks import timedelta_helper
from .utils import round_any, precision, match
from .utils import same_log10_order_of_magnitude
__all__ = ['custom_format', 'currency_format', 'dollar_format',
'percent_format', 'scientific_format', 'date_format',
'mpl_format', 'log_format', 'timedelta_format',
'pvalue_format', 'ordinal_format', 'number_bytes_format']
class custom_format:
"""
Custom format
Parameters
----------
fmt : str, optional
Format string. Default is the generic new style
format braces, ``{}``.
style : 'new' | 'old'
Whether to use new style or old style formatting.
New style uses the :meth:`str.format` while old
style uses ``%``. The format string must be written
accordingly.
Examples
--------
>>> formatter = custom_format('{:.2f} USD')
>>> formatter([3.987, 2, 42.42])
['3.99 USD', '2.00 USD', '42.42 USD']
"""
def __init__(self, fmt='{}', style='new'):
self.fmt = fmt
self.style = style
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
Returns
-------
out : list
List of strings.
"""
if self.style == 'new':
return [self.fmt.format(val) for val in x]
elif self.style == 'old':
return [self.fmt % val for val in x]
else:
raise ValueError(
"style should be either 'new' or 'old'")
# formatting functions
class currency_format:
"""
Currency formatter
Parameters
----------
prefix : str
What to put before the value.
suffix : str
What to put after the value.
digits : int
Number of significant digits
big_mark : str
The thousands separator. This is usually
a comma or a dot.
Examples
--------
>>> x = [1.232, 99.2334, 4.6, 9, 4500]
>>> currency_format()(x)
['$1.23', '$99.23', '$4.60', '$9.00', '$4500.00']
>>> currency_format('C$', digits=0, big_mark=',')(x)
['C$1', 'C$99', 'C$5', 'C$9', 'C$4,500']
"""
def __init__(self, prefix='$', suffix='', digits=2, big_mark=''):
self.prefix = prefix
self.suffix = suffix
self.digits = digits
self.big_mark = big_mark
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
Returns
-------
out : list
List of strings.
"""
# create {:.2f} or {:,.2f}
big_mark = self.big_mark
comma = ',' if big_mark else ''
tpl = ''.join((self.prefix, '{:', comma, '.',
str(self.digits), 'f}', self.suffix))
labels = [tpl.format(val) for val in x]
if big_mark and big_mark != ',':
labels = [val.replace(',', big_mark) for val in labels]
return labels
dollar_format = currency_format
dollar = dollar_format()
class comma_format:
"""
Format number with commas separating thousands
Parameters
----------
digits : int
Number of digits after the decimal point.
Examples
--------
>>> comma_format()([1000, 2, 33000, 400])
['1,000', '2', '33,000', '400']
"""
def __init__(self, digits=0):
self.formatter = currency_format(
prefix='', digits=digits, big_mark=',')
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
Returns
-------
out : list
List of strings.
"""
return self.formatter(x)
class percent_format:
"""
Percent formatter
Multiply by one hundred and display percent sign
Parameters
----------
use_comma : bool
If True, use a comma to separate the thousands.
Default is False.
Examples
--------
>>> formatter = percent_format()
>>> formatter([.45, 9.515, .01])
['45%', '952%', '1%']
>>> formatter([.654, .8963, .1])
['65.4%', '89.6%', '10.0%']
"""
def __init__(self, use_comma=False):
self.big_mark = ',' if use_comma else ''
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
Returns
-------
out : list
List of strings.
"""
if len(x) == 0:
return []
_precision = precision(x)
x = round_any(x, _precision / 100) * 100
        # When the precision is less than 1, we show digits after the decimal point
if _precision > 1:
digits = 0
else:
digits = abs(int(np.log10(_precision)))
formatter = currency_format(prefix='',
suffix='%',
digits=digits,
big_mark=self.big_mark)
labels = formatter(x)
# Remove unnecessary zeros after the decimal
pattern = re.compile(r'\.0+%$')
if all(pattern.search(val) for val in labels):
labels = [pattern.sub('%', val) for val in labels]
return labels
percent = percent_format()
class scientific_format:
"""
Scientific formatter
Parameters
----------
digits : int
Significant digits.
Examples
--------
>>> x = [.12, .23, .34, 45]
>>> scientific_format()(x)
['1.2e-01', '2.3e-01', '3.4e-01', '4.5e+01']
Notes
-----
Be careful when using many digits (15+ on a 64
    bit computer). Consider the `machine epsilon`_.
.. _machine epsilon: https://en.wikipedia.org/wiki/Machine_epsilon
"""
def __init__(self, digits=3):
tpl = ''.join(['{:.', str(digits), 'e}'])
self.formatter = custom_format(tpl)
def __call__(self, x):
if len(x) == 0:
return []
zeros_re = re.compile(r'(0+)e')
def count_zeros(s):
match = zeros_re.search(s)
if match:
return len(match.group(1))
else:
return 0
# format and then remove superfluous zeros
labels = self.formatter(x)
n = min([count_zeros(val) for val in labels])
if n:
labels = [val.replace('0'*n+'e', 'e') for val in labels]
return labels
scientific = scientific_format()
def _format(formatter, x):
"""
Helper to format and tidy up
"""
# For MPL to play nice
formatter.create_dummy_axis()
# For sensible decimal places
formatter.set_locs([val for val in x if ~np.isnan(val)])
try:
oom = int(formatter.orderOfMagnitude)
except AttributeError:
oom = 0
labels = [formatter(tick) for tick in x]
# Remove unnecessary decimals
pattern = re.compile(r'\.0+$')
for i, label in enumerate(labels):
match = pattern.search(label)
if match:
labels[i] = pattern.sub('', label)
# MPL does not add the exponential component
if oom:
labels = ['{}e{}'.format(s, oom) if s != '0' else s
for s in labels]
return labels
class mpl_format:
"""
Format using MPL formatter for scalars
Examples
--------
>>> mpl_format()([.654, .8963, .1])
['0.6540', '0.8963', '0.1000']
"""
def __init__(self):
self.formatter = ScalarFormatter(useOffset=False)
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
Returns
-------
out : list
List of strings.
"""
return _format(self.formatter, x)
class log_format:
"""
Log Formatter
Parameters
----------
base : int
Base of the logarithm. Default is 10.
exponent_limits : tuple
        limits (int, int); if any of the powers of the numbers
        falls outside these limits, then the labels will be in
        exponent form. This only applies for base 10.
Examples
--------
>>> log_format()([0.001, 0.1, 100])
['0.001', '0.1', '100']
>>> log_format()([0.0001, 0.1, 10000])
['1e-4', '1e-1', '1e4']
"""
def __init__(self, base=10, exponent_limits=(-4, 4), **kwargs):
self.base = base
self.exponent_limits = exponent_limits
if 'exponent_threshold' in kwargs:
            warn(
                "`exponent_threshold` parameter has been deprecated. "
                "Use `exponent_limits` instead.",
                DeprecationWarning)
def _tidyup_labels(self, labels):
"""
Make all labels uniform in format and remove redundant zeros
for labels in exponential format.
Parameters
----------
labels : list-like
Labels to be tidied.
Returns
-------
out : list-like
Labels
"""
def remove_zeroes(s):
"""
Remove unnecessary zeros for float string s
"""
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
exponent = int(tup[1])
if exponent:
s = '%se%d' % (mantissa, exponent)
else:
s = mantissa
return s
def as_exp(s):
"""
Float string s as in exponential format
"""
return s if 'e' in s else '{:1.0e}'.format(float(s))
# If any are in exponential format, make all of
        # them exponential
has_e =
|
np.array(['e' in x for x in labels])
|
numpy.array
|
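A minimal example of the numpy.array call completing this row: building a boolean mask over tick labels that are already in exponential form.
# Illustrative only: flag which labels contain an 'e'
import numpy as np
labels = ['0.001', '1e-01', '100']
has_e = np.array(['e' in x for x in labels])    # array([False,  True, False])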
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import cunumeric as num
def compare_assert(a_np, a_num):
if not num.allclose(a_np, a_num):
print("numpy, shape " + str(a_np.shape) + ":")
print(a_np)
print("cuNumeric, shape " + str(a_num.shape) + ":")
print(a_num)
assert False
def check_sort_axis(a_np, a_num, axis):
compare_assert(a_np, a_num)
print("Sorting axis " + str(axis) + ":")
sort_np = np.sort(a_np, axis, kind="stable")
sort_num = num.sort(a_num, axis, kind="stable")
compare_assert(sort_np, sort_num)
sort_np = np.sort(a_np, axis)
sort_num = num.sort(a_num, axis)
compare_assert(sort_np, sort_num)
argsort_np = np.argsort(a_np, axis, kind="stable")
argsort_num = num.argsort(a_num, axis, kind="stable")
compare_assert(argsort_np, argsort_num)
def check_1D():
np.random.seed(42)
A_np = np.array(np.random.randint(10, size=30), dtype=np.int32)
A_num = num.array(A_np)
print("Sorting array : " + str(A_np))
sortA_np = np.sort(A_np)
print("Result numpy : " + str(sortA_np))
sortA_num = num.sort(A_num)
print("Result cunumeric: " + str(sortA_num))
compare_assert(sortA_np, sortA_num)
A_num.sort()
print("Result (inplace): " + str(A_num))
compare_assert(sortA_np, A_num)
def check_2D():
np.random.seed(42)
x_dim = 5
y_dim = 3
A_np = np.array(
|
np.random.randint(10, size=x_dim * y_dim)
|
numpy.random.randint
|
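A hedged sketch of the numpy.random.randint call this cuNumeric test row completes; the reshape to (x_dim, y_dim) is an assumption, since the original statement is cut off.
# Illustrative only: a 5x3 int32 array built the way the truncated test appears to
import numpy as np
np.random.seed(42)
x_dim, y_dim = 5, 3
A_np = np.array(np.random.randint(10, size=x_dim * y_dim), dtype=np.int32).reshape(x_dim, y_dim)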
import torch
import numpy as np
import torch.utils.data as dataloader
import os
from config import Config
from models.segmentor_v1 import HybridUNet_single_out_consistency
from miscellaneous.metrics import dice, rAVD, hd, brier, nll, ece
import pandas as pd
from data.datagenerator import CTData_test
from miscellaneous.utils import Evaluation
from matplotlib import pyplot as plt
import h5py
import time
import cv2 as cv
from sklearn.metrics import brier_score_loss
from sklearn.calibration import calibration_curve
def online_eval(model, dataloader, txtlog, submit_path, uncertaintys_path, save_segmentation, save_uncertainty):
txtlog.write( "Dice_mean fg|bg|hausdorff_dist|ravd|ece|nll|sklearn_brier\n")
my_evaluation = Evaluation()
start_time = time.time()
with torch.no_grad():
dice_new_list = []
data_dict_list = []
hausdorff_dist_list = []
ravd_list = []
shape_list = []
testset_list_pre = []
testset_list_gt = []
nll_list = []
brier_list = []
brier_sklearn_list = []
ece_list = []
for data_val in dataloader:
images_val, targets_val, subject, slice, images_origin = data_val
model.eval()
images_val = images_val.to(device)
targets_val = targets_val.to(device)
outputs = model(images_val, test_config.lamda_sem)
# final_out [i-1,i,i+1]
outputs_val = outputs.final_out
softmax = outputs.softmax_out
# calculate predicted entropy as uncertainty
softmax_1 = torch.unsqueeze(softmax[:,1,...],dim=1)
softmax_2 = torch.unsqueeze(softmax[:, 3, ...], dim=1)
softmax_3 = torch.unsqueeze(softmax[:, 5, ...], dim=1)
softmax_fg = torch.cat((softmax_1, softmax_2, softmax_3), dim=1)
softmax_fg_numpy = softmax_fg.data.cpu().numpy()
softmax_fg_numpy = np.squeeze(softmax_fg_numpy, axis=0)
mean_fg = np.mean(softmax_fg_numpy, axis=0)
entropy = -mean_fg*np.log(mean_fg)
# softmax outputs for uncertainty quantification
softmax_final_out = softmax[:,6:8,...]
softmax_final_out = np.squeeze(softmax_final_out.data.cpu().numpy(), axis=0)
                # process slice by slice
outputs_val_1 = outputs_val[:,0:2, ...]
image_origin = images_origin.data.cpu().numpy()
image_origin1 = np.squeeze(image_origin, axis=0)
image_origin1 = image_origin1[:, :, 1]
_, predicted_1 = torch.max(outputs_val_1.data, 1)
# ----------Compute dice-----------
predicted_val_1 = predicted_1.data.cpu().numpy()
subject_val = subject.data.cpu().numpy()
slice_val = slice.data.cpu().numpy()
slice_val_1 = slice_val[0][1]
targets_val = targets_val.data.cpu().numpy()
targets_val_1 = targets_val[:,1, ...]
shape_list.append(predicted_val_1.shape)
data_dict_list.append({"subject": subject_val[0], "slice": slice_val_1, "pre": np.squeeze(predicted_val_1,axis=0),
"target": np.squeeze(targets_val_1, axis=0), "image": image_origin1, "uncertainty": entropy, "softmax_out":softmax_final_out})
            # measure the elapsed time of uncertainty quantification
end_time = time.time()
print("elapsed:{}".format(end_time-start_time))
        # group the slices by subject using pandas
pd_data = pd.DataFrame(data_dict_list)
for subject, volume_data in pd_data.groupby("subject"):
pre = volume_data["pre"]
tar = volume_data["target"]
slices = volume_data["slice"]
image = volume_data["image"]
uncertain = volume_data["uncertainty"]
softmax_prob = volume_data["softmax_out"]
pre_array = pre.values
target_array = tar.values
image_array = image.values
uncertain_arr = uncertain.values
slices_arr = slices.values
softmax_prob_arr = softmax_prob.values
pre_temp = np.zeros((len(pre_array), pre_array[0].shape[0], pre_array[0].shape[1]), dtype="int16")
target_temp = np.zeros((len(pre_array), target_array[0].shape[0], target_array[0].shape[1]), dtype="int16")
            # dimensions: slices*class*width*height
softmax_probs_temp = np.zeros((len(pre_array), softmax_prob_arr[0].shape[0], softmax_prob_arr[0].shape[1],softmax_prob_arr[0].shape[2]), dtype="float32")
for i in range(len(pre_array)):
pre_temp[i, :, :] = pre_array[i]
target_temp[i, :, :] = target_array[i]
softmax_probs_temp[i,:,:,:] = softmax_prob_arr[i]
            # save the predictions, the ground truth and the images
if save_segmentation:
image_slice = image_array[i]
# save image and segmentation
my_evaluation.save_contour_label(image_slice.astype("int16"),
target_array[i],save_path=submit_path, color="red", file_name=str(subject)+"_"+
str(slices_arr[i])+"label",show_mask=True)
my_evaluation.save_contour_label(image_slice.astype("int16"),
pre_array[i], save_path=submit_path, color="blue", file_name=str(subject)+"_"+
str(slices_arr[i])+"pre", show_mask=True)
orig_path = os.path.join(submit_path, str(subject)+"_"+str(slices_arr[i])+'.png')
cv.imwrite(orig_path, image_slice.astype("uint8"))
if save_uncertainty:
# Predicted error map
error = np.abs(pre_array[i]-target_array[i])
error_name = str(subject) + "_" + str(slices_arr[i]) + "error.png"
error_file_path = os.path.join(uncertaintys_path, error_name)
plt.figure()
plt.imshow(error, cmap=plt.cm.Reds, interpolation='nearest')
                    # Visualization of the uncertainty
file_name = str(subject) + "_" + str(slices_arr[i]) + ".png"
file_path = os.path.join(uncertaintys_path, file_name)
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.savefig(error_file_path)
plt.clf()
plt.cla()
plt.close()
plt.figure()
plt.imshow(uncertain_arr[i], cmap=plt.cm.rainbow, interpolation='nearest')
plt.colorbar()
plt.xticks([])
plt.yticks([])
# plt.axes('off')
plt.savefig(file_path)
plt.clf()
plt.cla()
plt.close()
dsc_list1 = []
if 0 == np.count_nonzero(pre_temp):
print("zero"+"_"+str(subject))
continue
# calculate the dice metric
for i in range(0, test_config.num_classes):
dsc_i = dice(pre_temp, target_temp, i)
dsc_list1.append(dsc_i)
            # Calculate Hausdorff distance and rAVD
hausdorff_dist = hd(pre_temp, target_temp, [5, 0.42, 0.42])
# we measure the absolute volume difference
ravd = abs(rAVD(pre_temp, target_temp))
# calculate the volume of ICH for GT and predictions
volume_gt = calculate_volume(target_temp)
volume_pre = calculate_volume(pre_temp)
# Evaluate uncertainty qualification with nll, brier, ece
softmax_probs_temp = softmax_probs_temp.transpose(1,0,2,3)
            brier_score = brier(torch.from_numpy(softmax_probs_temp).float(), torch.from_numpy(target_temp).long())
ece_subject_wise,_,_= ece(softmax_probs_temp[1,:,:,:], target_temp, 10)
# Test sklearn
target_onehot_temp = one_hot(target_temp, 2)
brier_sklearn = brier_score_loss(target_onehot_temp[0, ...].flatten(), softmax_probs_temp[0, ...].flatten())+\
brier_score_loss(target_onehot_temp[1,...].flatten(), softmax_probs_temp[1,...].flatten())
nll_score = nll(torch.from_numpy(softmax_probs_temp).float(), torch.from_numpy(target_temp).long())
print("nll_score:{} brier_socre:{}".format(nll_score.data.numpy(), brier_socre.data.numpy()))
print("dice_bg:{} dice_fg:{} Hausdorff_dist:{} ravd:{}".format(dsc_list1[0], dsc_list1[1],hausdorff_dist, ravd))
txtlog.write("ID{:30} {:3f} {:3f} {:3f} {:3f} {:3f} {:3f} {:3f} {:3f} {:3f} \n".format(subject, dsc_list1[0], dsc_list1[1],
hausdorff_dist, ravd, ece_subject_wise, nll_score, brier_sklearn,volume_gt, volume_pre))
dice_new_list.append(dsc_list1)
hausdorff_dist_list.append(hausdorff_dist)
ravd_list.append(ravd)
            brier_list.append(brier_score.data.numpy())
nll_list.append(nll_score.data.numpy())
brier_sklearn_list.append(brier_sklearn)
ece_list.append(ece_subject_wise)
# store all the test data
testset_list_pre.append(softmax_probs_temp[1,:,:,:])
testset_list_gt.append(target_temp)
dice_array = np.array(dice_new_list)
dice_mean = np.mean(dice_array, axis=0)
haus_dist_arr = np.array(hausdorff_dist_list)
hausdorff_dist_mean = np.mean(haus_dist_arr, axis=0)
ravd_arr = np.array(ravd_list)
ravd_mean = np.mean(ravd_arr, axis=0)
# uncertainty quantification
brier_array = np.mean(
|
np.array(brier_list)
|
numpy.array
|
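A small example of the numpy.array conversion this row targets: averaging a Python list of per-subject Brier scores; the values are made up.
# Illustrative only: mean Brier score over hypothetical per-subject values
import numpy as np
brier_list = [0.12, 0.08, 0.15]
brier_array = np.mean(np.array(brier_list))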
import cv2
import numpy as np
import math
def read_class_names(class_file_name):
'''loads class name from a file'''
names = {}
with open(class_file_name, 'r') as data:
for ID, name in enumerate(data):
names[ID] = name.strip('\n')
return names
def py_nms(boxes, scores, max_boxes=80, iou_thresh=0.5):
"""
Pure Python NMS baseline.
    Arguments: boxes: shape of [-1, 4], the value of '-1' means that we don't know the
                      exact number of boxes
scores: shape of [-1,]
max_boxes: representing the maximum of boxes to be selected by non_max_suppression
iou_thresh: representing iou_threshold for deciding to keep boxes
"""
assert boxes.shape[1] == 4 and len(scores.shape) == 1
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= iou_thresh)[0]
order = order[inds + 1]
return keep[:max_boxes]
def image_preporcess(image, target_size, gt_boxes=None):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0, dtype=np.float32)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
def post_process(detections, org_img_shape, input_size, down_ratio, score_threshold):
bboxes = detections[0, :, 0:4]
scores = detections[0, :, 4]
classes = detections[0, :, 5]
org_h, org_w = org_img_shape
resize_ratio = min(input_size[1] / org_w, input_size[0] / org_h)
dw = (input_size[1] - resize_ratio * org_w) / 2
dh = (input_size[0] - resize_ratio * org_h) / 2
bboxes[:, 0::2] = 1.0 * (bboxes[:, 0::2] * down_ratio - dw) / resize_ratio
bboxes[:, 1::2] = 1.0 * (bboxes[:, 1::2] * down_ratio - dh) / resize_ratio
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, org_w)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, org_h)
score_mask = scores >= score_threshold
    bboxes, scores, classes = bboxes[score_mask], scores[score_mask], classes[score_mask]
    return np.concatenate([bboxes, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def bboxes_draw_on_img(img, classes_id, scores, bboxes, class_names, thickness=2):
colors_tableau = [(158, 218, 229), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207)]
scale = 0.4
text_thickness = 1
line_type = 8
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
color = colors_tableau[int(classes_id[i])]
# Draw bounding boxes
x1_src = int(bbox[0])
y1_src = int(bbox[1])
x2_src = int(bbox[2])
y2_src = int(bbox[3])
cv2.rectangle(img, (x1_src, y1_src), (x2_src, y2_src), color, thickness)
# Draw text
s = '%s: %.2f' % (class_names[int(classes_id[i])], scores[i])
# text_size is (width, height)
text_size, baseline = cv2.getTextSize(s, cv2.FONT_HERSHEY_SIMPLEX, scale, text_thickness)
p1 = (y1_src - text_size[1], x1_src)
cv2.rectangle(img, (p1[1] - thickness//2, p1[0] - thickness - baseline), (p1[1] + text_size[0], p1[0] + text_size[1]), color, -1)
cv2.putText(img, s, (p1[1], p1[0] + baseline), cv2.FONT_HERSHEY_SIMPLEX, scale, (255,255,255), text_thickness, line_type)
return img
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.average = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.average = self.sum / float(self.count)
def get_preds_gpu(detections, image_id):
'''
Given the y_pred of an input image, get the predicted bbox and label info.
return:
pred_content: 2d list.
'''
cls_in_img = list(set(detections[:, 5]))
results = []
pred_content = []
for c in cls_in_img:
cls_mask = (detections[:, 5] == c)
classified_det = detections[cls_mask]
classified_bboxes = classified_det[:, :4]
classified_scores = classified_det[:, 4]
inds = py_nms(classified_bboxes, classified_scores, max_boxes=50, iou_thresh=0.5)
# results.extend(classified_det[inds])
results.extend(classified_det[inds].tolist())
for bbox in results:
import cfg
if bbox[4] >= cfg.score_threshold:
x_min, y_min, x_max, y_max = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
score = float(bbox[4])
label = int(bbox[5])
pred_content.append([image_id, x_min, y_min, x_max, y_max, score, label])
# print("results is", results)
# print("pred_content is", pred_content)
return pred_content
def parse_line(line):
'''
Given a line from the training/test txt file, return parsed info.
line format: line_index, img_path, img_width, img_height, [box_info_1 (5 number)], ...
return:
line_idx: int64
pic_path: string.
boxes: shape [N, 4], N is the ground truth count, elements in the second
dimension are [x_min, y_min, x_max, y_max]
labels: shape [N]. class index.
img_width: int.
img_height: int
'''
if 'str' not in str(type(line)):
line = line.decode()
s = line.strip().split(' ')
line_idx = s[0]
pic_path = s[1]
img_width = int(s[2])
img_height = int(s[3])
s = s[4:]
box_cnt = len(s)
assert box_cnt > 0
boxes = []
labels = []
gt_labels = np.array([list(map(lambda x: int(float(x)), box.split(','))) for box in s])
# print(labels)
for idx, label in enumerate(gt_labels):
# box = label[:4]
# class_name = label[4]
class_name, x_min, y_min, x_max, y_max = label[4], label[0], label[1], label[2], label[3]
boxes.append([x_min, y_min, x_max, y_max])
labels.append(class_name)
boxes = np.asarray(boxes, np.float32)
labels = np.asarray(labels, np.float32)
return pic_path, boxes, labels, img_width, img_height
gt_dict = {} # key: img_id, value: gt object list
def parse_gt_rec(gt_filename, target_img_size, letterbox_resize=True):
'''
parse and re-organize the gt info.
return:
gt_dict: dict. Each key is a img_id, the value is the gt bboxes in the corresponding img.
'''
global gt_dict
if not gt_dict:
new_width, new_height = target_img_size
with open(gt_filename, 'r') as f:
for img_id, line in enumerate(f):
pic_path, boxes, labels, ori_width, ori_height = parse_line(line)
objects = []
for i in range(len(labels)):
x_min, y_min, x_max, y_max = boxes[i]
label = labels[i]
if letterbox_resize:
resize_ratio = min(new_width / ori_width, new_height / ori_height)
resize_w = int(resize_ratio * ori_width)
resize_h = int(resize_ratio * ori_height)
dw = int((new_width - resize_w) / 2)
dh = int((new_height - resize_h) / 2)
objects.append([x_min * resize_ratio + dw,
y_min * resize_ratio + dh,
x_max * resize_ratio + dw,
y_max * resize_ratio + dh,
label])
else:
# objects.append([x_min * new_width / ori_width,
# y_min * new_height / ori_height,
# x_max * new_width / ori_width,
# y_max * new_height / ori_height,
# label])
objects.append([x_min, y_min, x_max, y_max , label])
gt_dict[img_id] = objects
return gt_dict
# The following two functions are modified from FAIR's Detectron repo to calculate mAP:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/voc_eval.py
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap =
|
np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
|
numpy.sum
|
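A compact, self-contained sketch of the numpy.sum step in voc_ap, run on a tiny made-up precision/recall pair.
# Illustrative only: area under a toy precision-recall envelope, as in the row's completion
import numpy as np
rec = np.array([0.1, 0.4, 0.4, 0.8])
prec = np.array([1.0, 0.9, 0.7, 0.6])
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])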
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import action_selection as asl
import seaborn as sns
import pandas as pd
from scipy.stats import entropy
plt.style.use('seaborn-whitegrid')
from pandas.plotting import table
from misc import params_list, simulate, make_title, test_vals, calc_dkl, cols
import pickle as pickle
import time
import os as os
import itertools as itertools
import string as string
path = os.getcwd() + '\\parameter_data\\'
tests = np.asarray(['conflict', 'agreement','goal', 'habit'],dtype="object")
# %%
'''
used parameters
'''
pols = [8,81]
bs = [3]
As = [1]
ws = [1]
ss = [0.0004]
selectors = ['rdm', 'ardm'] #
path = os.getcwd() + '\\parameter_data\\'
tests = np.asarray(['conflict', 'agreement','goal', 'habit'],dtype="object")
par_list = []
for p in itertools.product(pols, selectors, bs, ws, ss, As, params_list):
par_list.append([p[0]]+[p[1]] + [p[2]]+ [p[3]]+ [p[4]] + [p[5]] + [p[6]])
#%%
'''
function definitions
'''
def test(df,size=4000):
if not df.shape[0] == size:
raise ValueError('WRONG SELECTION')
def load_file(ttl):
with open (ttl, 'rb') as fp:
data = pickle.load(fp)
return data
def extract_params(ttl):
names = ['standard', 'post_prior1', 'post_prior0', 'like_prior1', 'like_prior0']
params_dict = {
'standard_b': [False, False, True],
'post_prior1': [True, False, True],
'post_prior0': [True, False, False],
'like_prior1': [False, True, True],
'like_prior0': [False, True, False]
}
pars = ttl.split('_')
a_present = False
for indx, par in enumerate(pars):
if par == 'b':
if not len(pars[indx+1]) == 1:
b = float(pars[indx+1])
else:
b = int(pars[indx+1])
if par == 's':
s = float(pars[indx+1])
if par == 'wd':
if not len(pars[indx+1]) == 1:
wd = float(pars[indx+1])
else:
wd = int(pars[indx+1])
if par == 'a':
a_present = True
if not len(pars[indx+1]) == 1:
a = float(pars[indx+1])
else:
a = int(pars[indx+1])
# a = float(pars[indx+1])
# print(pars)
npi = int(pars[1])
selector = pars[2]
regime = '_'.join(pars[3:5])
pars = params_dict[regime]
if regime == 'standard_b':
regime = 'standard'
if a_present:
return [npi, selector, b, wd,s, a, pars + [regime]]
else:
return [npi, selector, b, wd,s, 1, pars + [regime]]
def extract_post(npi=3, nmodes=4):
x_positions = []
for i in range(nmodes):
x_positions.append([x for x in range(i*npi + i, i*npi + i + npi)])
polss = np.asarray([3,8,81,2])
i = np.where(polss == npi)[0][0]
posts = np.zeros([nmodes, npi]) # translate the posteriors
post = np.asarray(test_vals)[i,:,0] # into a numpy array
for indx, p in enumerate(post):
posts[indx,:] = np.asarray(p)
return posts, x_positions
def load_fits(trials=1000):
tests = np.asarray(['conflict', 'agreement','goal', 'habit'],dtype="object")
names = ['npi', 'selector','b','w','s','A', 'regime', 'post_fit','individual_fit','avg_fit','ID','file_ttl','stats']
nmodes = 4
path = os.getcwd() + '/parameter_data/'
files = os.listdir(path)
total = len(files)
npis = np.zeros(total)
selectors = np.zeros(total, dtype='object')
bs = np.zeros(total, dtype="float")
ws = np.zeros(total)
ss = np.zeros(total)
As = np.zeros(total)
regimes = np.zeros(total, dtype="object")
post_fit = np.zeros(total)
individual_fit = np.zeros(total, dtype="object")
post_fit_avg = np.zeros(total)
polss = np.array([3,8,81])
ID = np.zeros(total)
titles = np.zeros(total, dtype="object")
stats = np.zeros(total, dtype="object")
fucked = []
for ind, f in enumerate(files):
if ind < total:
p = extract_params(f)
# print(ind)
# print(f)
npi = p[0]
y =
|
np.where(polss == npi)
|
numpy.where
|
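A tiny example of the numpy.where lookup this row completes: finding the index of a policy count inside a small array.
# Illustrative only: index of npi within polss, matching the row's completion
import numpy as np
polss = np.array([3, 8, 81])
npi = 8
y = np.where(polss == npi)[0][0]    # -> 1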
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import tensorflow as tf
import numpy as np
import collections
from ops import input_ops
FLAGS = tf.flags.FLAGS
def read_vocab_embs(vocabulary_file, embedding_matrix_file):
tf.logging.info("Reading vocabulary from %s", vocabulary_file)
with tf.gfile.GFile(vocabulary_file, mode="r") as f:
lines = list(f.readlines())
vocab = [line.strip() for line in lines]
with open(embedding_matrix_file, "r") as f:
embedding_matrix = np.load(f)
tf.logging.info("Loaded embedding matrix with shape %s",
embedding_matrix.shape)
word_embedding_dict = collections.OrderedDict(
zip(vocab, embedding_matrix))
return word_embedding_dict
def read_vocab(vocabulary_file):
tf.logging.info("Reading vocabulary from %s", vocabulary_file)
with tf.gfile.GFile(vocabulary_file, mode="r") as f:
lines = list(f.readlines())
reverse_vocab = [line.strip() for line in lines]
tf.logging.info("Loaded vocabulary with %d words.", len(reverse_vocab))
#tf.logging.info("Loading embedding matrix from %s", embedding_matrix_file)
# Note: tf.gfile.GFile doesn't work here because np.load() calls f.seek()
# with 3 arguments.
word_embedding_dict = collections.OrderedDict(
zip(reverse_vocab, range(len(reverse_vocab))))
return word_embedding_dict
class s2v(object):
"""Skip-thoughts model."""
def __init__(self, config, mode="train", input_reader=None, input_queue=None):
"""Basic setup. The actual TensorFlow graph is constructed in build().
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "encode".
input_reader: Subclass of tf.ReaderBase for reading the input serialized
tf.Example protocol buffers. Defaults to TFRecordReader.
Raises:
ValueError: If mode is invalid.
"""
if mode not in ["train", "eval", "encode"]:
raise ValueError("Unrecognized mode: %s" % mode)
self.config = config
self.mode = mode
self.reader = input_reader if input_reader else tf.TFRecordReader()
self.input_queue = input_queue
# Initializer used for non-recurrent weights.
self.uniform_initializer = tf.random_uniform_initializer(
minval=-FLAGS.uniform_init_scale,
maxval=FLAGS.uniform_init_scale)
# Input sentences represented as sequences of word ids. "encode" is the
# source sentence, "decode_pre" is the previous sentence and "decode_post"
# is the next sentence.
# Each is an int64 Tensor with shape [batch_size, padded_length].
self.encode_ids = None
# Boolean masks distinguishing real words (1) from padded words (0).
# Each is an int32 Tensor with shape [batch_size, padded_length].
self.encode_mask = None
# Input sentences represented as sequences of word embeddings.
# Each is a float32 Tensor with shape [batch_size, padded_length, emb_dim].
self.encode_emb = None
# The output from the sentence encoder.
# A float32 Tensor with shape [batch_size, num_gru_units].
self.thought_vectors = None
# The total loss to optimize.
self.total_loss = None
def build_inputs(self):
if self.mode == "encode":
encode_ids = tf.placeholder(tf.int64, (None, None), name="encode_ids")
encode_mask = tf.placeholder(tf.int8, (None, None), name="encode_mask")
else:
# Prefetch serialized tf.Example protos.
input_queue = input_ops.prefetch_input_data(
self.reader,
FLAGS.input_file_pattern,
shuffle=FLAGS.shuffle_input_data,
capacity=FLAGS.input_queue_capacity,
num_reader_threads=FLAGS.num_input_reader_threads)
# Deserialize a batch.
serialized = input_queue.dequeue_many(FLAGS.batch_size)
encode = input_ops.parse_example_batch(serialized)
encode_ids = tf.identity(encode.ids, name="encode_ids")
encode_mask = tf.identity(encode.mask, name="encode_mask")
self.encode_ids = encode_ids
self.encode_mask = encode_mask
def build_word_embeddings(self):
rand_init = self.uniform_initializer
self.word_embeddings = []
self.encode_emb = []
self.init = None
for v in self.config.vocab_configs:
if v.mode == 'fixed':
if self.mode == "train":
word_emb = tf.get_variable(
name=v.name,
shape=[v.size, v.dim],
trainable=False)
embedding_placeholder = tf.placeholder(
tf.float32, [v.size, v.dim])
embedding_init = word_emb.assign(embedding_placeholder)
rand = np.random.rand(1, v.dim)
word_vecs = np.load(v.embs_file)
load_vocab_size = word_vecs.shape[0]
assert(load_vocab_size == v.size - 1)
word_init = np.concatenate((rand, word_vecs), axis=0)
self.init = (embedding_init, embedding_placeholder, word_init)
else:
word_emb = tf.get_variable(
name=v.name,
shape=[v.size, v.dim])
encode_emb = tf.nn.embedding_lookup(word_emb, self.encode_ids)
self.word_emb = word_emb
self.encode_emb.extend([encode_emb, encode_emb])
if v.mode == 'trained':
for inout in ["", "_out"]:
word_emb = tf.get_variable(
name=v.name + inout,
shape=[v.size, v.dim],
initializer=rand_init)
if self.mode == 'train':
self.word_embeddings.append(word_emb)
encode_emb = tf.nn.embedding_lookup(word_emb, self.encode_ids)
self.encode_emb.append(encode_emb)
if v.mode == 'expand':
for inout in ["", "_out"]:
encode_emb = tf.placeholder(tf.float32, (
None, None, v.dim), v.name + inout)
self.encode_emb.append(encode_emb)
word_emb_dict = read_vocab_embs(v.vocab_file + inout + ".txt",
v.embs_file + inout + ".npy")
self.word_embeddings.append(word_emb_dict)
if v.mode != 'expand' and self.mode == 'encode':
word_emb_dict = read_vocab(v.vocab_file)
self.word_embeddings.extend([word_emb_dict, word_emb_dict])
def _initialize_cell(self, num_units, cell_type="GRU"):
if cell_type == "GRU":
return tf.contrib.rnn.GRUCell(num_units=num_units)
elif cell_type == "LSTM":
return tf.contrib.rnn.LSTMCell(num_units=num_units)
else:
raise ValueError("Invalid cell type")
def bow(self, word_embs, mask):
mask_f = tf.expand_dims(tf.cast(mask, tf.float32), -1)
word_embs_mask = word_embs * mask_f
bow = tf.reduce_sum(word_embs_mask, axis=1)
return bow
def rnn(self, word_embs, mask, scope, encoder_dim, cell_type="GRU"):
length = tf.to_int32(tf.reduce_sum(mask, 1), name="length")
if self.config.bidir:
if encoder_dim % 2:
raise ValueError(
"encoder_dim must be even when using a bidirectional encoder.")
num_units = encoder_dim // 2
cell_fw = self._initialize_cell(num_units, cell_type=cell_type)
cell_bw = self._initialize_cell(num_units, cell_type=cell_type)
outputs, states = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=word_embs,
sequence_length=length,
dtype=tf.float32,
scope=scope)
if cell_type == "LSTM":
states = [states[0][1], states[1][1]]
state = tf.concat(states, 1)
else:
cell = self._initialize_cell(encoder_dim, cell_type=cell_type)
outputs, state = tf.nn.dynamic_rnn(
cell=cell,
inputs=word_embs,
sequence_length=length,
dtype=tf.float32,
scope=scope)
if cell_type == "LSTM":
state = state[1]
return state
def build_encoder(self):
"""Builds the sentence encoder.
Inputs:
self.encode_emb
self.encode_mask
Outputs:
self.thought_vectors
Raises:
ValueError: if config.bidirectional_encoder is True and config.encoder_dim
is odd.
"""
names = ["","_out"]
self.thought_vectors = []
# print(self.config.encoder)
# encode_emb = tf.compat.v1.Print(self.encode_emb, [tf.shape(self.encode_emb)], "encode_emb shape: ")
encode_emb = self.encode_emb
for i in range(2):
with tf.variable_scope("encoder" + names[i]) as scope:
if self.config.encoder == "gru":
sent_rep = self.rnn(encode_emb[i], self.encode_mask, scope, self.config.encoder_dim, cell_type="GRU")
elif self.config.encoder == "lstm":
sent_rep = self.rnn(encode_emb[i], self.encode_mask, scope, self.config.encoder_dim, cell_type="LSTM")
elif self.config.encoder == 'bow':
sent_rep = self.bow(encode_emb[i], self.encode_mask)
else:
raise ValueError("Invalid encoder")
if self.config.encoder_norm:
# sent_rep = tf.compat.v1.Print(sent_rep, [tf.shape(sent_rep)], "sent_rep shape: ")
sent_rep = tf.nn.l2_normalize(sent_rep, axis=1)
thought_vectors = tf.identity(sent_rep, name="thought_vectors")
self.thought_vectors.append(thought_vectors)
def build_loss(self):
"""Builds the loss Tensor.
Outputs:
self.total_loss
"""
loss_config = self.config.loss_config
all_sen_embs = self.thought_vectors
losses = []
to_log = {}
print(self.config)
print(loss_config)
# Positive pair targets
# diag = all zeros
pos_targets_np = np.zeros((FLAGS.batch_size, FLAGS.batch_size))
ctxt_sent_pos = list(range(-FLAGS.context_size, FLAGS.context_size + 1))
ctxt_sent_pos.remove(0)
for ctxt_pos in ctxt_sent_pos:
pos_targets_np += np.eye(FLAGS.batch_size, k=ctxt_pos)
pos_targets_np_sum = np.sum(pos_targets_np, axis=1, keepdims=True)
pos_targets_np = pos_targets_np / pos_targets_np_sum
# matmul scores
if FLAGS.dropout:
mask_shp = [1, self.config.encoder_dim]
bin_mask = tf.random_uniform(mask_shp) > FLAGS.dropout_rate
bin_mask = tf.where(bin_mask, tf.ones(mask_shp), tf.zeros(mask_shp))
src = all_sen_embs[0] * bin_mask
dst = all_sen_embs[1] * bin_mask
mm_scores = tf.matmul(src, dst, transpose_b=True)
else:
mm_scores = tf.matmul(all_sen_embs[0], all_sen_embs[1], transpose_b=True)
if loss_config.c != 0:
c_scores = mm_scores / loss_config.ct
# Ignore source sentence
if self.config.encoder_norm:
nodiag_mask_np = np.ones((FLAGS.batch_size, FLAGS.batch_size), dtype=np.bool)
np.fill_diagonal(nodiag_mask_np, False)
nodiag_pos_targets_np = pos_targets_np[nodiag_mask_np].reshape(FLAGS.batch_size, FLAGS.batch_size - 1)
pos_targets = tf.constant(nodiag_pos_targets_np, dtype=tf.float32) # still normalized since diag(pos_targets_np) = 0
nodiag_mask = tf.constant(nodiag_mask_np)
c_scores = tf.reshape(c_scores[nodiag_mask], [FLAGS.batch_size, FLAGS.batch_size - 1])
else:
pos_targets = tf.constant(pos_targets_np, dtype=tf.float32)
c_scores = tf.matrix_set_diag(c_scores,
|
np.zeros(FLAGS.batch_size)
|
numpy.zeros
|
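A NumPy-only sketch of the numpy.zeros call that zeroes the score diagonal in this row; tf.matrix_set_diag is mimicked with np.fill_diagonal purely for illustration.
# Illustrative only: zero the self-similarity diagonal of a toy score matrix
import numpy as np
batch_size = 4
c_scores = np.random.rand(batch_size, batch_size)
np.fill_diagonal(c_scores, np.zeros(batch_size))    # stands in for tf.matrix_set_diag(c_scores, np.zeros(batch_size))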