| prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars) |
---|---|---|
import numpy as np
from cohere.src_py.beamlines.viz import CXDViz
import math as m
import xrayutilities.experiment as xuexp
from xrayutilities.io import spec as spec
def parse_spec(specfile, scan):
"""
Reads parameters necessary to run visualization from spec file for given scan.
Parameters
----------
specfile : str
spec file name
scan : int
scan number to use to recover the saved measurements
Returns
-------
delta, gamma, theta, phi, chi, scanmot, scanmot_del, detdist, detector_name, energy
"""
# Scan numbers start at one but the list is 0 indexed
try:
ss = spec.SPECFile(specfile)[scan - 1]
except Exception as ex:
print(str(ex))
print ('Could not parse ' + specfile )
return None,None,None,None,None,None,None,None,None,None
# Stuff from the header
try:
detector_name = str(ss.getheader_element('UIMDET'))
except:
detector_name = None
try:
command = ss.command.split()
scanmot = command[1]
scanmot_del = (float(command[3]) - float(command[2])) / int(command[4])
except:
scanmot = None
scanmot_del = None
# Motor stuff from the header
try:
delta = ss.init_motor_pos['INIT_MOPO_Delta']
except:
delta = None
try:
gamma = ss.init_motor_pos['INIT_MOPO_Gamma']
except:
gamma = None
try:
theta = ss.init_motor_pos['INIT_MOPO_Theta']
except:
theta = None
try:
phi = ss.init_motor_pos['INIT_MOPO_Phi']
except:
phi = None
try:
chi = ss.init_motor_pos['INIT_MOPO_Chi']
except:
chi = None
try:
detdist = ss.init_motor_pos['INIT_MOPO_camdist']
except:
detdist = None
try:
energy = ss.init_motor_pos['INIT_MOPO_Energy']
except:
energy = None
# returning the scan motor name as well. Sometimes we scan things
# other than theta. So we need to expand the capability of the display
# code.
return delta, gamma, theta, phi, chi, scanmot, scanmot_del, detdist, detector_name, energy
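# --- Added usage sketch (not part of the original module). The spec file name and
# --- scan number below are hypothetical placeholders; a missing or unparsable file
# --- simply yields a tuple of Nones, as parse_spec shows above.
if __name__ == '__main__':
    (delta, gamma, theta, phi, chi, scanmot, scanmot_del,
     detdist, detector_name, energy) = parse_spec('example.spec', 54)
    print('detector:', detector_name, 'energy:', energy)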
class DisplayParams:
"""
This class encapsulates the parameters that define image display. The parameters are read from a config file on construction.
It aggregates the information needed to generate the visualization.
"""
def __init__(self, config):
"""
The constructor gets config file and fills out the class members.
Parameters
----------
config : str
configuration file name
Returns
-------
none
"""
self.detector = None
deg2rad = np.pi / 180.0
try:
specfile = config['specfile']
last_scan = config['last_scan']
# get stuff from the spec file.
self.delta, self.gamma, self.th, self.phi, self.chi, self.scanmot, self.scanmot_del, self.detdist, self.detector, self.energy = parse_spec(specfile, last_scan)
except:
pass
# drop the ':' from detector name
if self.detector is not None and self.detector.endswith(':'):
self.detector = self.detector[:-1]
try:
self.diffractometer = config['diffractometer']
except:
raise ValueError('diffractometer name not in config file')
# override the parsed parameters with entries in config file
try:
self.detector = config['detector']
except KeyError:
if self.detector is None:
raise ValueError('detector not in spec, please configure')
try:
self.energy = config['energy']
except KeyError:
if self.energy is None:
raise ValueError('energy not in spec, please configure')
try:
self.delta = config['delta']
except KeyError:
if self.delta is None:
raise ValueError('delta not in spec, please configure')
try:
self.gamma = config['gamma']
except KeyError:
if self.gamma is None:
raise ValueError('gamma not in spec, please configure')
try:
self.detdist = config['detdist']
except KeyError:
if self.detdist is None:
raise ValueError('detdist not in spec, please configure')
try:
self.th = config['theta']
except KeyError:
if self.th is None:
raise ValueError('theta not in spec, please configure')
try:
self.chi = config['chi']
except KeyError:
if self.chi is None:
raise ValueError('chi not in spec, please configure')
try:
self.phi = config['phi']
except KeyError:
if self.phi is None:
raise ValueError('phi not in spec, please configure')
try:
self.scanmot = config['scanmot']
except KeyError:
if self.scanmot is None:
raise ValueError('scanmot not in spec, please configure')
try:
self.scanmot_del = config['scanmot_del']
except KeyError:
if self.scanmot_del is None:
raise ValueError('scanmot_del not in spec, please configure')
try:
self.rampups = config['rampups']
except:
self.rampups = 1
try:
self.binning = []
binning = config['binning']
for i in range(len(binning)):
self.binning.append(binning[i])
for _ in range(3 - len(self.binning)):
self.binning.append(1)
except KeyError:
self.binning = [1, 1, 1]
try:
self.crop = []
crop = config['crop']
for i in range(len(crop)):
if crop[i] > 1:
crop[i] = 1.0
self.crop.append(crop[i])
for _ in range(3 - len(self.crop)):
self.crop.append(1.0)
crop[0], crop[1] = crop[1], crop[0]
except KeyError:
self.crop = (1.0, 1.0, 1.0)
def set_instruments(self, detector, diffractometer):
# for beamline aps_34idc both detector and diffractometer must be defined
if detector is None:
print ('detector must be defined')
return False
if diffractometer is None:
print ('diffractometer must be defined')
return False
for attr in diffractometer.__class__.__dict__.keys():
if not attr.startswith('__'):
self.__dict__[attr] = diffractometer.__class__.__dict__[attr]
for attr in diffractometer.__dict__.keys():
if not attr.startswith('__'):
self.__dict__[attr] = diffractometer.__dict__[attr]
for attr in detector.__class__.__dict__.keys():
if not attr.startswith('__'):
self.__dict__[attr] = detector.__class__.__dict__[attr]
for attr in detector.__dict__.keys():
if not attr.startswith('__'):
self.__dict__[attr] = detector.__dict__[attr]
return True
def set_geometry(shape, p):
"""
Sets geometry.
Parameters
----------
shape : tuple
shape of reconstructed array
p : DisplayParams object
Returns
-------
nothing
"""
# DisplayParams is not expected to do any modifications of params (units, etc)
px = p.pixel[0] * p.binning[0]
py = p.pixel[1] * p.binning[1]
detdist = p.detdist / 1000.0 # convert to meters
scanmot = p.scanmot.strip()
enfix = 1
# if energy is given in kev convert to ev for xrayutilities
if m.floor(m.log10(p.energy)) < 3:
enfix = 1000
energy = p.energy * enfix # x-ray energy in eV
if scanmot == 'en':
scanen = np.array((energy, energy + p.scanmot_del * enfix))
else:
scanen = np.array((energy,))
qc = xuexp.QConversion(p.sampleaxes, p.detectoraxes, p.incidentaxis, en=scanen)
# compute for 4pixel (2x2) detector
qc.init_area(p.pixelorientation[0], p.pixelorientation[1], shape[0], shape[1], 2, 2, distance=detdist,
pwidth1=px, pwidth2=py)
# I think q2 will always be (3,2,2,2) (vec, scanarr, px, py)
# should put some try except around this in case something goes wrong.
if scanmot == 'en': # seems en scans always have to be treated differently since init is unique
q2 = np.array(qc.area(p.th, p.chi, p.phi, p.delta, p.gamma, deg=True))
elif scanmot in p.sampleaxes_name: # based on scanmot args are made for qc.area
args = []
axisindex = p.sampleaxes_name.index(scanmot)
for n in range(len(p.sampleaxes_name)):
if n == axisindex:
scanstart = p.__dict__[scanmot]
args.append(
|
np.array((scanstart, scanstart + p.scanmot_del * p.binning[2]))
|
numpy.array
|
#!/usr/bin/env python3
#
# Copyright (c) 2019-2021 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
from environs import Env
import lgsvl
import time
import yaml
import numpy as np
from numba import njit
from argparse import Namespace
import math
import matplotlib.pyplot as plt
#######################################################################################
##################### PLANNER HELPERS ##########################################
#######################################################################################
@njit(fastmath=False, cache=True)
def nearest_point_on_trajectory(point, trajectory):
'''
Return the nearest point along the given piecewise linear trajectory.
Same as nearest_point_on_line_segment, but vectorized. This method is quite fast; time constraints should
not be an issue so long as trajectories are not insanely long.
Order of magnitude: trajectory length: 1000 --> 0.0002 second computation (5000fps)
point: size 2 numpy array
trajectory: Nx2 matrix of (x,y) trajectory waypoints
- these must be unique. If they are not unique, a divide by 0 error will destroy the world
'''
diffs = trajectory[1:,:] - trajectory[:-1,:]
l2s = diffs[:,0]**2 + diffs[:,1]**2
# this is equivalent to the elementwise dot product
# dots = np.sum((point - trajectory[:-1,:]) * diffs[:,:], axis=1)
dots = np.empty((trajectory.shape[0]-1, ))
for i in range(dots.shape[0]):
dots[i] = np.dot((point - trajectory[i, :]), diffs[i, :])
t = dots / l2s
t[t<0.0] = 0.0
t[t>1.0] = 1.0
# t = np.clip(dots / l2s, 0.0, 1.0)
projections = trajectory[:-1,:] + (t*diffs.T).T
# dists = np.linalg.norm(point - projections, axis=1)
dists = np.empty((projections.shape[0],))
for i in range(dists.shape[0]):
temp = point - projections[i]
dists[i] = np.sqrt(np.sum(temp*temp))
min_dist_segment = np.argmin(dists)
return projections[min_dist_segment], dists[min_dist_segment], t[min_dist_segment], min_dist_segment
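# Added illustration (not part of the original script): on a tiny two-segment trajectory,
#   traj = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 1.0]])
#   proj, dist, t_seg, i_seg = nearest_point_on_trajectory(np.array([1.5, 0.2]), traj)
# returns the projection onto the second segment, its distance, the parameter along
# that segment, and the segment index (here i_seg == 1).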
@njit(fastmath=False, cache=True)
def first_point_on_trajectory_intersecting_circle(point, radius, trajectory, t=0.0, wrap=False):
''' Starts at the beginning of the trajectory and finds the first point one radius away from the given point along the trajectory.
Assumes that the first segment passes within a single radius of the point
http://codereview.stackexchange.com/questions/86421/line-segment-to-circle-collision-algorithm
'''
start_i = int(t)
start_t = t % 1.0
first_t = None
first_i = None
first_p = None
trajectory = np.ascontiguousarray(trajectory)
for i in range(start_i, trajectory.shape[0]-1):
start = trajectory[i,:]
end = trajectory[i+1,:]+1e-6
V = np.ascontiguousarray(end - start)
a = np.dot(V,V)
b = 2.0*np.dot(V, start - point)
c = np.dot(start, start) + np.dot(point,point) - 2.0*np.dot(start, point) - radius*radius
discriminant = b*b-4*a*c
if discriminant < 0:
continue
# print "NO INTERSECTION"
# else:
# if discriminant >= 0.0:
discriminant = np.sqrt(discriminant)
t1 = (-b - discriminant) / (2.0*a)
t2 = (-b + discriminant) / (2.0*a)
if i == start_i:
if t1 >= 0.0 and t1 <= 1.0 and t1 >= start_t:
first_t = t1
first_i = i
first_p = start + t1 * V
break
if t2 >= 0.0 and t2 <= 1.0 and t2 >= start_t:
first_t = t2
first_i = i
first_p = start + t2 * V
break
elif t1 >= 0.0 and t1 <= 1.0:
first_t = t1
first_i = i
first_p = start + t1 * V
break
elif t2 >= 0.0 and t2 <= 1.0:
first_t = t2
first_i = i
first_p = start + t2 * V
break
# wrap around to the beginning of the trajectory if no intersection is found
if wrap and first_p is None:
for i in range(-1, start_i):
start = trajectory[i % trajectory.shape[0],:]
end = trajectory[(i+1) % trajectory.shape[0],:]+1e-6
V = end - start
a =
|
np.dot(V,V)
|
numpy.dot
|
import numpy as np
from scipy.special import logsumexp
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAS= actionAngleStaeckel(delta=0.4,pot=MWPotential2014,c=True)
_R0 = 8.
_z0 = 0.025
def gaussian_1d(v_R, v_z, R, z, med_z, params=[30.]):
sigmaR = params[0]
vo = 0.
A_R = (1./(sigmaR*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigmaR**2)
p = A_R*np.exp(E_R)
logp = np.log(p)
logp = np.sum(logp[np.isfinite(logp)])
return logp
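# Added usage sketch (hypothetical velocity values; not part of the original module):
if __name__ == '__main__':
    _v_R = np.array([12.0, -25.0, 3.0, 40.0])
    # the remaining positional arguments are unused by gaussian_1d
    print(gaussian_1d(_v_R, None, None, None, None, params=[30.]))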
def gaussian_fixedv0(v_R, v_z, R, z, med_z, params=[np.log10(30.),np.log10(30.),0.1]):
sigmaR, sigmaz, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
vo = 0.
contA = (contfrac)*(1./(100*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*100**2))
contE_z = (-(v_z-vo)**2/(2*100**2))
A_R = (1-contfrac)*(1./(sigmaR*np.sqrt(2*np.pi)))
A_z = (1-contfrac)*(1./(sigmaz*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigmaR**2)
E_z = (-(v_z-vo)**2)/(2*sigmaz**2)
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
As = np.dstack([A_R, A_z, contA, contA])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_quadz_fixedv0(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.01], return_each=False):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacontR, sigmacontz = np.sqrt(sigmacont**2+cov_vRvTvz[:,0,0]), np.sqrt(sigmacont**2+cov_vRvTvz[:,2,2])
z = np.fabs(z)
sigma_fRz_R = np.sqrt(((a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR)))**2+cov_vRvTvz[:,0,0])
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigma_fRz_R**2)
sigma_fRz_z = np.sqrt(((a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz)))**2+cov_vRvTvz[:,2,2])
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-vo)**2)/(2*sigma_fRz_z**2)
contA_R = (contfrac)*(1./(sigmacontR*np.sqrt(2*np.pi)))
contA_z = (contfrac)*(1./(sigmacontz*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacontR**2))
contE_z = (-(v_z-vo)**2/(2*sigmacontz**2))
As = np.dstack([A_R, A_z, contA_R, contA_z])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
if return_each:
return logp
logp = np.sum(logp)
return logp
def gaussian_expR_quadz(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.01]):
vo = 0.
sigmacont=100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, v_Ro, v_zo, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacontR, sigmacontz = np.sqrt(sigmacont**2+cov_vRvTvz[:,0,0]), np.sqrt(sigmacont**2+cov_vRvTvz[:,2,2])
z = np.fabs(z)
sigma_fRz_R = np.sqrt(((a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR)))**2+cov_vRvTvz[:,0,0])
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-v_Ro)**2)/(2*sigma_fRz_R**2)
sigma_fRz_z = np.sqrt(((a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz)))**2+cov_vRvTvz[:,2,2])
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-v_zo)**2)/(2*sigma_fRz_z**2)
contA_R = (contfrac)*(1./(sigmacontR*np.sqrt(2*np.pi)))
contA_z = (contfrac)*(1./(sigmacontz*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacontR**2))
contE_z = (-(v_z-vo)**2/(2*sigmacontz**2))
As = np.dstack([A_R, A_z, contA_R, contA_z])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_expz_fixedv0(v_R, v_z, R, z, med_z, params=[1/8.,1/8.,np.log10(50.),1/8.,1/8.,np.log10(50.),0.01]):
vo = 0.
hRsigmaR, hzsigmaR, sigmaR, hRsigmaz, hzsigmaz, sigmaz, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacont = 200.
z = np.fabs(z)
sigma_fRz_R = sigmaR*np.exp(-hRsigmaR*(R-_R0)-hzsigmaR*(z-med_z))
sigma_fRz_z = sigmaz*np.exp(-hRsigmaz*(R-_R0)-hzsigmaz*(z-med_z))
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigma_fRz_R**2)
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-vo)**2)/(2*sigma_fRz_z**2)
contA = (contfrac)*(1./(sigmacont*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacont**2))
contE_z = (-(v_z-vo)**2/(2*sigmacont**2))
As = np.dstack([A_R, A_z, np.ones(len(v_R))*contA, np.ones(len(v_R))*contA])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_expz(v_R, v_z, R, z, med_z, params=[1/8.,1/8.,np.log10(50.),1/8.,1/8.,np.log10(50.),0.,0.,0.01]):
vo = 0.
hRsigmaR, hzsigmaR, sigmaR, hRsigmaz, hzsigmaz, sigmaz, v_Ro, v_zo, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = sigmaR*np.exp(-hRsigmaR*(R-_R0)-hzsigmaR*(z-med_z))
sigma_fRz_z = sigmaz*np.exp(-hRsigmaz*(R-_R0)-hzsigmaz*(z-med_z))
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-v_Ro)**2)/(2*sigma_fRz_R**2)
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-v_zo)**2)/(2*sigma_fRz_z**2)
contA = (contfrac)*(1./(100*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*100**2))
contE_z = (-(v_z-vo)**2/(2*100**2))
As = np.dstack([A_R, A_z, np.ones(len(v_R))*contA, np.ones(len(v_R))*contA])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def ellipsoid_old(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.01]):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, alpha_0,alpha_1, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = (a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR))
sigma_fRz_z = (a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz))
tana= alpha_0+alpha_1*z/R #+params[11]*(z/R)**2.
sig2rz= (sigma_fRz_R**2.-sigma_fRz_z**2.)*tana/(1.-tana**2.)
#Do likelihood
out= 0.
for ii in range(len(v_R)):
vv= np.array([v_R[ii],v_z[ii]])
VV= np.array([[sigma_fRz_R[ii]**2.+cov_vRvTvz[ii,0,0],
sig2rz[ii]+cov_vRvTvz[ii,0,2]],
[sig2rz[ii]+cov_vRvTvz[ii,0,2],
sigma_fRz_z[ii]**2.+cov_vRvTvz[ii,2,2]]])
outVV= np.array([[sigmacont**2.+cov_vRvTvz[ii,0,0],
cov_vRvTvz[ii,0,2]],
[cov_vRvTvz[ii,0,2],
sigmacont**2.+cov_vRvTvz[ii,2,2]]])
#print VV, outVV, numpy.linalg.det(VV), numpy.linalg.det(outVV)
detVV= np.linalg.det(VV)
if detVV < 0.: return -np.finfo(np.dtype(np.float64)).max
'''
out += np.log(contfrac/np.sqrt(np.linalg.det(outVV))\
*np.exp(-0.5*np.dot(vv,
np.dot(np.linalg.inv(outVV),vv)))
+(1.-contfrac)/np.sqrt(detVV)
*np.exp(-0.5*np.dot(vv,
np.dot(np.linalg.inv(VV),vv))))
'''
Bs = np.array([contfrac/np.sqrt(np.linalg.det(outVV)),
(1.-contfrac)/np.sqrt(detVV)])
As = np.array([-0.5*np.dot(vv,np.dot(np.linalg.inv(outVV),vv)),
-0.5*np.dot(vv,np.dot(np.linalg.inv(VV),vv))])
out += logsumexp(As, b=Bs)
return out
def ellipsoid(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.01]):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, alpha_0,alpha_1, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
v_R0, v_z0 = 0., 0.
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = (a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR))
sigma_fRz_z = (a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz))
tana= alpha_0+alpha_1*z/R #+params[11]*(z/R)**2.
sig2rz= (sigma_fRz_R**2.-sigma_fRz_z**2.)*tana/(1.-tana**2.)
vvs = np.zeros((len(v_R), 2))
vvs[:,0] = v_R-v_R0
vvs[:,1] = v_z-v_z0
VVs = np.zeros((len(v_R),2,2))
VVs[:,0,0] = sigma_fRz_R**2.+cov_vRvTvz[:,0,0]
VVs[:,0,1] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,0] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,1] = sigma_fRz_z**2.+cov_vRvTvz[:,2,2]
outVVs = np.zeros((len(v_R),2,2))
outVVs[:,0,0] = sigmacont**2.+cov_vRvTvz[:,0,0]
outVVs[:,0,1] = cov_vRvTvz[:,0,2]
outVVs[:,1,0] = cov_vRvTvz[:,0,2]
outVVs[:,1,1] = sigmacont**2.+cov_vRvTvz[:,2,2]
detVVs = np.linalg.det(VVs)
if (detVVs < 0.).any():
return -np.finfo(np.dtype(np.float64)).max
detoutVVs = np.linalg.det(outVVs)
vvVVvv = np.einsum('ij,ij->i', vvs, np.einsum('aij,aj->ai', np.linalg.inv(VVs), vvs))
vvoutVVvv = np.einsum('ij,ij->i', vvs, np.einsum('aij,aj->ai', np.linalg.inv(outVVs), vvs))
#Do likelihood
out= 0.
Bs = np.dstack([contfrac/(2*np.pi*np.sqrt(detoutVVs)), (1.-contfrac)/(2*np.pi*np.sqrt(detVVs))])[0]
As = np.dstack([-0.5*vvoutVVvv, -0.5*vvVVvv])[0]
logsums = logsumexp(As, b=Bs, axis=1)
out = np.sum(logsums)
return out
def ellipsoid_varying_v0(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.,0.,0.01]):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, alpha_0,alpha_1, v_R0, v_z0, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = (a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR))
sigma_fRz_z = (a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz))
tana= alpha_0+alpha_1*z/R #+params[11]*(z/R)**2.
sig2rz= (sigma_fRz_R**2.-sigma_fRz_z**2.)*tana/(1.-tana**2.)
vvs = np.zeros((len(v_R), 2))
vvs[:,0] = v_R-v_R0
vvs[:,1] = v_z-v_z0
VVs = np.zeros((len(v_R),2,2))
VVs[:,0,0] = sigma_fRz_R**2.+cov_vRvTvz[:,0,0]
VVs[:,0,1] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,0] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,1] = sigma_fRz_z**2.+cov_vRvTvz[:,2,2]
outVVs = np.zeros((len(v_R),2,2))
outVVs[:,0,0] = sigmacont**2.+cov_vRvTvz[:,0,0]
outVVs[:,0,1] = cov_vRvTvz[:,0,2]
outVVs[:,1,0] = cov_vRvTvz[:,0,2]
outVVs[:,1,1] = sigmacont**2.+cov_vRvTvz[:,2,2]
detVVs = np.linalg.det(VVs)
if (detVVs < 0.).any():
return -np.finfo(np.dtype(np.float64)).max
detoutVVs = np.linalg.det(outVVs)
vvVVvv = np.einsum('ij,ij->i', vvs, np.einsum('aij,aj->ai', np.linalg.inv(VVs), vvs))
vvoutVVvv = np.einsum('ij,ij->i', vvs, np.einsum('aij,aj->ai', np.linalg.inv(outVVs), vvs))
#Do likelihood
out= 0.
Bs = np.dstack([contfrac/(2*np.pi*np.sqrt(detoutVVs)), (1.-contfrac)/(2*np.pi*np.sqrt(detVVs))])[0]
As = np.dstack([-0.5*vvoutVVvv, -0.5*vvVVvv])[0]
logsums = logsumexp(As, b=Bs, axis=1)
out = np.sum(logsums)
return out
def ellipsoid_v0_R(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,1/10.,0.,1/10.,0.,0.01]):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, alpha_0,alpha_1, h_vr0, v_R0, h_vz0, v_z0, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
v_R0_R = v_R0*np.exp((R-_R0)*h_vr0)
v_z0_R = v_z0*np.exp((R-_R0)*h_vz0)
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = (a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR))
sigma_fRz_z = (a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz))
tana= alpha_0+alpha_1*z/R #+params[11]*(z/R)**2.
sig2rz= (sigma_fRz_R**2.-sigma_fRz_z**2.)*tana/(1.-tana**2.)
vvs = np.zeros((len(v_R), 2))
vvs[:,0] = v_R-v_R0_R
vvs[:,1] = v_z-v_z0_R
VVs = np.zeros((len(v_R),2,2))
VVs[:,0,0] = sigma_fRz_R**2.+cov_vRvTvz[:,0,0]
VVs[:,0,1] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,0] = sig2rz+cov_vRvTvz[:,0,2]
VVs[:,1,1] = sigma_fRz_z**2.+cov_vRvTvz[:,2,2]
outVVs = np.zeros((len(v_R),2,2))
outVVs[:,0,0] = sigmacont**2.+cov_vRvTvz[:,0,0]
outVVs[:,0,1] = cov_vRvTvz[:,0,2]
outVVs[:,1,0] = cov_vRvTvz[:,0,2]
outVVs[:,1,1] = sigmacont**2.+cov_vRvTvz[:,2,2]
detVVs = np.linalg.det(VVs)
if (detVVs < 0.).any():
return -np.finfo(np.dtype(np.float64)).max
detoutVVs =
|
np.linalg.det(outVVs)
|
numpy.linalg.det
|
import numpy as np
import argparse
import time
import re
import sys
from operator import itemgetter
parser = argparse.ArgumentParser(description="plot data in A3C log file and update it periodically")
parser.add_argument('filename')
parser.add_argument('-x', '--x-column', type=int, default=1,
help="column index of x-axis (0 origin)")
parser.add_argument('-y', '--y-column', type=int, default=2,
help="column index of y-axis (0 origin)")
parser.add_argument('-a', '--average-number-of-samples', dest="ans", type=int, default=100,
help="average number of samples")
parser.add_argument('-s', '--scale', type=float, default=1e6,
help="scale factor: data in x-column is divided by SCALE")
parser.add_argument('-xl', '--xlabel', default="M steps",
help="label of x-axis")
parser.add_argument('-yl', '--ylabel', default="Score",
help="label of y-axis")
parser.add_argument('-t', '--title', default=None,
help="title of figure")
parser.add_argument('-n', '--interval', type=int, default=10,
help="interval of refresh (0 means no refresh)")
parser.add_argument('-e', '--endmark', default="END",
help="End Mark of in reward line")
parser.add_argument('--save', action='store_true',
help="save graph to file 'filename.png' and don't display it")
parser.add_argument('-i', '--info', choices=["r", "lives", "s", "tes", "v", "pr"], default="r",
help="information in y-axis : r (reward), lives (OHL), s (OHL) tes (OHL), v, pr (psc-reward)")
def read_data(f):
data = []
line = f.readline()
while line != "":
match = prog.match(line)
if match:
t = float(match.group(1))
s = float(match.group(2))
r = float(match.group(3))
data.append([t, s, r])
line = f.readline()
return data
def draw_graph(ax, data):
ans = args.ans
if len(data) < 5:
return
elif len(data) < args.ans:
ans = len(data) - 1
# sort data along args.x_column and make it np.array again
data = sorted(data, key=itemgetter(args.x_column))
data = np.array(data)
x = data[:, args.x_column]
y = data[:, args.y_column]
x_max = np.max(x)
x_min = np.min(x)
y_max = np.max(y)
y_min = np.min(y)
# print("ymax=", y_max, "ymin=", y_min)
y_width = y_max - y_min
if y_width == 0:
if y_max == 0:
y_width = 1.0
else:
y_min = 0
y_width = y_max
ax.set_xlim(xmax = x_max / args.scale)
ax.set_xlim(xmin = 0)
ax.set_ylim(ymax = y_max + y_width * 0.05)
ax.set_ylim(ymin = y_min - y_width * 0.05)
x = x / args.scale
ax.plot(x, y, ',')
weight = np.ones(ans, dtype=np.float)/ans
y_average = np.convolve(y, weight, 'valid')
rim = ans - 1
rim_l = rim // 2
rim_r = rim - rim_l
ax.plot(x[rim_l:-rim_r], y_average)
ax.set_xlabel(args.xlabel)
ax.set_ylabel(args.ylabel)
ax.grid(linewidth=1, linestyle="-", alpha=0.1)
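# Added note: the block above smooths the scatter with an `ans`-sample running average
# built from a uniform convolution kernel; for example
#   np.convolve(np.arange(10.), np.ones(3) / 3.0, 'valid')
# yields the 3-point moving averages [1., 2., 3., 4., 5., 6., 7., 8.].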
def draw_ohl_graph(ax, data):
# sort data along args.x_column and make it np.array again
all_data = sorted(data, key=itemgetter(args.x_column))
scores = list({e[0] for e in all_data})
scores.sort()
print("scores=", scores)
np_all_data = np.array(all_data)
all_x = np_all_data[:, args.x_column]
all_y = np_all_data[:, args.y_column]
x_max = np.max(all_x)
x_min = np.min(all_x)
y_max = np.max(all_y)
y_min = np.min(all_y)
# print("ymax=", y_max, "ymin=", y_min)
y_width = y_max - y_min
if y_width == 0:
if y_max == 0:
y_width = 1.0
else:
y_min = 0
y_width = y_max
ax.set_xlim(xmax = x_max / args.scale)
ax.set_xlim(xmin = 0)
ax.set_ylim(ymax = y_max + y_width * 0.05)
ax.set_ylim(ymin = y_min - y_width * 0.05)
for score in scores:
# print("score=", score)
data = list(filter(lambda e: e[0] == score, all_data))
data = np.array(data)
x = data[:, args.x_column]
y = data[:, args.y_column]
x = x / args.scale
ans = args.ans
if len(data) < 5:
ax.plot(x, y, '.', label=str(score))
continue
elif len(data) * 0.1 < args.ans:
ans = int(len(data) * 0.1)
if ans < 4:
ans = 4
# print("ans=", ans)
weight =
|
np.ones(ans, dtype=np.float)
|
numpy.ones
|
# -*- coding:utf-8 -*-
"""
Created on Aug 8, 2011
@author: grant
"""
import math
import numpy
from OpenGL import GL, GLU
import TriModel
# TODO: try and remove cgkit, use numpy matrix instead
from cgkit.cgtypes import quat
import numpyTransform
class Joint:
def __init__(self, *args, **kwargs):
'''
Function Signatures:
Joint()
Joint(location, ...)
Arguments {default value}:
location
array like object of form [x,y,z] containing x, y, and z coordinates
of this Joint. { [0,0,0] }
Keywords:
parentJoint
Joint object of which this Joint is a child. {None}
models
TriModel object or list of TriModel objects that represent bones
that are attached to this joint. { [] }
name
Name of joint
initialOrientation
quaternion (type cgkit.cgtypes.quat) representing the initial
orientation. { quat(1,0,0,0) }
showAxis
Boolean that determines whether or not a 3D representation of the
Joint is visible. { False }
axisScale
Number that determines the size of the drawn axis. Must be greater
than 0. { 1.0 }
'''
# self.translateMat = cgtypes.mat4.identity()
self.scaleMat = numpy.matrix(numpy.identity(4))
self.rotateMat = numpy.matrix(numpy.identity(4))
if len(args) > 0:
self.location = numpy.array(args[0], dtype=float)
else:
self.location = numpy.array([0.0, 0.0, 0.0], dtype=float)
self.initialLocationMat = numpy.matrix([[1.0, 0.0, 0.0, self.location[0]],
[0.0, 1.0, 0.0, self.location[1]],
[0.0, 0.0, 1.0, self.location[2]],
[0.0, 0.0, 0.0, 1.0]])
self.translateMat = numpy.matrix([[1.0, 0.0, 0.0, self.location[0]],
[0.0, 1.0, 0.0, self.location[1]],
[0.0, 0.0, 1.0, self.location[2]],
[0.0, 0.0, 0.0, 1.0]])
self.transformICP = numpy.matrix(numpy.identity(4))
self.childJoints = []
# self.scale = 1.0
self.locationUnityScale = self.location.copy()
self.type = 'ball'
self.length = 10.0
self.proximodistalVec = numpy.array([1.0, 0.0, 0.0])
self.secondaryVec = numpy.array([0.0, 1.0, 0.0])
self.tertiaryVec = numpy.array([0.0, 0.0, 1.0])
self.proximodistalVecTransformed = numpy.array([1.0, 0.0, 0.0])
self.secondaryVecTransformed = numpy.array([0.0, 1.0, 0.0])
self.tertiaryVecTransformed = numpy.array([0.0, 0.0, 1.0])
self.proximodistalVecScaled = self.length * self.proximodistalVec
self.proximodistalVecTransformedScaled = self.length * self.proximodistalVecTransformed
self.DOFvec = numpy.array([0, 0, 10.0])
self.DOFangle = math.radians(45.0)
self.DOFtrans = 5.0
if 'parentJoint' in kwargs and isinstance(kwargs['parentJoint'], Joint):
self.parentJoint = kwargs['parentJoint']
self.parentJoint.childJoints.append(self)
else:
self.parentJoint = None
self.models = []
if 'models' in kwargs:
try:
for model in kwargs['models']:
if isinstance(model, TriModel):
self.models.append(model)
self.models[-1].setJoint(self)
except TypeError:
if isinstance(kwargs['models'], TriModel):
self.models.append(kwargs['models'])
self.models[-1].setJoint(self)
if 'initialOrientation' in kwargs and isinstance(kwargs['initialOrientation'], quat):
self.orientation = kwargs['initialOrientation']
else:
self.orientation = quat(1, 0, 0, 0)
self.xAngle = 0.0
self.yAngle = 0.0
self.zAngle = 0.0
if 'showAxis' in kwargs and isinstance(kwargs['showAxis'], bool):
self.showAxis = kwargs['showAxis']
else:
self.showAxis = False
if 'axisScale' in kwargs and kwargs['axisScale'] > 0.0:
self.axisScale = kwargs['axisScale']
else:
self.axisScale = 1.0
if 'name' in kwargs:
self.name = kwargs['name']
else:
self.name = 'Joint'
for model in self.models:
model.initialRotationCenter = self.location.copy()
if self.parentJoint is None:
self.initalLocationRelativeToParentJoint = self.location.copy()
self.initialRelativeOrientationFromParent = self.orientation * quat(1, 0, 0, 0).inverse()
else:
self.initalLocationRelativeToParentJoint = self.location - self.parentJoint.location
self.initialRelativeOrientationFromParent = self.orientation * self.parentJoint.orientation.inverse()
self.relativeOrientationFromParent = quat(self.initialRelativeOrientationFromParent)
self.createAxis(self.showAxis)
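# Added construction sketch (illustrative only; creating a Joint also builds its axis
# geometry through TriModel, so these lines are not executed here):
#   root = Joint([0.0, 0.0, 0.0], name='Pelvis', showAxis=True, axisScale=2.0)
#   femur = Joint([0.0, -10.0, 0.0], parentJoint=root, name='Femur')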
def translate(self, coord, absolute=True):
'''
Arguments {Default value}
coord
array like object with dimensions 1x3 in format [x,y,z]
absolute
boolean that tells whether coord is the point to set joint to,
or the step by which to move the joint from its current location
'''
coord = numpy.array(coord)
if coord.shape != (3,):
raise Exception("Incorrect input parameters")
if absolute:
self.translateMat = numpyTransform.translation(coord)
else:
self.translateMat *= numpyTransform.translation(coord)
def rotate(self, *args, **kwargs):
'''
Function Signatures:
rotate(q, ...)
rotate(mat, ...)
rotate(angle, axis, ...)
rotate(xAngle, yAngle, zAngle, ...)
rotate(elevation, azimuth, spin, sphericalCoord=True, ...)
Arguments {default value}:
q
quaternion (cgkit.cgtypes.quat) that defines joint orientation
(relative or absolute)
mat
4x4 rotation matrix (cgkit.cgtypes.mat4) that defines joint orientation
angle
angle (degrees or radians, radians default) which to rotate around
axis
axis
vector [i,j,k] which defines axis to rotate around
xAngle
angle (degrees or radians, radians default) which to rotate around
x axis
yAngle
angle (degrees or radians, radians default) which to rotate around
y axis
zAngle
angle (degrees or radians, radians default) which to rotate around
z axis
elevation
elevation angle (spherical coordinate system)
azimuth
azimuth angle (spherical coordinate system)
spin
spin angle around vector defined by elevation and azimuth
Keywords:
relative
defines whether the change in orientation is relative or absolute
{False}
updateModels
flag that determines if child models should be updated {True}
angleOrder
string that determines the order that Euler angle rotations are
applied. {'xyz'}
unitsDegrees
flag that indicates what units the passed angles are in, degrees or
radians. {False}
sphericalCoord
flag that indicates passed angles are spherical coordinates. In the
spherical coordinate system, elevation rotates around the x axis
first, then azimuth rotates around the y axis, finally spin rotates
around the vector created by elevation and azimuth {False}
'''
if 'relative' in kwargs:
relative = kwargs['relative']
else:
relative = False
if 'updateModels' in kwargs:
updateModels = kwargs['updateModels']
else:
updateModels = True
if 'sphericalCoord' in kwargs:
sphericalCoord = kwargs['sphericalCoord']
else:
sphericalCoord = False
# the baseOrientation of the joint is either its current orientation, relativeAngle=True
# or the baseOrientation is the initial relative orientation from the parent joint, relativeAngle=False
if relative:
baseOrientation = quat(self.orientation)
# change by original orientation difference, to regain original orientation, relative to parent
elif self.parentJoint is None:
baseOrientation = self.initialRelativeOrientationFromParent * quat(1, 0, 0, 0)
else:
baseOrientation = self.initialRelativeOrientationFromParent * self.parentJoint.orientation
if len(args) == 1: # Quaternion rotation
if isinstance(args[0], quat):
rotateMat = args[0].toMat4()
else:
rotateMat = args[0]
elif len(args) == 2: # angle and axis rotation
angle = args[0]
axis = args[1]
# convert angle units to radians if required
if 'unitsDegrees' in kwargs and kwargs['unitsDegrees']:
angle = math.radians(angle)
angle = NormalizeAngleRad(angle)
# create rotate matrix
rotateMat = numpyTransform.rotation(angle, axis, N=4)
elif len(args) == 3: # Euler angle rotation
xAngle = args[0]
yAngle = args[1]
zAngle = args[2]
if 'angleOrder' in kwargs and not sphericalCoord:
angleOrder = kwargs['angleOrder'].lower()
if len(angleOrder) != 3 or angleOrder.find('x') < 0 or angleOrder.find('y') < 0 or angleOrder.find('z') < 0:
raise Exception('invalid angle order string')
else:
angleOrder = 'xyz'
if 'unitsDegrees' in kwargs and kwargs['unitsDegrees']:
xAngle = math.radians(xAngle)
yAngle = math.radians(yAngle)
zAngle = math.radians(zAngle)
if 'relative' in kwargs and kwargs['relative']:
self.xAngle += xAngle
self.yAngle += yAngle
self.zAngle += zAngle
else:
self.xAngle = xAngle
self.yAngle = yAngle
self.zAngle = zAngle
if sphericalCoord:
self.xAngle = NormalizeAngleRad(self.xAngle, -math.pi / 2, math.pi / 2, math.pi)
self.yAngle = NormalizeAngleRad(self.yAngle)
self.zAngle = NormalizeAngleRad(self.zAngle)
else:
self.xAngle = NormalizeAngleRad(self.xAngle)
self.yAngle = NormalizeAngleRad(self.yAngle)
self.zAngle = NormalizeAngleRad(self.zAngle)
rotateMat = numpy.matrix(numpy.identity(4))
# create orientation quaternion by multiplying rotations around local
# x,y, and z axis
# TODO: maybe flip the order of this rotation application?
# FIXME: Spherical rotation not working
for i in xrange(3):
if angleOrder[i] == 'x':
rotateMat *= numpyTransform.rotation(self.xAngle, [1.0, 0.0, 0.0], N=4)
if angleOrder[i] == 'y':
rotateMat *= numpyTransform.rotation(self.yAngle, [0.0, 1.0, 0.0], N=4)
if angleOrder[i] == 'z':
rotateMat *= numpyTransform.rotation(self.zAngle, [0.0, 0.0, 1.0], N=4)
else: # invalid signature
raise Exception("Invalid Function Signature")
if relative:
self.rotateMat *= rotateMat
else:
self.rotateMat = rotateMat
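# Added call-pattern sketch for rotate() (angles in radians unless unitsDegrees=True;
# illustrative only, not executed):
#   joint.rotate(quat(1, 0, 0, 0))                       # quaternion
#   joint.rotate(math.pi / 2, [0.0, 0.0, 1.0])           # angle + axis
#   joint.rotate(0.0, 0.0, math.pi, angleOrder='zyx')    # Euler angles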
# def setScale(self,*args,**kwargs):
# #TODO:remove after mat4 transformation is done, also rename scaleTempname to scale, remove scale float value
# self.scaleTempname(*args,**kwargs)
def scale(self, *args, **kwargs):
'''
Arguments {Default value}
scale(scale, ...)
scale(scaleX,scaleY,scaleZ, ...)
scale([scaleX,scaleY,scaleZ], ...)
scale
scale in the X,Y, and Z direction
scaleX
scale in the X dimension
scaleY
scale in the Y dimension
scaleZ
scale in the Z dimension
keyword arguments
absolute {True}
boolean that tells whether scale is the new scale (True)
or an amount to adjust current scale by (False)
'''
if len(args) == 1:
scale = numpy.array(args[0], dtype=float)
if scale.shape != (3,):
if scale.shape == ():
scale = numpy.repeat(scale, 3)
else:
scale = numpy.repeat(scale[0], 3)
elif len(args) == 3:
scale = numpy.array([args[0], args[1], args[2]], dtype=float)
if 'absolute' in kwargs and kwargs['absolute'] is False:
self.scaleMat *= numpyTransform.scaling(scale, N=4)
else:
self.scaleMat = numpyTransform.scaling(scale, N=4)
def createAxis(self, axisVisible):
self.xAxis = TriModel.createCone(self.axisScale / 4, self.axisScale, self.location, name='axis_X', joint=self, axis='x', updateOnlyFromGrandparentJoints=True, visible=axisVisible, color=[1.0, 0.0, 0.0, 1.0])
self.yAxis = TriModel.createCone(self.axisScale / 4, self.axisScale, self.location, name='axis_Y', joint=self, axis='y', updateOnlyFromGrandparentJoints=True, visible=axisVisible, color=[0.0, 1.0, 0.0, 1.0])
self.zAxis = TriModel.createCone(self.axisScale / 4, self.axisScale, self.location, name='axis_Z', joint=self, axis='z', updateOnlyFromGrandparentJoints=True, visible=axisVisible, color=[0.0, 0.0, 1.0, 1.0])
def OpenGLPaint(self, colorDrivenMaterial=None, useCallLists=True, parentTransform=numpy.matrix(numpy.identity(4))):
# push matrix
GL.glPushMatrix()
# matrix transformations steps: (applied in reverse order)
# 1: move model initial rotation center to origin
# 2: scale model
# 3: rotate model to new orientation
# 4: move model to parent joint position
if self.name == 'Neck':
pass
GL.glTranslatef(self.translateMat[0, 3], self.translateMat[1, 3], self.translateMat[2, 3]) # aka GL.glMultMatrixf(numpy.array(self.translateMat))
GL.glMultMatrixf(numpy.array(self.rotateMat).T) # need to transpose this because numpy matrices are row-major but OpenGl is expecting column-major matrix
# axis, angle = numpyTransform.axisAngleFromMatrix(self.rotateMat, angleInDegrees=True)
# GL.glRotated(angle, axis[0], axis[1], axis[2])
GL.glScalef(self.scaleMat[0, 0], self.scaleMat[1, 1], self.scaleMat[2, 2])
GL.glTranslatef(-self.initialLocationMat[0, 3], -self.initialLocationMat[1, 3], -self.initialLocationMat[2, 3])
# if self.name == 'Neck':
# print 'Neck Draw Transform'
# print 'Rotation:'
# print self.rotateMat
# print 'Translation:'
# print self.translateMat
# print 'Scale'
# print self.scaleMat
# print 'Original location'
# print self.initialLocationMat
# print 'Transform'
# tform = self.translateMat * self.rotateMat * self.scaleMat * self.initialLocationMat.I
# print tform
# draw models
for model in self.models:
model.OpenGLPaint(colorDrivenMaterial, useCallLists)
# recursivly paint child joints
for childJoint in self.childJoints:
childJoint.OpenGLPaint(colorDrivenMaterial, useCallLists)
# pop matrix
GL.glPopMatrix()
def transformVertices(self, baseTransform=numpy.matrix(numpy.identity(4)), modelID=None):
# create transform matrix
transform = numpy.matrix(baseTransform)
transform *= self.translateMat
transform *= self.rotateMat
transform *= self.scaleMat
transform *= self.initialLocationMat.I # transform *= numpyTransform.translation( (-self.initialLocationMat[0,3], -self.initialLocationMat[1,3], -self.initialLocationMat[2,3]) )
# self.location = (transform * numpy.matrix([[self.locationUnityScale[0]], [self.locationUnityScale[1]], [self.locationUnityScale[2]], [1.0]])).getA().squeeze()[:3]
self.location = numpyTransform.transformPoints(transform, self.locationUnityScale)
transformRotScaleOnly = numpy.matrix(numpy.identity(4))
transformRotScaleOnly[:3, :3] = transform[:3, :3]
self.proximodistalVecTransformed = numpyTransform.transformPoints(transformRotScaleOnly, self.proximodistalVec[numpy.newaxis, :]).squeeze()
self.proximodistalVecTransformedScaled = numpyTransform.transformPoints(transformRotScaleOnly, self.length * self.proximodistalVec[numpy.newaxis, :]).squeeze()
self.secondaryVecTransformed = numpyTransform.transformPoints(transformRotScaleOnly, self.secondaryVec[numpy.newaxis, :]).squeeze()
self.tertiaryVecTransformed = numpyTransform.transformPoints(transformRotScaleOnly, self.tertiaryVec[numpy.newaxis, :]).squeeze()
for model in self.models:
model.transformVertices(transform, modelID)
for childJoint in self.childJoints:
childJoint.transformVertices(transform, modelID)
def getCummulativeTransform(self, jointID, baseTransform=numpy.matrix(numpy.identity(4))):
transform = numpy.matrix(baseTransform)
transform *= self.translateMat
transform *= self.rotateMat
transform *= self.scaleMat
transform *= self.initialLocationMat.I
if id(self) == jointID:
return transform
else:
retTform = None
for childJoint in self.childJoints:
tempTform = childJoint.getCummulativeTransform(jointID, transform)
if tempTform is not None:
retTform = tempTform
break
return retTform
def createKDTrees(self):
for model in self.models:
if model.visible and model.name[:5] != 'axis_': # ignore models that are not visible and models that illustrate the axis
model.createKDTrees()
for childJoint in self.childJoints:
childJoint.createKDTrees()
def getBoundingBox(self, baseTransform=numpy.matrix(numpy.identity(4))):
# TODO: change this to use lists and then use numpy to search for max and min along axis=0 instead of constantly comparing values
points = []
minPoint = None
maxPoint = None
# create transform matrix
transform =
|
numpy.matrix(baseTransform)
|
numpy.matrix
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #t-C #source: van Beijma et al. (2018)
initAGB_min = 233-72 #t-C
initAGB_max = 233 + 72 #t-C
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26 #years
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #fraction of carbon content in biomass
c_cont_po_plasma = 0.5454 #fraction of carbon content in biomass
tf = 201 #years
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S2nu = df2nu['Firewood_other_energy_use'].values
c_firewood_energy_S2pl = df2pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
decomp_emissions = df['C_remainAGB'].values
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM2nu = DynamicStockModel(t = df2nu['Year'].values, i = df2nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM2pl = DynamicStockModel(t = df2pl['Year'].values, i = df2pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr2nu, ExitFlag2nu = TestDSM2nu.dimension_check()
CheckStr2pl, ExitFlag2pl = TestDSM2pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort2nu, ExitFlag2nu = TestDSM2nu.compute_s_c_inflow_driven()
Stock_by_cohort2pl, ExitFlag2pl = TestDSM2pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S2nu, ExitFlag2nu = TestDSM2nu.compute_stock_total()
S2pl, ExitFlag2pl = TestDSM2pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C2nu, ExitFlag2nu = TestDSM2nu.compute_o_c_from_s_c()
O_C2pl, ExitFlag2pl = TestDSM2pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O2nu, ExitFlag2nu = TestDSM2nu.compute_outflow_total()
O2pl, ExitFlag2pl = TestDSM2pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS2nu, ExitFlag2nu = TestDSM2nu.compute_stock_change()
DS2pl, ExitFlag2pl = TestDSM2pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal2nu, ExitFlag2nu = TestDSM2nu.check_stock_balance()
Bal2pl, ExitFlag2pl = TestDSM2pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM2nu.o)
print(TestDSM2pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
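#Added sketch: the same inflow-driven workflow on a synthetic series, using only the
#methods already called above (illustrative, not executed):
#   demo = DynamicStockModel(t=np.arange(10), i=np.ones(10),
#                            lt={'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
#   demo.compute_s_c_inflow_driven(); demo.compute_o_c_from_s_c(); demo.compute_outflow_total()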
#%%
#Step (5): Biomass growth
#Model I Oil Palm Biomass Growth (Khasanah et al. (2015))
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between consecutive elements in 'flat_list_nucleus' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nucleus' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between consecutive elements in 'flat_list_plasma' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S2nu = df2nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S2pl = df2pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S2nu = df2nu['PH_Emissions_PO'].values
PH_Emissions_PO_S2pl = df2pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
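#added sanity check: with a 20-year half-life, exp(-k*hl) equals 0.5, i.e. half of the
#material remains after one half-life
assert abs(np.exp(-k * hl) - 0.5) < 1e-12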
#S2nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2nu(t,remainAGB_CH4_S2nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2nu
#set zero matrix
output_decomp_CH4_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S2nu[i:,i] = decomp_CH4_S2nu(t[:len(t)-i],remain_part_CH4_S2nu)
print(output_decomp_CH4_S2nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S2nu[:,i] = np.diff(output_decomp_CH4_S2nu[:,i])
i = i + 1
print(subs_matrix_CH4_S2nu[:,:4])
print(len(subs_matrix_CH4_S2nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2nu = subs_matrix_CH4_S2nu.clip(max=0)
print(subs_matrix_CH4_S2nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2nu = abs(subs_matrix_CH4_S2nu)
print(subs_matrix_CH4_S2nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2nu)
subs_matrix_CH4_S2nu = np.vstack((zero_matrix_CH4_S2nu, subs_matrix_CH4_S2nu))
print(subs_matrix_CH4_S2nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S2nu = (tf,1)
decomp_tot_CH4_S2nu = np.zeros(matrix_tot_CH4_S2nu)
i = 0
while i < tf:
decomp_tot_CH4_S2nu[:,0] = decomp_tot_CH4_S2nu[:,0] + subs_matrix_CH4_S2nu[:,i]
i = i + 1
print(decomp_tot_CH4_S2nu[:,0])
#S2pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2pl(t,remainAGB_CH4_S2pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2pl
#set zero matrix
output_decomp_CH4_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S2pl[i:,i] = decomp_CH4_S2pl(t[:len(t)-i],remain_part_CH4_S2pl)
print(output_decomp_CH4_S2pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S2pl[:,i] = np.diff(output_decomp_CH4_S2pl[:,i])
i = i + 1
print(subs_matrix_CH4_S2pl[:,:4])
print(len(subs_matrix_CH4_S2pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2pl = subs_matrix_CH4_S2pl.clip(max=0)
print(subs_matrix_CH4_S2pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2pl = abs(subs_matrix_CH4_S2pl)
print(subs_matrix_CH4_S2pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2pl)
subs_matrix_CH4_S2pl = np.vstack((zero_matrix_CH4_S2pl, subs_matrix_CH4_S2pl))
print(subs_matrix_CH4_S2pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S2pl = (tf,1)
decomp_tot_CH4_S2pl = np.zeros(matrix_tot_CH4_S2pl)
i = 0
while i < tf:
decomp_tot_CH4_S2pl[:,0] = decomp_tot_CH4_S2pl[:,0] + subs_matrix_CH4_S2pl[:,i]
i = i + 1
print(decomp_tot_CH4_S2pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#make the results as absolute values
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values)))
i = 0
while i < tf:
subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#make the results as absolute values
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S2nu,label='CH4_S2nu')
plt.plot(t,decomp_tot_CH4_S2pl,label='CH4_S2pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S2nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp_CO2_S2nu(t,remainAGB_CO2_S2nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2nu
#set zero matrix
output_decomp_CO2_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2nu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S2nu[i:,i] = decomp_CO2_S2nu(t[:len(t)-i],remain_part_CO2_S2nu)
print(output_decomp_CO2_S2nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_CO2_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_S2nu[:,i] = np.diff(output_decomp_CO2_S2nu[:,i])
i = i + 1
print(subs_matrix_CO2_S2nu[:,:4])
print(len(subs_matrix_CO2_S2nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2nu = subs_matrix_CO2_S2nu.clip(max=0)
print(subs_matrix_CO2_S2nu[:,:4])
#make the results as absolute values
subs_matrix_CO2_S2nu = abs(subs_matrix_CO2_S2nu)
print(subs_matrix_CO2_S2nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2nu)
subs_matrix_CO2_S2nu = np.vstack((zero_matrix_CO2_S2nu, subs_matrix_CO2_S2nu))
print(subs_matrix_CO2_S2nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2nu = (tf,1)
decomp_tot_CO2_S2nu = np.zeros(matrix_tot_CO2_S2nu)
i = 0
while i < tf:
decomp_tot_CO2_S2nu[:,0] = decomp_tot_CO2_S2nu[:,0] + subs_matrix_CO2_S2nu[:,i]
i = i + 1
print(decomp_tot_CO2_S2nu[:,0])
#S2pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)
def decomp_CO2_S2pl(t,remainAGB_CO2_S2pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2pl
#set zero matrix
output_decomp_CO2_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2pl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S2pl[i:,i] = decomp_CO2_S2pl(t[:len(t)-i],remain_part_CO2_S2pl)
print(output_decomp_CO2_S2pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_CO2_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_S2pl[:,i] = np.diff(output_decomp_CO2_S2pl[:,i])
i = i + 1
print(subs_matrix_CO2_S2pl[:,:4])
print(len(subs_matrix_CO2_S2pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2pl = subs_matrix_CO2_S2pl.clip(max=0)
print(subs_matrix_CO2_S2pl[:,:4])
#make the results as absolute values
subs_matrix_CO2_S2pl = abs(subs_matrix_CO2_S2pl)
print(subs_matrix_CO2_S2pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2pl)
subs_matrix_CO2_S2pl = np.vstack((zero_matrix_CO2_S2pl, subs_matrix_CO2_S2pl))
print(subs_matrix_CO2_S2pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2pl = (tf,1)
decomp_tot_CO2_S2pl = np.zeros(matrix_tot_CO2_S2pl)
i = 0
while i < tf:
decomp_tot_CO2_S2pl[:,0] = decomp_tot_CO2_S2pl[:,0] + subs_matrix_CO2_S2pl[:,i]
i = i + 1
print(decomp_tot_CO2_S2pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu
#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])
#make the results as absolute values
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl
#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])
#make the results as absolute values
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
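i = 0
while i < tf:
    decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
    i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting -- assumed completion of the truncated snippet, mirroring the CH4 plot above for the four CO2 scenarios
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S2nu,label='CO2_S2nu')
plt.plot(t,decomp_tot_CO2_S2pl,label='CO2_S2pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()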
import sys
import os
sys.path.append(os.path.relpath("./scripts"))
from lib import *
import numpy as np
import matplotlib.pyplot as plt
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 26
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
def get_throughputs(confs, format_str):
y_throughputs = []
for rate, nodes in confs:
logs_dir = os.path.join('archive', format_str.format(rate, nodes))
# logs_dir = os.path.join('archive', 'outlier_detection_{}_{}_optimizer_greedy_ns3'.format(rate, nodes))
# logs_dir = os.path.join('logs')
_, latencies = read_preprocess_latency_data(logs_dir)
_, throughputs = read_preprocess_throughput_data(logs_dir)
median_latency = np.percentile(latencies, 50)
        ten_latency = np.percentile(latencies, 10)
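        # assumed continuation -- the original snippet is truncated here; presumably the
        # median throughput of each configuration is collected and handed back to the caller
        y_throughputs.append(np.percentile(throughputs, 50))
    return y_throughputs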
#coding=utf-8
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FGSM tutorial on mnist using advbox tool.
FGSM method is non-targeted attack while FGSMT is targeted attack.
"""
import sys
sys.path.append("..")
import logging
logging.basicConfig(level=logging.INFO,format="%(filename)s[line:%(lineno)d] %(levelname)s %(message)s")
logger=logging.getLogger(__name__)
#import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
#pip install Pillow
from advbox.adversary import Adversary
from advbox.attacks.deepfool import DeepFoolAttack
from advbox.models.keras import KerasModel
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array,array_to_img
from keras.applications.resnet50 import decode_predictions
import keras
#pip install keras==2.1
def main(modulename,imagename):
    '''
    Keras' applications module provides models with pre-trained weights; these models can be
    used for prediction, feature extraction and fine-tuning.
    The pre-trained weights are downloaded to ~/.keras/models/ and loaded automatically with the model.
    '''
    # set to inference (test) mode
keras.backend.set_learning_phase(0)
model = ResNet50(weights=modulename)
logging.info(model.summary())
img = image.load_img(imagename, target_size=(224, 224))
imagedata = image.img_to_array(img)
#imagedata=imagedata[:, :, ::-1]
imagedata = np.expand_dims(imagedata, axis=0)
#logit fc1000
logits=model.get_layer('fc1000').output
    # in Keras a specific layer can be retrieved like this:
#base_model.get_layer('block4_pool').output)
# advbox demo
    # the raw data is not normalized, so bounds=(0, 255); KerasModel preprocesses internally
    # when predicting and computing gradients
    # for ImageNet normalization the std is 1 and the mean is [104, 116, 123]
m = KerasModel(
model,
model.input,
None,
logits,
None,
bounds=(0, 255),
channel_axis=3,
preprocess=([104, 116, 123],1),
featurefqueezing_bit_depth=8)
attack = DeepFoolAttack(m)
attack_config = {"iterations": 100, "overshoot": 10}
    # y is set to None and will be computed automatically
adversary = Adversary(imagedata[:, :, ::-1],None)
# deepfool non-targeted attack
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
print(
'attack success, adversarial_label=%d'
% (adversary.adversarial_label) )
        # the adversarial example is stored in adversary.adversarial_example
adversary_image=np.copy(adversary.adversarial_example)
        # cast the type: it was float, now convert it to uint8
#::-1 reverses the color channels, because Keras ResNet50 expects BGR instead of RGB
adversary_image=adversary_image[:,:,::-1]
adversary_image = np.array(adversary_image).astype("uint8").reshape([224,224,3])
logging.info(adversary_image - imagedata)
img=array_to_img(adversary_image)
img.save('adversary_image_nontarget.jpg')
print("deepfool non-target attack done")
attack = DeepFoolAttack(m)
attack_config = {"iterations": 100, "overshoot": 10}
adversary = Adversary(imagedata[:, :, ::-1],None)
tlabel = 489
adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# deepfool targeted attack
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
print(
'attack success, adversarial_label=%d'
% (adversary.adversarial_label) )
        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)
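        # the rest of this branch is truncated in the source; assumed completion mirroring
        # the non-targeted branch above
        # ::-1 reverses the color channels, because Keras ResNet50 expects BGR instead of RGB
        adversary_image = adversary_image[:, :, ::-1]
        adversary_image = np.array(adversary_image).astype("uint8").reshape([224, 224, 3])
        img = array_to_img(adversary_image)
        img.save('adversary_image_target.jpg')
        print("deepfool target attack done")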
import os
import glob
import numpy as np
from sklearn import svm
from sklearn import neural_network
from sklearn.externals import joblib
import cv2
import preprocessing
from defs import *
def load_data(desc, piece):
X = None
Y = None
ratio = piece_to_ratio[piece]
for piece_dir in pieces:
piece_class = 0
if piece == piece_dir:
piece_class = 1
for filename in glob.glob(os.path.join("feature_data", desc, str(ratio),
piece_dir, "*.npy")):
data = np.load(filename)
if X is None:
X = np.array(data)
Y = np.array([piece_class])
else:
X = np.vstack( (X, data) )
Y = np.hstack( (Y, [piece_class]) )
return (X, Y)
########################################################
#### ####
#### SIFT ####
#### ####
########################################################
def train_sift():
for piece in pieces:
X, Y = load_data("SIFT", piece)
clf = svm.SVC(class_weight=piece_weights[piece], probability=True)
clf.fit(X, Y)
joblib.dump(clf, "classifiers/classifier_sift_" + piece + ".pkl")
########################################################
#### ####
#### Dense SIFT ####
#### ####
########################################################
def train_dsift():
for piece in pieces:
X, Y = load_data("DSIFT", piece)
clf = svm.SVC(class_weight={0: 1, 1: 2})
clf.fit(X, Y)
joblib.dump(clf, "classifiers/classifier_dsift_" + piece + ".pkl")
########################################################
#### ####
#### HOG ####
#### ####
########################################################
def train_hog():
for piece in pieces:
X, Y = load_data_hog(piece)
clf = svm.SVC(class_weight=piece_weights[piece], probability=True)
clf.fit(X, Y)
joblib.dump(clf, "classifiers/classifier_hog_" + piece + ".pkl")
def load_data_hog(piece):
X = None
Y = None
ratio = piece_to_ratio[piece]
for piece_dir in pieces:
piece_class = 0
if piece == piece_dir:
piece_class = 1
for filename in glob.glob(os.path.join("feature_data", "HOG", str(ratio),
piece_dir, "*.npy")):
            data = np.load(filename)
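            # the remainder is truncated in the source; assumed completion mirroring load_data() above
            if X is None:
                X = np.array(data)
                Y = np.array([piece_class])
            else:
                X = np.vstack( (X, data) )
                Y = np.hstack( (Y, [piece_class]) )
    return (X, Y)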
#!/usr/bin/env python
import argparse
from cvfit import cvfit
import cv2
import cv2.ximgproc
import numpy as np
import os
import skimage
import skimage.color
import sys
# construct the argument parser and parse the arguments
def parse_args():
parser = argparse.ArgumentParser(prog=sys.argv[0], description="A command-line utility to test functionality of 3-D color homography color transfer model algorithm based on" \
"<NAME>., <NAME>., <NAME>. and <NAME>., 2017. 3D color homography model for photo-realistic color transfer re-coding. The Visual Computer, pp.1-11.")
parser.add_argument('-s', '--source', action='store', required=True, help="Source image filepath.")
parser.add_argument('-t', '--target', action='store', help="Target image filepath.")
parser.add_argument('-c', '--convert', action='store_true', help="Flag to indicate that we are simply converting the source image using precalculated homography matrix + shading LUT.")
parser.add_argument('-H', '--homography', action='store', required=True, help="Homography matrix (chromacity + shade-mapping interpolator function) filename, stored as compressed numpy archive.")
parser.add_argument('-r', '--rescale', type=float, default=1.0, help="Factor to scale images by before calculating homography matrix H and shading map matrix D.")
parser.add_argument('-o', '--output', action='store', help="Color-correct image filename if specified.")
parsed = parser.parse_args(sys.argv[1:])
return(parsed)
#cf_3D_H estimates a 2-D color homography color transfer model.
#
# CF_3D_H() returns the colour transfered source and the homography matrix
# image source according to the target image target
#
# Options (opt.*):
# * downsampling_res: specify the resolution of downsampled images
# for faster color transfer approximation. [] for disabling.
# * use_denoise: enable deniose by bilaterial filtering.
# * use_curve: enable tone-mapping esimation by polynomial models.
#
# Copyright 2018 <NAME> <<EMAIL>>, University of East
# Anglia.
# References:
# <NAME>., <NAME>., <NAME>.: Recoding color transfer as a
# color homography. In: British Machine Vision Conference. BMVA (2016)
def cf_3D_H(source, target, rescale=1.0, use_curve = True, use_denoise = True):
osource = source.reshape((-1,3)) #The original image in flattened n-pixel RGB format
osourceT = osource.T
if rescale != 1.0:
# downsampling
ssource = cv2.resize(source, (0,0), fx=rescale, fy=rescale)
starget = cv2.resize(target, (0,0), fx=rescale, fy=rescale)
else:
# use full res-images
ssource = source.copy()
starget = target.copy()
ssource = ssource.reshape((-1,3)) # Reshape to flat n-pixel RGB image
starget = starget.reshape((-1,3)) # Reshape to flat n-pixel RGB image
sshape = ssource.shape
ssourceT = ssource.T
stargetT = starget.T
ssshape = sshape
#Estimate 3D homography
P = np.vstack((ssourceT, np.ones((1,ssourceT.shape[1])))) #Stack row-wise
Q = np.vstack((stargetT, np.ones((1,stargetT.shape[1])))) #Stack row-wise
msk = (np.min(P,axis=0) > 1/255) & (np.min(Q,axis=0) > 1/255)
(H,err,d) = uea_H_from_x_als(P[:,msk],Q[:,msk],10)
#Apply 3D homography
Pe = H @ P #Apply chromatic homography projection
Pe = Pe[0:3,:]/Pe[3,:]
Pe = np.maximum(Pe,0) #Element-wise max - zeros-out under-saturated pixels
    Pe = np.minimum(Pe,1) #Element-wise min - clamps over-saturated pixels to one
#Brightness transfer
PeMean = np.mean(Pe[:,msk],axis=0).T # transformed brightness
TeMean = np.mean(stargetT[:,msk],axis=0).T # target brightness
if use_curve:
# estimate brightness transfer
pp = cvfit(PeMean,TeMean,'quad') # b-b mapping
else:
# histogram matching
pp = cvfit(PeMean,TeMean,'hist') # b-b mapping
#Re-apply to a higher res image
Pe = H @ np.vstack((osourceT, np.ones((1,osourceT.shape[1]))))
Pe = Pe[0:3,:]/Pe[3,:]
Pe = np.maximum(Pe,0) #Element-wise max - zeros-out under-saturated pixels
    Pe = np.minimum(Pe,1) #Element-wise min - clamps over-saturated pixels to one
n = Pe.shape[1] #Number of pixels (or columns)
PeMean = np.mean(Pe,axis=0).T # transformed brightness
luIdx = (1+np.floor(PeMean*999)).astype('uint') # Need to convert to integer to be used as index to lookup table
FMean = pp[luIdx]
    FMean = np.maximum(FMean,0) #Element-wise max - zeros-out negative values
D = FMean/(PeMean.reshape((-1,1))) # convert brightness change to shading - scaling factors
D[PeMean < (1/255)] = 1 # discard dark pixel shadings -- or scaling factor is equal to 1
Ei = Pe.T.reshape(source.shape)
ImD = D.reshape(source.shape[0:2]) #Reshape to source image size
if use_denoise: # denoise the shading field
grey = skimage.color.rgb2gray(source)
#https://people.csail.mit.edu/sparis/bf_course/slides/03_definition_bf.pdf
#Need to convert fields to 32-bit floats, otherwise stupid cv2 will error
ImD = cv2.ximgproc.jointBilateralFilter(im2float(grey), im2float(ImD), d=-1, sigmaColor=0.1, sigmaSpace=len(D)/16) #Heuristics for sigmaSpatial are 2% of length of image diagonal -- sigma color depends on mean/median of image gradients
ImD = im2double(ImD)
#Manually broadcast and reshape, otherwise it appears that the broadcasting doesn't happen the way I expect
ImD = np.repeat(ImD, 3).reshape((*ImD.shape,3))
Ei = np.minimum(np.maximum(Ei*ImD,0),1) #Now apply shading
return (Ei,H,pp)
def cf_3D_convert(source, H, pp, use_denoise=True):
#Re-apply to a higher res image
ssource = source.reshape((-1,3)) #Reshape to n-pixels by 3 columns for RGB
ssourceT = ssource.T
Pe = H @ np.vstack((ssourceT, np.ones((1,ssourceT.shape[1]))))
Pe = Pe[0:3,:]/Pe[3,:]
Pe = np.maximum(Pe,0) #Element-wise max - zeros-out under-saturated pixels
    Pe = np.minimum(Pe,1)
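    # the rest of cf_3D_convert() is truncated in the source; assumed completion that mirrors
    # the tail of cf_3D_H() above: re-apply the shading LUT pp and optionally denoise the
    # shading field before multiplying it back in
    PeMean = np.mean(Pe, axis=0).T
    luIdx = (1 + np.floor(PeMean*999)).astype('uint')
    FMean = np.maximum(pp[luIdx], 0)
    D = FMean/(PeMean.reshape((-1, 1)))
    D[PeMean < (1/255)] = 1
    Ei = Pe.T.reshape(source.shape)
    ImD = D.reshape(source.shape[0:2])
    if use_denoise:
        grey = skimage.color.rgb2gray(source)
        ImD = cv2.ximgproc.jointBilateralFilter(im2float(grey), im2float(ImD), d=-1, sigmaColor=0.1, sigmaSpace=len(D)/16)
        ImD = im2double(ImD)
    ImD = np.repeat(ImD, 3).reshape((*ImD.shape, 3))
    Ei = np.minimum(np.maximum(Ei*ImD, 0), 1)
    return Ei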
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 13:20:30 2019
recipes:
use :
cvr=run_ML(RI_proc=False,model_name='KRR',time_period=['2005','2019'],
cv={'rkfold':[5,5]},regressors=['ch4', 'anom_nino3p4', 'qbo_cdas'],
lms=None,plevels=82,swoosh_latlon=True, gridsearch=True)
to run a non-linear KRR model gridsearch with repeated k-folds
get best params and best estimator:
best_params = cvr.best_params_
best_model = cvr.best_estimator_
@author: shlomi
"""
# TODO: build GridSearchCV support directly, like multioutput regressors
# for various models
from strat_paths import work_chaim
from strat_paths import adams_path
from sklearn_xarray import RegressorWrapper
from xarray.core.dataset import Dataset
from pathlib import Path
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
sound_path = work_chaim / 'sounding'
def train_model_and_apply(train_period=['2004-08', '2019'],
test_period=['1984', '2004-07'], model_name='KRR',
regressors=['ch4', 'anom_nino3p4', 'qbo_cdas'],
param_grid=None, skip_cv=False, plevels=None,
model_params='KRR', lms=None, return_just_test=False):
import xarray as xr
from sklearn.metrics import r2_score
from aux_functions_strat import xr_order
from aux_functions_strat import dim_intersection
# first run grid search on train set:
train_args = dict(species='h2o', swoosh_field='combinedanomfillanom',
model_name=model_name,
RI_proc=False, time_period=train_period, cv={'rkfold': [5, 5]},
regressors=regressors, lms=lms, gridsearch=True,
lat_slice=[-60, 60], swoosh_latlon=False,
param_grid=param_grid, plevels=plevels,
data_file='swoosh_latpress-2.5deg.nc')
if not skip_cv:
print('running GridSearchCV first:')
cvr = run_ML(**train_args)
if lms is not None:
# for now support for just one level
model = cvr['best_model'].item()
else:
model = cvr.best_estimator_
ml_params = model.get_params()
args = train_args.copy()
args.update(cv=None, gridsearch=False, ml_params=ml_params)
rds_train = run_ML(**args)
model = rds_train
else:
ml_params_krr=dict(alpha=2.0, coef0=1, degree=2, gamma=1.0, kernel='rbf',
kernel_params=None)
args = train_args.copy()
if model_params is not None:
if model_params == 'KRR':
ml_params = ml_params_krr
else:
ml_params = None
args.update(cv=None, gridsearch=False, ml_params=ml_params)
rds_train = run_ML(**args)
model = rds_train
test_args = train_args.copy()
test_args.update(cv=None, gridsearch=False,
time_period=test_period)
Pset = PredictorSet(**test_args, loadpath=Path().cwd() / 'regressors')
X = Pset.pre_process()
Target = TargetArray(**test_args, loadpath=work_chaim)
y = Target.pre_process()
# if not skip_cv:
# predict = y.copy(data=model.predict(X))
# else:
if lms is not None:
predict = run_model_with_shifted_plevels(model, X, y, Target,
plevel=plevels, lms=lms,
just_predict=True)
predict = predict.rename({'regressors': 'lat'})
predict['lat'] = y.unstack('samples')['lat']
predict['level'] = y.unstack('samples')['level']
predict = predict.stack(regressors=['lat', 'level'])
else:
predict = model.predict(X)
predict = predict.rename({'regressors': 'samples'})
times = dim_intersection([predict, y], dropna=True)
y = y.sel(time=times)
predict = predict.sel(time=times)
rds_test = xr.Dataset()
rds_test['original'] = y
rds_test['predict'] = predict
r2 = r2_score(y, predict, multioutput='raw_values')
rds_test['r2'] = xr.DataArray(r2, dims=['samples'])
if not isinstance(model, ImprovedRegressor):
rds_test['params'] = xr.DataArray(model.coef_, dims=['samples', 'regressors'])
r2_adj = 1.0 - (1.0 - rds_test['r2']) * (len(y) - 1.0) / \
(len(y) - X.shape[1])
rds_test['r2_adj'] = r2_adj
rds_test['predict'].attrs = y.attrs
rds_test['resid'] = y - rds_test['predict']
rds_test['resid'].attrs = y.attrs
rds_test['resid'].attrs['long_name'] = 'Residuals'
rds_test = rds_test.unstack('samples')
# order:
rds_test = xr_order(rds_test)
# put coords attrs back:
for coord, attr in y.attrs['coords_attrs'].items():
rds_test[coord].attrs = attr
# remove coords attrs from original, predict and resid:
rds_test.original.attrs.pop('coords_attrs')
rds_test.predict.attrs.pop('coords_attrs')
rds_test.resid.attrs.pop('coords_attrs')
all_var_names = [x for x in rds_test.data_vars.keys()]
sample_types = [x for x in rds_test.data_vars.keys()
if 'time' in rds_test[x].dims]
feature_types = [x for x in rds_test.data_vars.keys()
if 'regressors' in rds_test[x].dims]
error_types = list(set(all_var_names) - set(sample_types +
feature_types))
rds_test.attrs['sample_types'] = sample_types
rds_test.attrs['feature_types'] = feature_types
rds_test.attrs['error_types'] = error_types
rds_test.attrs['sample_dim'] = 'time'
rds_test.attrs['feature_dim'] = 'regressors'
# add X to results:
rds_test['X'] = X
print(model)
das = [x for x in rds_train.results_ if x in rds_test]
# don't use r2...
rds_train = rds_train.results_[das]
rds = xr.concat([rds_train, rds_test], 'time')
rds = rds.sortby('time')
if return_just_test:
return rds_test
else:
return rds
class Parameters:
"""a parameters class for stratosphere gases modelling using ML methods"""
def __init__(self,
model_name='LR',
season=None,
# regressors_file='Regressors.nc',
swoosh_field='combinedanomfillanom',
regressors=None, # default None means all regressors
# reg_add_sub=None,
poly_features=None,
special_run=None,
reg_time_shift=None,
add_poly_reg=None,
data_name='swoosh',
species='h2o',
time_period=['1994', '2019'],
area_mean=False,
lat_slice=[-20, 20],
plevels=None,
# original_data_file='swoosh_lonlatpress-20deg-5deg.nc',
data_file='swoosh_latpress-2.5deg.nc'
,**kwagrs):
self.filing_order = ['data_name', 'field', 'model_name', 'season',
'reg_selection', 'special_run']
self.delimeter = '_'
self.model_name = model_name
self.season = season
self.lat_slice = lat_slice
self.plevels = plevels
self.reg_time_shift = reg_time_shift
self.poly_features = poly_features
# self.reg_add_sub = reg_add_sub
self.regressors = regressors
self.special_run = special_run
self.add_poly_reg = add_poly_reg
# self.regressors_file = regressors_file
# self.sw_field_list = ['combinedanomfillanom', 'combinedanomfill',
# 'combinedanom', 'combinedeqfillanom',
# 'combinedeqfill', 'combinedeqfillseas',
# 'combinedseas', 'combined']
self.swoosh_field = swoosh_field
self.run_on_cluster = False # False to run locally mainly to test stuff
self.data_name = data_name # merra, era5
self.species = species # can be T or phi for era5
self.time_period = time_period
self.area_mean = area_mean
self.data_file = data_file # original data filename (in work_path)
self.work_path = work_chaim
self.cluster_path = adams_path
# update attrs from dict containing keys as attrs and vals as attrs vals
# to be updated
def from_dict(self, d):
self.__dict__.update(d)
return self
def show(self, name='all'):
from aux_functions_strat import text_blue
if name == 'all':
for attr, value in vars(self).items():
text_blue(attr, end=" "), print('=', value, end=" ")
print('')
elif hasattr(self, name):
text_blue(name, end=" "), print('=', getattr(self, name),
end=" ")
print('')
# def load(self, name='regressors'):
# import xarray as xr
# if name == 'regressors':
# data = xr.open_dataset(self.regressors_file)
# elif name == 'original':
# data = xr.open_dataset(self.work_path + self.original_data_file)
# return data
def load_regressors(self):
from make_regressors import load_all_regressors
return load_all_regressors()
def select_model(self, model_name=None, ml_params=None):
# pick ml model from ML_models class dict:
ml = ML_Switcher()
if model_name is not None:
ml_model = ml.pick_model(model_name)
self.model_name = model_name
else:
ml_model = ml.pick_model(self.model_name)
# set external parameters if i want:
if ml_params is not None:
ml_model.set_params(**ml_params)
self.param_grid = ml.param_grid
return ml_model
class ML_Switcher(object):
def pick_model(self, model_name):
"""Dispatch method"""
# from sklearn.model_selection import GridSearchCV
self.param_grid = None
method_name = str(model_name)
# Get the method from 'self'. Default to a lambda.
method = getattr(self, method_name, lambda: "Invalid ML Model")
# if gridsearch:
# return(GridSearchCV(method(), self.param_grid, n_jobs=-1,
# return_train_score=True))
# else:
# Call the method as we return it
return method()
def LR(self):
from sklearn.linear_model import LinearRegression
return LinearRegression(n_jobs=-1, copy_X=True)
def RANSAC(self):
from sklearn.linear_model import RANSACRegressor
return RANSACRegressor(random_state=42)
def GPSR(self):
from gplearn.genetic import SymbolicRegressor
return SymbolicRegressor(random_state=42, n_jobs=1, metric='mse')
def LASSOCV(self):
from sklearn.linear_model import LassoCV
import numpy as np
return LassoCV(random_state=42, cv=5, n_jobs=-1,
                       alphas=np.logspace(-5, 1, 60))
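    def KRR(self):
        # assumed sketch, not from the source: the recipes above call run_ML with
        # model_name='KRR', so the switcher presumably exposes a kernel ridge model;
        # the defaults and param_grid below are illustrative (loosely following the
        # ml_params_krr dict used in train_model_and_apply)
        from sklearn.kernel_ridge import KernelRidge
        import numpy as np
        self.param_grid = {'alpha': np.logspace(-2, 2, 10), 'gamma': np.logspace(-2, 2, 10)}
        return KernelRidge(kernel='rbf', alpha=2.0, gamma=1.0)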
"""
Simple two-level AMR using TF
"""
import numpy as np
np.set_printoptions(edgeitems=30, linewidth=200, formatter=dict(float=lambda x: "%.3g" % x))
import tensorflow.compat.v1 as tf
from my_libs.utils import grid_points, int2digits, digits2int, linear_interp_coeff
import matplotlib.pyplot as plt
from matplotlib import tri as mtri
from matplotlib import collections as mc
from matplotlib.backends.backend_pdf import PdfPages
def my_show():
try:
pdf.savefig()
except:
plt.show()
def plot_amr(mesh, field, edges, colorbar=False):
fig1, ax1 = plt.subplots(figsize=(7,7))
triang = mtri.Triangulation(mesh[:,0], mesh[:,1], None)
ax1.set_aspect(1)
tpc = ax1.tripcolor(triang, field.ravel(), vmin=0, vmax=1)
if colorbar:
fig1.colorbar(tpc)
lines = mesh[edges]
pbc_flag = np.where(np.linalg.norm(lines[:,0]-lines[:,1],axis=1)< (N1+1))
lines = lines[pbc_flag]
ax1.add_collection(mc.LineCollection(lines, colors='r', linewidths=0.7)) #, colors=np.random.rand([len(edges),3]), linewidths=1.1))
my_show()
class amr_state_variables:
# def __init__(self, dim, shape_all, shape0, mask1, mask1_where, field0, field1, shared_interface, shared_edge3d, mask_all, field_all, edge_all, type_all=None, refine_threshold=0.001, periodic=True):
def __init__(self, dim, shape_all, shape0, field_all, type_all=None, refine_threshold=0.001, periodic=True, buffer=0, eval_freq=1):
# """"
# shared_interface: flag of shape [shape0, dim, 2]
# shared_edge3d: [shape0, dim, dim, 2, 2]
# """"
assert periodic, 'Must be periodic'
self.dim = dim
self.shape_all = tuple(shape_all)
self.n_all = np.prod(shape_all)
self.ijk2int = tf.convert_to_tensor([shape_all[1],1] if dim==2 else [shape_all[1]*shape_all[2],shape_all[2],1], dtype=tf.int64)
# edge_all = tf.tensordot(edge_all, ijk2int, 1)
self.shape0 = tuple(shape0)
shape0 = np.array(shape0)
self.n0 = np.prod(shape0)
shape1 = np.array(shape_all) // np.array(shape0)
self.shape1 = tuple(shape1)
self.shape1_plus1 = (-1,) + tuple(np.array(shape1)+1)
assert np.all(np.array(shape_all) == (np.array(shape0)*shape1)), 'Incompatible shapes'
self.interpolator1 = tf.constant(linear_interp_coeff(self.shape1).astype(np.float32))
# mesh0 = np.reshape(np.stack(np.meshgrid(*[np.arange(0,shape_all[d],shape1[d]) for d in range(dim)], indexing='ij'),-1), (-1, dim))
mesh0_0 = grid_points(shape0-1)
mesh0 = mesh0_0*shape1
self.mesh0 = tf.convert_to_tensor(mesh0)
# indices_per1_corner = np.reshape(np.stack(np.meshgrid(*[[0,N] for N in shape1], indexing='ij'),-1), (1,-1, dim))
indices_per1_corner = grid_points([1]*dim)*shape1
self.n_per1_corner = 2**dim
# self.mesh0_corner = tf.convert_to_tensor(((mesh0[:,None] + indices_per1_corner) % shape_all).reshape((-1,dim)))
self.mesh0_corner_ = tf.convert_to_tensor(((mesh0[:,None] + indices_per1_corner) % shape_all))
# indices_per1_whole = np.reshape(np.stack(np.meshgrid(*[np.arange(N+1) for N in shape1], indexing='ij'),-1), (1,-1, dim))
indices_per1_whole = grid_points(shape1)
self.indices_per1_whole = tf.constant(indices_per1_whole)
self.n_per1_whole = indices_per1_whole.shape[-2]
self.mesh0_whole = tf.convert_to_tensor(((mesh0[:,None] + indices_per1_whole) % shape_all))
# print(f'debug mesh0 {self.mesh0.shape} mesh0corner {self.mesh0_corner.shape} mesh0_whole {self.mesh0_whole.shape}')
self.field_all = tf.convert_to_tensor(field_all)
assert self.field_all.shape[-1] == 1, ValueError(f'Only one field implemented so far, got {self.field_all.shape[-1]}')
# self.mask1 = mask1
# self.mask1_where = mask1_where
# self.field0 = field0
# self.field1 = field1
# self.shared_interface = shared_interface
# self.shared_edge3d = shared_edge3d
self.periodic = periodic
self.buffer = buffer
self.eval_freq = eval_freq
# self.n1 = mask1_where.shape[0]
# self.n1_all = np.prod(shape0)
# self.mask_all = mask_all
# self.field_all = field_all
# self.edge_all = edge_all
type_all = np.zeros(self.shape_all+(1,), dtype=np.int32) if type_all is None else np.reshape(np.array(type_all, dtype=np.int32), self.shape_all+(1,))
self.type_all = tf.convert_to_tensor(type_all)
self.refine_threshold = refine_threshold
# mesh_all = grid_points(np.array(shape_all)-1)# np.reshape(np.stack(np.meshgrid(*[np.arange(N) for N in shape_all], indexing='ij'),-1), (-1, dim))
        mesh_all = grid_points(np.array(shape_all)-1)
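        # assumed continuation (the snippet is truncated here): keep the full-resolution
        # mesh around as a tensor for later lookups; the attribute name is illustrative
        self.mesh_all = tf.convert_to_tensor(mesh_all)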
import numpy as np
from os import path
import matplotlib.pyplot as plt
import sklearn
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import torch
import torch.nn as nn
import torch.optim as optim
from captum.attr import LayerConductance, LayerActivation, LayerIntegratedGradients
from captum.attr import IntegratedGradients, DeepLift, GradientShap, NoiseTunnel, FeatureAblation
boston = load_boston()
feature_names = boston.feature_names
X = boston.data
y = boston.target
torch.manual_seed(1234)
np.random.seed(1234)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
fig, axs = plt.subplots(nrows=3, ncols=5, figsize=(30, 20))
for i, (ax, col) in enumerate(zip(axs.flat, feature_names)):
x = X[:, i]
pf = np.polyfit(x, y, 1)
p = np.poly1d(pf)
ax.plot(x, y, "o")
ax.plot(x, p(x), "r--")
ax.set_title(col + " vs Prices")
ax.set_xlabel(col)
ax.set_ylabel("Prices")
X_train = torch.tensor(X_train).float()
y_train = torch.tensor(y_train).view(-1, 1).float()
X_test = torch.tensor(X_test).float()
y_test = torch.tensor(y_test).view(-1, 1).float()
datasets = torch.utils.data.TensorDataset(X_train, y_train)
train_iter = torch.utils.data.DataLoader(datasets, batch_size=10, shuffle=True)
batch_size = 50
num_epochs = 200
learning_rate = 0.0001
size_hidden1 = 100
size_hidden2 = 50
size_hidden3 = 10
size_hidden4 = 1
class BostonModel(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(13, size_hidden1)
self.relu1 = nn.ReLU()
self.lin2 = nn.Linear(size_hidden1, size_hidden2)
self.relu2 = nn.ReLU()
self.lin3 = nn.Linear(size_hidden2, size_hidden3)
self.relu3 = nn.ReLU()
self.lin4 = nn.Linear(size_hidden3, size_hidden4)
def forward(self, input):
return self.lin4(self.relu3(self.lin3(self.relu2(self.lin2(self.relu1(self.lin1(input)))))))
model = BostonModel()
model.train()
criterion = nn.MSELoss(reduction="sum")
def train(model_inp, num_epochs=num_epochs):
optimizer = torch.optim.RMSprop(model_inp.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
running_loss = 0.0
for inputs, labels in train_iter:
outputs = model_inp(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
running_loss += loss.item()
optimizer.step()
if epoch % 20 == 0:
print(
"Epoch [%d]/[%d] running accumulative loss across all batches: %.3f"
% (epoch + 1, num_epochs, running_loss)
)
running_loss = 0.0
def train_load_save_model(model_obj, model_path):
if path.isfile(model_path):
print("Loading pre-trained model from: {}".format(model_path))
model_obj.load_state_dict(torch.load(model_path))
else:
train(model_obj)
print("Finished training the model. Saving the model to the path: {}".format(model_path))
torch.save(model_obj.state_dict(), model_path)
SAVED_MODEL_PATH = ".save/boston_model.pt"
train_load_save_model(model, SAVED_MODEL_PATH)
model.eval()
outputs = model(X_test)
err = np.sqrt(mean_squared_error(outputs.detach().numpy(), y_test.detach().numpy()))
print("model err: ", err)
ig = IntegratedGradients(model)
ig_nt = NoiseTunnel(ig)
dl = DeepLift(model)
gs = GradientShap(model)
fa = FeatureAblation(model)
ig_attr_test = ig.attribute(X_test, n_steps=50)
ig_nt_attr_test = ig_nt.attribute(X_test)
dl_attr_test = dl.attribute(X_test)
gs_attr_test = gs.attribute(X_test, X_train)
fa_attr_test = fa.attribute(X_test)
x_axis_data = np.arange(X_test.shape[1])
x_axis_data_labels = list(map(lambda idx: feature_names[idx], x_axis_data))
ig_attr_test_sum = ig_attr_test.detach().numpy().sum(0)
ig_attr_test_norm_sum = ig_attr_test_sum / np.linalg.norm(ig_attr_test_sum, ord=1)
ig_nt_attr_test_sum = ig_nt_attr_test.detach().numpy().sum(0)
ig_nt_attr_test_norm_sum = ig_nt_attr_test_sum / np.linalg.norm(ig_nt_attr_test_sum, ord=1)
dl_attr_test_sum = dl_attr_test.detach().numpy().sum(0)
dl_attr_test_norm_sum = dl_attr_test_sum / np.linalg.norm(dl_attr_test_sum, ord=1)
gs_attr_test_sum = gs_attr_test.detach().numpy().sum(0)
gs_attr_test_norm_sum = gs_attr_test_sum / np.linalg.norm(gs_attr_test_sum, ord=1)
fa_attr_test_sum = fa_attr_test.detach().numpy().sum(0)
fa_attr_test_norm_sum = fa_attr_test_sum / np.linalg.norm(fa_attr_test_sum, ord=1)
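# assumed continuation (the snippet is truncated here): compare the normalized attributions
# of the five methods in a grouped bar chart; widths, labels and layout are illustrative
width = 0.14
legends = ['Int Grads', 'Int Grads w/SmoothGrad', 'DeepLift', 'GradientSHAP', 'Feature Ablation']
plt.figure(figsize=(20, 10))
ax = plt.subplot()
ax.set_title('Comparing input feature importances across attribution algorithms')
ax.set_ylabel('Attributions')
for i, attr in enumerate([ig_attr_test_norm_sum, ig_nt_attr_test_norm_sum, dl_attr_test_norm_sum,
                          gs_attr_test_norm_sum, fa_attr_test_norm_sum]):
    ax.bar(x_axis_data + i * width, attr, width, align='center')
ax.legend(legends, loc=3)
ax.set_xticks(x_axis_data + 0.5)
ax.set_xticklabels(x_axis_data_labels)
plt.show()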
import numpy
import matplotlib.pyplot as plt
from firmament import Renderer
from firmament.planets import earth
config = {
'size_x': 1000,
'size_y': 1000,
'ray_samples': 16,
'light_samples': 8,
'exposure': 2.0,
'zoom': 2.5,
'eye_pos': numpy.array([0, -3, 1.1]),
'eye_dir': numpy.array([0, 1, -0.35])
}
sun_range = (0, 360, 10)
renderer = Renderer(config, earth)
for i in numpy.arange(*sun_range):
    sun = (
        numpy.cos(i*2*numpy.pi/360),
        numpy.sin(i*2*numpy.pi/360),  # assumed: the remaining components of the sun
        0,                            # direction vector are truncated in the source
    )
import numpy as np
from numba import jit
from netket.operator import LocalOperator
import numbers
class FermionLocalOperator(LocalOperator):
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(
x,
sections,
local_states,
basis,
constant,
diag_mels,
n_conns,
all_mels,
all_x_prime,
acting_on,
acting_size,
pad=False,
):
batch_size = x.shape[0]
n_sites = x.shape[1]
assert sections.shape[0] == batch_size
n_operators = n_conns.shape[0]
xs_n = np.empty((batch_size, n_operators), dtype=np.intp)
tot_conn = 0
max_conn = 0
for b in range(batch_size):
# diagonal element
conn_b = 1
# counting the off-diagonal elements
for i in range(n_operators):
acting_size_i = acting_size[i]
xs_n[b, i] = 0
x_b = x[b]
x_i = x_b[acting_on[i, :acting_size_i]]
for k in range(acting_size_i):
                    xs_n[b, i] += (
                        np.searchsorted(local_states, x_i[acting_size_i - k - 1])
                        * basis[k]  # assumed: the trailing factor is truncated in the source
                    )
import mpsort
from runtests.mpi import MPITest, MPITestFixture
import numpy
from numpy.testing import assert_array_equal
from itertools import product
import pytest
def split(array, comm, localsize=None):
array = comm.bcast(array)
if localsize is None:
sp = numpy.array_split(array, comm.size)
return comm.scatter(sp)
else:
g = comm.allgather(localsize)
return comm.scatter(numpy.array_split(array, numpy.cumsum(g)[:-1]))
def heal(array, comm):
a = comm.allgather(array)
a = numpy.concatenate(a, axis=0)
return a
def adjustsize(size, comm):
ressize = size + 1 - 2 * ((comm.rank) % 2)
if comm.size % 2 == 1:
if comm.rank == 0:
ressize = size
return ressize
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_i4(comm):
s = numpy.int32(numpy.random.random(size=1000) * 1000 - 400)
local = split(s, comm)
s = heal(local, comm)
mpsort.sort(local, orderby=None, out=None, comm=comm)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_i8(comm):
s = numpy.int64(numpy.random.random(size=1000) * 1000 - 400)
local = split(s, comm)
s = heal(local, comm)
mpsort.sort(local, orderby=None, out=None, comm=comm)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_u8(comm):
s = numpy.uint64(numpy.random.random(size=1000) * 1000 - 400)
local = split(s, comm)
s = heal(local, comm)
mpsort.sort(local, orderby=None, out=None, comm=comm)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_u4(comm):
s = numpy.uint32(numpy.random.random(size=1000) * 1000 - 400)
local = split(s, comm)
s = heal(local, comm)
mpsort.sort(local, orderby=None, out=None, comm=comm)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
TUNINGS = [
[],
['DISABLE_SPARSE_ALLTOALLV'],
['REQUIRE_SPARSE_ALLTOALLV'],
['REQUIRE_GATHER_SORT'],
['DISABLE_GATHER_SORT'],
]
comm = MPITestFixture([1, 2, 3, 4], scope='function')
@pytest.mark.parametrize("tuning", TUNINGS)
def test_sort_tunings(comm, tuning):
s = numpy.int32(numpy.random.random(size=1000) * 1000)
local = split(s, comm)
s = heal(local, comm)
g = comm.allgather(local.size)
mpsort.sort(local, orderby=None, out=None, comm=comm, tuning=tuning)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_inplace(comm):
s = numpy.int32(numpy.random.random(size=1000) * 1000)
local = split(s, comm)
s = heal(local, comm)
g = comm.allgather(local.size)
mpsort.sort(local, local, out=None, comm=comm)
r = heal(local, comm)
s.sort()
assert_array_equal(s, r)
@MPITest(commsize=(4))
def test_sort_mismatched_zeros(comm):
s = numpy.int32(numpy.random.random(size=1000) * 1000)
local = split(s, comm, [0, 400, 0, 600][comm.rank])
s = heal(local, comm)
res = split(s, comm, [200, 200, 0, 600][comm.rank])
res[:] = numpy.int32(numpy.random.random(size=res.size) * 1000)
mpsort.sort(local, local, out=res, comm=comm, tuning=['REQUIRE_GATHER_SORT'])
s.sort()
r = heal(res, comm)
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_outplace(comm):
s = numpy.int32(numpy.random.random(size=1000) * 1000)
local = split(s, comm)
s = heal(local, comm)
res = numpy.zeros(adjustsize(local.size, comm), dtype=local.dtype)
mpsort.sort(local, local, out=res, comm=comm)
s.sort()
r = heal(res, comm)
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_flatiter(comm):
s = numpy.int32(numpy.random.random(size=1000) * 1000)
local = split(s, comm)
s = heal(local, comm)
res = numpy.zeros(adjustsize(local.size, comm), dtype=local.dtype)
mpsort.sort(local.flat, local.flat, out=res.flat, comm=comm)
s.sort()
r = heal(res, comm)
assert_array_equal(s, r)
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_struct(comm):
s = numpy.empty(10, dtype=[
('value', 'i8'),
('key', 'i8')])
numpy.random.seed(1234)
s['value'] = numpy.int32(numpy.random.random(size=10) * 1000-400)
s['key'] = s['value']
backup = s.copy()
local = split(s, comm)
s = heal(local, comm)
res = numpy.zeros_like(local)
mpsort.sort(local, 'key', out=res, comm=comm)
r = heal(res, comm)
backup.sort(order='key')
assert_array_equal(backup['value'], r['value'])
@MPITest(commsize=(1, 2, 3, 4))
def test_sort_struct_vector(comm):
s = numpy.empty(10, dtype=[
('value', 'i8'),
('key', 'i8'),
('vkey', ('i8', 2))])
s['value'] = numpy.int32(numpy.random.random(size=len(s)) * 1000)
# use a scalar key to trick numpy
# numpy sorts as byte streams for vector fields.
s['key'][:][...] = s['value']
s['vkey'][:, 0][...] = s['value']
s['vkey'][:, 1][...] = s['value']
local = split(s, comm)
s = heal(local, comm)
res = numpy.zeros_like(local)
mpsort.sort(local, 'vkey', out=res, comm=comm)
r = heal(res, comm)
s.sort(order='key')
    assert_array_equal(s['value'], r['value'])
import sys
import os
sys.path.append(os.pardir)
import numpy as np
import unittest
import coropy.utils as cu
class TestUtils(unittest.TestCase):
"""Class for `utils.py` tests."""
def test_normalize(self):
self.assertIsNone(
np.testing.assert_array_equal(cu.normalize([10, 50]), [0., 1.]))
self.assertIsNone(
np.testing.assert_array_almost_equal(
cu.normalize(range(100)), np.linspace(0, 1, 100), decimal=2))
self.assertIsNone(
np.testing.assert_array_equal(cu.normalize([-1, 1]), [0., 1.]))
self.assertRaises(ValueError, cu.normalize, 5.)
self.assertRaises(AssertionError, cu.normalize, [[1, 2, 3]])
self.assertRaises(AssertionError, cu.normalize, [5])
self.assertRaises(ValueError, cu.normalize, [-np.nan, 50])
self.assertWarns(RuntimeWarning, cu.normalize, [0, 0, 0])
def test_restore(self):
self.assertIsNone(
np.testing.assert_array_equal(
cu.restore([0., 1.], [10, 50]), [10., 50.]))
self.assertIsNone(
np.testing.assert_array_almost_equal(
cu.restore(np.linspace(0, 1, 100), range(100)),
range(100),
decimal=2))
self.assertRaises(ValueError, cu.restore, [5.], 0)
self.assertRaises(AssertionError, cu.restore, [[1, 2, 3]], [0, 3])
self.assertRaises(ValueError, cu.restore, [0, 1], [-np.nan, 50])
def test_moving_average(self):
a = np.array([1, 2, 3, 4, 5])
        a_2 = np.array([1.5, 2.5, 3.5, 4.5])
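        # assumed continuation (the snippet is truncated here): a_2 is the expected
        # window-2 moving average of a; the exact signature of cu.moving_average is
        # not in the source, so this call is illustrative
        self.assertIsNone(
            np.testing.assert_array_equal(cu.moving_average(a, 2), a_2))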
import tensorflow as tf
import mesh_renderer
from scipy.io import loadmat,savemat
import numpy as np
# define facemodel for reconstruction
class BFM():
def __init__(self):
model_path = './BFM/BFM_model_front.mat'
model = loadmat(model_path)
self.meanshape = model['meanshape'] # mean face shape
self.idBase = model['idBase'] # identity basis
self.exBase = model['exBase'] # expression basis
self.meantex = model['meantex'] # mean face texture
self.texBase = model['texBase'] # texture basis
self.point_buf = model['point_buf'] # adjacent face index for each vertex, starts from 1 (only used for calculating face normal)
self.tri = model['tri'] # vertex index for each triangle face, starts from 1
self.keypoints = np.squeeze(model['keypoints']).astype(np.int32) - 1 # 68 face landmark index, starts from 0
# compute vertex normal using one-ring neighborhood
# input: face_shape with shape [1,N,3]
# output: v_norm with shape [1,N,3]
def Compute_norm(face_shape,facemodel):
face_id = facemodel.tri # vertex index for each triangle face, with shape [F,3], F is number of faces
point_id = facemodel.point_buf # adjacent face index for each vertex, with shape [N,8], N is number of vertex
shape = face_shape
face_id = (face_id - 1).astype(np.int32)
point_id = (point_id - 1).astype(np.int32)
v1 = shape[:,face_id[:,0],:]
v2 = shape[:,face_id[:,1],:]
v3 = shape[:,face_id[:,2],:]
e1 = v1 - v2
e2 = v2 - v3
face_norm = np.cross(e1,e2) # compute normal for each face
face_norm = np.concatenate([face_norm,np.zeros([1,1,3])], axis = 1) # concat face_normal with a zero vector at the end
v_norm = np.sum(face_norm[:,point_id,:], axis = 2) # compute vertex normal using one-ring neighborhood
v_norm = v_norm/np.expand_dims(np.linalg.norm(v_norm,axis = 2),2) # normalize normal vectors
return v_norm
# input: coeff with shape [1,257]
def Split_coeff(coeff):
id_coeff = coeff[:,:80] # identity(shape) coeff of dim 80
ex_coeff = coeff[:,80:144] # expression coeff of dim 64
tex_coeff = coeff[:,144:224] # texture(albedo) coeff of dim 80
angles = coeff[:,224:227] # ruler angles(x,y,z) for rotation of dim 3
gamma = coeff[:,227:254] # lighting coeff for 3 channel SH function of dim 27
translation = coeff[:,254:] # translation coeff of dim 3
return id_coeff,ex_coeff,tex_coeff,angles,gamma,translation
# compute vertex texture(albedo) with tex_coeff
# input: tex_coeff with shape [1,N,3]
# output: face_texture with shape [1,N,3], RGB order, range from 0-255
def Texture_formation(tex_coeff,facemodel):
face_texture = np.einsum('ij,aj->ai',facemodel.texBase,tex_coeff) + facemodel.meantex
face_texture = np.reshape(face_texture,[1,-1,3])
return face_texture
# compute rotation matrix based on 3 ruler angles
# input: angles with shape [1,3]
# output: rotation matrix with shape [1,3,3]
def Compute_rotation_matrix(angles):
angle_x = angles[:,0][0]
angle_y = angles[:,1][0]
angle_z = angles[:,2][0]
# compute rotation matrix for X,Y,Z axis respectively
rotation_X = np.array([1.0,0,0,\
0,np.cos(angle_x),-np.sin(angle_x),\
0,np.sin(angle_x),np.cos(angle_x)])
rotation_Y = np.array([np.cos(angle_y),0,np.sin(angle_y),\
0,1,0,\
-np.sin(angle_y),0,np.cos(angle_y)])
rotation_Z = np.array([np.cos(angle_z),-np.sin(angle_z),0,\
np.sin(angle_z),np.cos(angle_z),0,\
0,0,1])
rotation_X = np.reshape(rotation_X,[1,3,3])
rotation_Y = np.reshape(rotation_Y,[1,3,3])
rotation_Z = np.reshape(rotation_Z,[1,3,3])
rotation = np.matmul(np.matmul(rotation_Z,rotation_Y),rotation_X)
rotation = np.transpose(rotation, axes = [0,2,1]) #transpose row and column (dimension 1 and 2)
return rotation
# compute vertex color using face_texture and SH function lighting approximation
# input: face_texture with shape [1,N,3]
# norm with shape [1,N,3]
# gamma with shape [1,27]
# output: face_color with shape [1,N,3], RGB order, range from 0-255
# lighting with shape [1,N,3], color under uniform texture
def Illumination_layer(face_texture,norm,gamma):
num_vertex = np.shape(face_texture)[1]
init_lit = np.array([0.8,0,0,0,0,0,0,0,0])
gamma = np.reshape(gamma,[-1,3,9])
gamma = gamma + np.reshape(init_lit,[1,1,9])
# parameter of 9 SH function
a0 = np.pi
a1 = 2*np.pi/np.sqrt(3.0)
a2 = 2*np.pi/np.sqrt(8.0)
c0 = 1/np.sqrt(4*np.pi)
c1 = np.sqrt(3.0)/np.sqrt(4*np.pi)
    c2 = 3*np.sqrt(5.0)/np.sqrt(12*np.pi)
from dataclasses import dataclass
import collections
import collections.abc
import math
from abc import abstractmethod
from functools import reduce
import pandas as pd
import numpy as np
import matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import matplotlib.patches as mptch
import matplotlib.gridspec as gridspec
import matplotlib.path as path
import matplotlib.cm as cm
from matplotlib.colors import BoundaryNorm
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 # not directly used but need to import to plot 3d
from scipy.interpolate import griddata
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pyqstrat.pq_utils import series_to_array, strtup2date, has_display, resample_ts, resample_trade_bars
from pyqstrat.pq_types import ReasonCode, Trade
from typing import Sequence, Tuple, Mapping, Union, MutableMapping, List, Optional
# set_defaults()
class DateFormatter(mtick.Formatter):
'''
Formats timestamps on plot axes. See matplotlib Formatter
'''
def __init__(self, timestamps: np.ndarray, fmt: str) -> None:
self.timestamps = timestamps
self.fmt = fmt
def __call__(self, x, pos: int = 0):
'Return the label for time x at position pos'
ind = int(np.round(x))
if ind >= len(self.timestamps) or ind < 0: return ''
return mdates.num2date(self.timestamps[ind]).strftime(self.fmt)
class HorizontalLine:
'''Draws a horizontal line on a subplot'''
def __init__(self, y: float, name: str = None, line_type: str = 'dashed', color: str = None) -> None:
self.y = y
self.name = name
self.line_type = line_type
self.color = color
class VerticalLine:
'''Draws a vertical line on a subplot where x axis is not a date-time axis'''
def __init__(self, x: float, name: str = None, line_type: str = 'dashed', color: str = None) -> None:
self.x = x
self.name = name
self.line_type = line_type
self.color = color
class DateLine:
'''Draw a vertical line on a plot with a datetime x-axis'''
def __init__(self, date: np.datetime64, name: str = None, line_type: str = 'dashed', color: str = None) -> None:
self.date = date
self.name = name
self.line_type = line_type
self.color = color
class DisplayAttributes:
pass
@dataclass
class BoxPlotAttributes(DisplayAttributes):
'''
Attributes:
proportional_widths: if set to True, the width each box in the boxplot will be proportional
to the number of items in its corresponding array
show_means: Whether to display a marker where the mean is for each array
show_outliers: Whether to show markers for outliers that are outside the whiskers.
Box is at Q1 = 25%, Q3 = 75% quantiles, whiskers are at Q1 - 1.5 * (Q3 - Q1), Q3 + 1.5 * (Q3 - Q1)
notched: Whether to show notches indicating the confidence interval around the median
'''
proportional_widths: bool = True
show_means: bool = True
show_all: bool = True
show_outliers: bool = False
notched: bool = False
@dataclass
class LinePlotAttributes(DisplayAttributes):
line_type: Optional[str] = 'solid'
line_width: Optional[int] = None
color: Optional[str] = None
marker: Optional[str] = None
marker_size: Optional[int] = None
marker_color: Optional[str] = None
@dataclass
class ScatterPlotAttributes(DisplayAttributes):
marker: str = 'X'
marker_size: int = 50
marker_color: str = 'red'
@dataclass
class SurfacePlotAttributes(DisplayAttributes):
'''
Attributes:
marker: Adds a marker to each point in x, y, z to show the actual data used for interpolation.
You can set this to None to turn markers off.
interpolation: Can be ‘linear’, ‘nearest’ or ‘cubic’ for plotting z points between the ones passed in.
See scipy.interpolate.griddata for details
cmap: Colormap to use (default matplotlib.cm.RdBu_r). See matplotlib colormap for details
'''
marker: str = 'X'
marker_size: int = 50
marker_color: str = 'red'
interpolation: str = 'linear'
cmap: matplotlib.colors.Colormap = matplotlib.cm.RdBu_r
@dataclass
class ContourPlotAttributes(DisplayAttributes):
marker: str = 'X'
marker_size: int = 50
marker_color: str = 'red'
interpolation: str = 'linear'
cmap: matplotlib.colors.Colormap = matplotlib.cm.RdBu_r
min_level: float = math.nan
max_level: float = math.nan
@dataclass
class CandleStickPlotAttributes(DisplayAttributes):
colorup: str = 'darkgreen'
colordown: str = '#F2583E'
@dataclass
class BarPlotAttributes(DisplayAttributes):
color: str = 'red'
@dataclass
class FilledLinePlotAttributes(DisplayAttributes):
    '''
    Attributes:
        positive_color: Color used to fill the area where y >= 0. Default "blue"
        negative_color: Color used to fill the area where y < 0. Default "red"
    '''
positive_color: str = 'blue'
negative_color: str = 'red'
class PlotData:
name: str
display_attributes: DisplayAttributes
class TimePlotData(PlotData):
timestamps: np.ndarray
@abstractmethod
def reindex(self, timestamps: np.ndarray, fill: bool) -> None:
pass
class BucketedValues(PlotData):
'''
Data in a subplot where we summarize properties of a numpy array.
For example, drawing a boxplot with percentiles. x axis is a categorical
'''
def __init__(self, name: str,
bucket_names: Sequence[str],
bucket_values: Sequence[np.ndarray],
display_attributes: DisplayAttributes = None) -> None:
'''
Args:
name: name used for this data in a plot legend
bucket_names: list of strings used on x axis labels
bucket_values: list of numpy arrays that are summarized in this plot
'''
assert isinstance(bucket_names, list) and isinstance(bucket_values, list) and len(bucket_names) == len(bucket_values)
self.name = name
self.bucket_names = bucket_names
self.bucket_values = series_to_array(bucket_values)
if display_attributes is None: display_attributes = BoxPlotAttributes()
self.display_attributes = display_attributes
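# Illustrative sketch (added for clarity, not part of the original source): building a
# BucketedValues container for a boxplot subplot.  Bucket arrays may have different
# lengths; with proportional_widths=True each box's width reflects its count.
def _example_bucketed_values():  # hypothetical helper, for illustration only
    np.random.seed(0)
    return BucketedValues(
        'returns by month',
        bucket_names=['Jan', 'Feb', 'Mar'],
        bucket_values=[np.random.normal(0, 1, 100), np.random.normal(0.1, 1, 50), np.random.normal(-0.1, 1, 75)],
        display_attributes=BoxPlotAttributes(proportional_widths=True, show_means=True))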
class XYData(PlotData):
'''Data in a subplot that has x and y values that are both arrays of floats'''
def __init__(self,
name: str,
x: Union[np.ndarray, pd.Series],
y: Union[np.ndarray, pd.Series],
display_attributes: DisplayAttributes = None) -> None:
self.name = name
self.x = series_to_array(x)
self.y = series_to_array(y)
if display_attributes is None: display_attributes = LinePlotAttributes()
self.display_attributes = display_attributes
class XYZData(PlotData):
'''Data in a subplot that has x, y and z values that are all floats'''
def __init__(self,
name: str,
x: Union[np.ndarray, pd.Series],
y: Union[np.ndarray, pd.Series],
z: Union[np.ndarray, pd.Series],
display_attributes: DisplayAttributes = None) -> None:
'''
Args:
name: Name to show in plot legend
'''
self.name = name
self.x = x
self.y = y
self.z = z
if display_attributes is None: display_attributes = ContourPlotAttributes()
self.display_attributes = display_attributes
class TimeSeries(TimePlotData):
'''Data in a subplot where x is an array of numpy datetimes and y is a numpy array of floats'''
def __init__(self,
name: str,
timestamps: Union[pd.Series, np.ndarray],
values: Union[pd.Series, np.ndarray],
display_attributes: DisplayAttributes = None) -> None:
'''
Args:
name: Name to show in plot legend
'''
self.name = name
self.timestamps = series_to_array(timestamps)
self.values = series_to_array(values)
if display_attributes is None: display_attributes = LinePlotAttributes()
self.display_attributes = display_attributes
def reindex(self, timestamps: np.ndarray, fill: bool) -> None:
'''Reindex this series given a new array of timestamps, forward filling holes if fill is set to True'''
s = pd.Series(self.values, index=self.timestamps)
s = s.reindex(timestamps, method='ffill' if fill else None)
self.timestamps = s.index.values
self.values = s.values
class TradeBarSeries(TimePlotData):
'''
Data in a subplot that contains open, high, low, close, volume bars. volume is optional.
'''
def __init__(self,
name: str,
timestamps: np.ndarray,
o: Optional[np.ndarray],
h: Optional[np.ndarray],
l: Optional[np.ndarray], # noqa: E741: ignore # l ambiguous
c: Optional[np.ndarray],
v: np.ndarray = None,
vwap: np.ndarray = None,
display_attributes: DisplayAttributes = None) -> None:
'''
Args:
name: Name to show in a legend
'''
self.name = name
self.timestamps = timestamps
self.o = o
self.h = h
self.l = l # noqa: E741: ignore # l ambiguous
self.c = c
self.v = np.ones(len(self.timestamps), dtype=float) * np.nan if v is None else v
self.vwap = np.ones(len(self.timestamps), dtype=float) * np.nan if vwap is None else vwap
if display_attributes is None: display_attributes = CandleStickPlotAttributes()
self.display_attributes = display_attributes
def df(self) -> pd.DataFrame:
return pd.DataFrame({'o': self.o, 'h': self.h, 'l': self.l, 'c': self.c, 'v': self.v, 'vwap': self.vwap}, # type: ignore # l ambiguous
index=self.timestamps)[['o', 'h', 'l', 'c', 'v', 'vwap']]
def reindex(self, all_timestamps: np.ndarray, fill: bool) -> None:
df = self.df()
df = df.reindex(all_timestamps)
self.timestamps = all_timestamps
for col in df.columns:
setattr(self, col, df[col].values)
class TradeSet(TimePlotData):
'''Data for subplot that contains a set of trades along with marker properties for these trades'''
def __init__(self,
name: str,
trades: Sequence[Trade],
display_attributes: DisplayAttributes = None) -> None:
'''
Args:
name: String to display in a subplot legend
trades: List of Trade objects to plot
'''
self.name = name
self.trades = trades
self.timestamps = np.array([trade.timestamp for trade in trades], dtype='M8[ns]')
self.values = np.array([trade.price for trade in trades], dtype=float)
if display_attributes is None:
display_attributes = ScatterPlotAttributes(marker='P', marker_color='red', marker_size=50)
self.display_attributes = display_attributes
def reindex(self, all_timestamps: np.ndarray, fill: bool) -> None:
s = pd.Series(self.values, index=self.timestamps)
s = s.reindex(all_timestamps, method='ffill' if fill else None)
self.timestamps = s.index.values
self.values = s.values
def __repr__(self) -> str:
s = ''
for trade in self.trades:
s += f'{trade.timestamp} {trade.qty} {trade.price}\n'
return s
def draw_poly(ax: mpl.axes.Axes,
left: np.ndarray,
bottom: np.ndarray,
top: np.ndarray,
right: np.ndarray,
facecolor: str,
edgecolor: str,
zorder: int) -> None:
    '''Draw a set of polygons given parallel numpy arrays of left, bottom, top, right points'''
XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T
barpath = path.Path.make_compound_path_from_polys(XY)
    # Clean path to get rid of 0, 0 points. Seems to be a matplotlib bug. If we don't, the ylim lower bound gets set to 0
v = []
c = []
for seg in barpath.iter_segments():
vertices, command = seg
if not (vertices[0] == 0. and vertices[1] == 0.):
v.append(vertices)
c.append(command)
cleaned_path = path.Path(v, c)
patch = mptch.PathPatch(cleaned_path, facecolor=facecolor, edgecolor=edgecolor, zorder=zorder)
ax.add_patch(patch)
def draw_candlestick(ax: mpl.axes.Axes,
index: np.ndarray,
o: np.ndarray,
h: np.ndarray,
l: np.ndarray, # noqa: E741: ignore # l ambiguous
c: np.ndarray,
v: Optional[np.ndarray],
vwap: np.ndarray,
colorup: str = 'darkgreen',
colordown: str = '#F2583E') -> None:
    '''Draw candlesticks given parallel numpy arrays of o, h, l, c, v values. v is optional.
See TradeBarSeries class __init__ for argument descriptions.'''
width = 0.5
# Have to do volume first because of a mpl bug with axes fonts if we use make_axes_locatable after plotting on top axis
if v is not None and not np.isnan(v).all():
divider = make_axes_locatable(ax)
vol_ax = divider.append_axes('bottom', size='25%', sharex=ax, pad=0)
_c = np.nan_to_num(c)
_o = np.nan_to_num(o)
pos = _c >= _o
neg = _c < _o
vol_ax.bar(index[pos], v[pos], color=colorup, width=width)
vol_ax.bar(index[neg], v[neg], color=colordown, width=width)
offset = width / 2.0
mask = ~np.isnan(c) & ~np.isnan(o)
mask[mask] &= c[mask] < o[mask]
left = index - offset
bottom = np.where(mask, o, c)
top = np.where(mask, c, o)
right = left + width
draw_poly(ax, left[mask], bottom[mask], top[mask], right[mask], colordown, 'k', 100)
draw_poly(ax, left[~mask], bottom[~mask], top[~mask], right[~mask], colorup, 'k', 100)
draw_poly(ax, left + offset, l, h, left + offset, 'k', 'k', 1)
if vwap is not None:
ax.scatter(index, vwap, marker='o', color='orange', zorder=110)
def draw_boxplot(ax: mpl.axes.Axes,
                 names: Sequence[str],
values: Sequence[np.ndarray],
proportional_widths: bool = True,
notched: bool = False,
show_outliers: bool = True,
show_means: bool = True,
show_all: bool = True) -> None:
'''Draw a boxplot. See BucketedValues class for explanation of arguments'''
outliers = None if show_outliers else ''
meanpointprops = dict(marker='D')
assert(isinstance(values, list) and isinstance(names, list) and len(values) == len(names))
widths = None
if show_all:
all_values = np.concatenate(values)
values.append(all_values)
names.append('all')
if proportional_widths:
counts = [len(v) for v in values]
total = float(sum(counts))
widths = [c / total for c in counts]
ax.boxplot(values, notch=notched, sym=outliers, showmeans=show_means, meanprops=meanpointprops, widths=widths)
ax.set_xticklabels(names)
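# Illustrative sketch (added for clarity, not part of the original source): calling
# draw_boxplot directly, outside of the Subplot / Plot machinery.  Note that with
# show_all=True an extra 'all' bucket is appended, so the lists passed in are mutated.
def _example_draw_boxplot():  # hypothetical helper, for illustration only
    if not has_display(): return
    np.random.seed(0)
    fig, ax = plt.subplots()
    draw_boxplot(ax,
                 names=['a', 'b'],
                 values=[np.random.normal(0, 1, 100), np.random.normal(0.5, 1, 60)],
                 proportional_widths=True, show_means=True, show_all=True)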
def draw_3d_plot(ax: mpl.axes.Axes,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
plot_type: str = 'contour',
marker: str = 'X',
marker_size: int = 50,
marker_color: str = 'red',
interpolation: str = 'linear',
cmap: matplotlib.colors.Colormap = matplotlib.cm.RdBu_r,
min_level: float = math.nan,
max_level: float = math.nan) -> None:
'''Draw a 3d plot. See XYZData class for explanation of arguments
>>> x = np.random.rand(10)
>>> y = np.random.rand(10)
>>> z = x ** 2 + y ** 2
>>> if has_display():
... fig, ax = plt.subplots()
... draw_3d_plot(ax, x = x, y = y, z = z, plot_type = 'contour', interpolation = 'linear');
'''
xi = np.linspace(min(x), max(x))
yi = np.linspace(min(y), max(y))
X, Y = np.meshgrid(xi, yi)
Z = griddata((x, y), z, (xi[None, :], yi[:, None]), method=interpolation)
Z = np.nan_to_num(Z)
if plot_type == 'surface':
ax.plot_surface(X, Y, Z, cmap=cmap)
if marker is not None:
ax.scatter(x, y, z, marker=marker, s=marker_size, c=marker_color)
m = cm.ScalarMappable(cmap=cmap)
m.set_array(Z)
plt.colorbar(m, ax=ax)
elif plot_type == 'contour':
# extract all colors from the map
cmaplist = [cmap(i) for i in range(cmap.N)]
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
Z = np.ma.masked_array(Z, mask=~np.isfinite(Z))
if math.isnan(min_level): min_level = np.min(Z)
if math.isnan(max_level): max_level = np.max(Z)
# define the bins and normalize and forcing 0 to be part of the colorbar!
bounds = np.arange(min_level, max_level, (max_level - min_level) / cmap.N)
idx = np.searchsorted(bounds, 0)
bounds = np.insert(bounds, idx, 0)
norm = BoundaryNorm(bounds, cmap.N)
cs = ax.contourf(X, Y, Z, cmap=cmap, norm=norm)
if marker is not None:
x = x[np.isfinite(z)]
y = y[np.isfinite(z)]
ax.scatter(x, y, marker=marker, s=marker_size, c=z[np.isfinite(z)], zorder=10, cmap=cmap)
LABEL_SIZE = 16
ax.tick_params(axis='both', which='major', labelsize=LABEL_SIZE)
ax.tick_params(axis='both', which='minor', labelsize=LABEL_SIZE)
cbar = plt.colorbar(cs, ax=ax)
cbar.ax.tick_params(labelsize=LABEL_SIZE)
else:
raise Exception(f'unknown plot type: {plot_type}')
def _adjust_axis_limit(lim: Tuple[float, float], values: Union[List, np.ndarray]) -> Tuple[float, float]:
'''If values + 10% buffer are outside current xlim or ylim, return expanded xlim or ylim for subplot'''
if isinstance(values, list):
values = np.array(values)
if values.dtype == np.bool_:
values = values.astype(float)
min_val, max_val = np.nanmin(values), np.nanmax(values)
val_range = max_val - min_val
    lim_min = min_val - .1 * val_range
    lim_max = max_val + .1 * val_range
return (min(lim[0], lim_min), max(lim[1], lim_max))
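# Worked example (illustrative, not part of the original source): with values spanning
# [2.0, 10.0] the 10% buffer is 0.8, so an existing limit of (0, 5) expands to (0, 10.8),
# while the lower bound is left alone because 2.0 - 0.8 is still above 0.
def _example_adjust_axis_limit():  # hypothetical helper, for illustration only
    lo, hi = _adjust_axis_limit((0.0, 5.0), np.array([2.0, 10.0]))
    assert np.isclose(lo, 0.0) and np.isclose(hi, 10.8)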
def _plot_data(ax: mpl.axes.Axes, data: PlotData) -> Optional[List[mpl.lines.Line2D]]:
lines = None # Return line objects so we can add legends
disp = data.display_attributes
if isinstance(data, XYData) or isinstance(data, TimeSeries):
x, y = (data.x, data.y) if isinstance(data, XYData) else (np.arange(len(data.timestamps)), data.values)
if isinstance(disp, LinePlotAttributes):
lines, = ax.plot(x, y, linestyle=disp.line_type, linewidth=disp.line_width, color=disp.color)
if disp.marker is not None: # type: ignore
ax.scatter(x, y, marker=disp.marker, c=disp.marker_color, s=disp.marker_size, zorder=100)
elif isinstance(disp, ScatterPlotAttributes):
lines = ax.scatter(x, y, marker=disp.marker, c=disp.marker_color, s=disp.marker_size, zorder=100)
elif isinstance(disp, BarPlotAttributes):
lines = ax.bar(x, y, color=disp.color) # type: ignore
elif isinstance(disp, FilledLinePlotAttributes):
x, y = np.nan_to_num(x), np.nan_to_num(y)
pos_values = np.where(y > 0, y, 0)
neg_values = np.where(y < 0, y, 0)
ax.fill_between(x, pos_values, color=disp.positive_color, step='post', linewidth=0.0)
ax.fill_between(x, neg_values, color=disp.negative_color, step='post', linewidth=0.0)
else:
raise Exception(f'unknown plot combination: {type(data)} {type(disp)}')
# For scatter and filled line, xlim and ylim does not seem to get set automatically
if isinstance(disp, ScatterPlotAttributes) or isinstance(disp, FilledLinePlotAttributes):
xmin, xmax = _adjust_axis_limit(ax.get_xlim(), x)
if not np.isnan(xmin) and not np.isnan(xmax): ax.set_xlim((xmin, xmax))
ymin, ymax = _adjust_axis_limit(ax.get_ylim(), y)
if not np.isnan(ymin) and not np.isnan(ymax): ax.set_ylim((ymin, ymax))
elif isinstance(data, TradeSet) and isinstance(disp, ScatterPlotAttributes):
lines = ax.scatter(np.arange(len(data.timestamps)), data.values, marker=disp.marker, c=disp.marker_color, s=disp.marker_size, zorder=100)
elif isinstance(data, TradeBarSeries) and isinstance(disp, CandleStickPlotAttributes):
if not (data.o is None or data.h is None or data.l is None or data.c is None):
draw_candlestick(ax, np.arange(len(data.timestamps)), data.o, data.h, data.l, data.c,
data.v, data.vwap, colorup=disp.colorup, colordown=disp.colordown)
elif isinstance(data, BucketedValues) and isinstance(disp, BoxPlotAttributes):
draw_boxplot(
ax, data.bucket_names, data.bucket_values, disp.proportional_widths, disp.notched, # type: ignore
disp.show_outliers, disp.show_means, disp.show_all) # type: ignore
elif isinstance(data, XYZData) and (isinstance(disp, SurfacePlotAttributes) or isinstance(disp, ContourPlotAttributes)):
display_type: str = 'contour' if isinstance(disp, ContourPlotAttributes) else 'surface'
draw_3d_plot(ax, data.x, data.y, data.z, display_type, disp.marker, disp.marker_size,
disp.marker_color, disp.interpolation, disp.cmap)
else:
raise Exception(f'unknown plot combination: {type(data)} {type(disp)}')
return lines
def _draw_date_gap_lines(ax: mpl.axes.Axes, plot_timestamps: np.ndarray) -> None:
'''
Draw vertical lines wherever there are gaps between two timestamps.
i.e., the gap between two adjacent timestamps is more than the minimum gap in the series.
'''
timestamps = mdates.date2num(plot_timestamps)
freq = np.nanmin(np.diff(timestamps))
if freq <= 0: raise Exception('could not infer date frequency')
date_index = np.arange(len(timestamps))
date_diff = np.diff(timestamps)
xs = []
for i in date_index:
if i < len(date_diff) and date_diff[i] > (freq + 0.000000001):
xs.append(i + 0.5)
if len(xs) > 20:
return # Too many lines will clutter the graph
for x in xs:
ax.axvline(x, linestyle='dashed', color='0.5')
def draw_date_line(ax: mpl.axes.Axes,
plot_timestamps: np.ndarray,
date: np.datetime64,
linestyle: str,
color: Optional[str]) -> mpl.lines.Line2D:
'''Draw vertical line on a subplot with datetime x axis'''
closest_index = (np.abs(plot_timestamps - date)).argmin()
return ax.axvline(x=closest_index, linestyle=linestyle, color=color)
def draw_horizontal_line(ax: mpl.axes.Axes, y: float, linestyle: str, color: Optional[str]) -> mpl.lines.Line2D:
'''Draw horizontal line on a subplot'''
return ax.axhline(y=y, linestyle=linestyle, color=color)
def draw_vertical_line(ax: mpl.axes.Axes, x: float, linestyle: str, color: Optional[str]) -> mpl.lines.Line2D:
'''Draw vertical line on a subplot'''
return ax.axvline(x=x, linestyle=linestyle, color=color)
def get_date_formatter(plot_timestamps: np.ndarray, date_format: Optional[str]) -> DateFormatter:
'''Create an appropriate DateFormatter for x axis labels.
If date_format is set to None, figures out an appropriate date format based on the range of timestamps passed in'''
num_timestamps = mdates.date2num(plot_timestamps)
if date_format is not None: return DateFormatter(num_timestamps, fmt=date_format)
date_range = num_timestamps[-1] - num_timestamps[0]
if date_range > 252:
date_format = '%d-%b-%Y'
elif date_range > 7:
date_format = '%b %d'
elif date_range > 1:
date_format = '%d %H:%M'
else:
date_format = '%H:%M:%S'
formatter = DateFormatter(num_timestamps, fmt=date_format)
return formatter
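# Illustrative sketch (added for clarity, not part of the original source): the heuristic
# picks '%H:%M:%S' for an intraday range and '%d-%b-%Y' for a multi-year range; pass
# date_format explicitly to override it.
def _example_get_date_formatter():  # hypothetical helper, for illustration only
    intraday = np.array(['2018-01-08 09:30:00', '2018-01-08 16:00:00'], dtype='M8[ns]')
    multi_year = np.array(['2016-01-08', '2018-01-08'], dtype='M8[ns]')
    fmt_intraday = get_date_formatter(intraday, None)      # chooses '%H:%M:%S'
    fmt_multi_year = get_date_formatter(multi_year, None)  # chooses '%d-%b-%Y'
    return fmt_intraday, fmt_multi_year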
class Subplot:
'''A top level plot contains a list of subplots, each of which contain a list of data objects to draw'''
def __init__(self,
data_list: Union[PlotData, Sequence[PlotData]],
secondary_y: Sequence[str] = None,
title: str = None,
xlabel: str = None,
ylabel: str = None,
zlabel: str = None,
date_lines: Sequence[DateLine] = None,
horizontal_lines: Sequence[HorizontalLine] = None,
vertical_lines: Sequence[VerticalLine] = None,
xlim: Union[Tuple[float, float], Tuple[np.datetime64, np.datetime64]] = None,
ylim: Union[Tuple[float, float], Tuple[np.datetime64, np.datetime64]] = None,
height_ratio: float = 1.0,
display_legend: bool = True,
legend_loc: str = 'best',
log_y: bool = False,
y_tick_format: str = None) -> None:
'''
Args:
data_list: A list of objects to draw. Each element can contain XYData, XYZData, TimeSeries, TradeBarSeries,
BucketedValues or TradeSet
secondary_y: A list of objects to draw on the secondary y axis
title: Title to show for this subplot. Default None
zlabel: Only applicable to 3d subplots. Default None
date_lines: A list of DateLine objects to draw as vertical lines. Only applicable when x axis is datetime.
Default None
horizontal_lines: A list of HorizontalLine objects to draw on the plot. Default None
vertical_lines: A list of VerticalLine objects to draw on the plot
xlim: x limits for the plot as a tuple of numpy datetime objects when x-axis is datetime,
or tuple of floats. Default None
ylim: y limits for the plot. Tuple of floats. Default None
height_ratio: If you have more than one subplot on a plot, use height ratio to determine how high each subplot should be.
For example, if you set height_ratio = 0.75 for the first subplot and 0.25 for the second,
the first will be 3 times taller than the second one. Default 1.0
display_legend: Whether to show a legend on the plot. Default True
legend_loc: Location for the legend. Default 'best'
log_y: Whether the y axis should be logarithmic. Default False
y_tick_format: Format string to use for y axis labels. For example, you can decide to
use fixed notation instead of scientific notation or change number of decimal places shown. Default None
'''
if not isinstance(data_list, collections.abc.Sequence): data_list = [data_list]
self.time_plot = all([isinstance(data, TimePlotData) for data in data_list])
        if not self.time_plot and any([isinstance(data, TimePlotData) for data in data_list]):
            raise Exception('cannot add a non date subplot on a subplot which has time series plots')
if not self.time_plot and date_lines is not None:
raise Exception('date lines can only be specified on a time series subplot')
self.is_3d = any([isinstance(data.display_attributes, SurfacePlotAttributes) for data in data_list])
if self.is_3d and any([not isinstance(data.display_attributes, SurfacePlotAttributes) for data in data_list]):
raise Exception('cannot combine 2d plot and 3d subplots on the same Subplot')
self.data_list = data_list
self.secondary_y = [] if secondary_y is None else secondary_y
self.date_lines = [] if date_lines is None else date_lines
self.horizontal_lines = [] if horizontal_lines is None else horizontal_lines
self.vertical_lines = [] if vertical_lines is None else vertical_lines
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.zlabel = zlabel
self.ylim = ylim
self.height_ratio = height_ratio
self.display_legend = display_legend
self.legend_loc = legend_loc
self.log_y = log_y
self.y_tick_format = y_tick_format
def _resample(self, sampling_frequency: Optional[str]) -> None:
if sampling_frequency is None: return None
for data in self.data_list:
if isinstance(data, TimeSeries) or isinstance(data, TradeSet):
data.timestamps, data.values = resample_ts(data.timestamps, data.values, sampling_frequency)
elif isinstance(data, TradeBarSeries):
df_dict = {}
cols = ['timestamps', 'o', 'h', 'l', 'c', 'v', 'vwap']
for col in cols:
val = getattr(data, col)
if val is not None:
df_dict[col] = val
df = pd.DataFrame(df_dict)
df = df.set_index('timestamps')
df = resample_trade_bars(df, sampling_frequency)
for col in cols:
if col in df:
setattr(data, col, df[col].values)
else:
raise Exception(f'unknown type: {data}')
def get_all_timestamps(self, date_range: Tuple[Optional[np.datetime64], Optional[np.datetime64]]) -> np.ndarray:
timestamps_list = [data.timestamps for data in self.data_list if isinstance(data, TimePlotData)]
all_timestamps = np.array(reduce(np.union1d, timestamps_list))
if date_range is not None and date_range[0] is not None and date_range[1] is not None:
all_timestamps = all_timestamps[(all_timestamps >= date_range[0]) & (all_timestamps <= date_range[1])]
return all_timestamps
def _reindex(self, all_timestamps: np.ndarray) -> None:
for data in self.data_list:
if not isinstance(data, TimePlotData): continue
disp = data.display_attributes
fill = not isinstance(data, TradeSet) and not (isinstance(disp, BarPlotAttributes) or isinstance(disp, ScatterPlotAttributes))
data.reindex(all_timestamps, fill=fill)
def _draw(self, ax: mpl.axes.Axes, plot_timestamps: Optional[np.ndarray], date_formatter: Optional[DateFormatter]) -> None:
if self.time_plot:
assert plot_timestamps is not None
self._reindex(plot_timestamps)
if date_formatter is not None: ax.xaxis.set_major_formatter(date_formatter)
lines = []
ax2 = None
if self.secondary_y is not None and len(self.secondary_y):
ax2 = ax.twinx()
for data in self.data_list:
if ax2 and data.name in self.secondary_y:
line = _plot_data(ax2, data)
else:
line = _plot_data(ax, data)
lines.append(line)
for date_line in self.date_lines: # vertical lines on time plot
assert plot_timestamps is not None
line = draw_date_line(ax, plot_timestamps, date_line.date, date_line.line_type, date_line.color)
if date_line.name is not None: lines.append(line)
for horizontal_line in self.horizontal_lines:
line = draw_horizontal_line(ax, horizontal_line.y, horizontal_line.line_type, horizontal_line.color)
if horizontal_line.name is not None: lines.append(line)
for vertical_line in self.vertical_lines:
line = draw_vertical_line(ax, vertical_line.x, vertical_line.line_type, vertical_line.color)
if vertical_line.name is not None: lines.append(line)
self.legend_names = [data.name for data in self.data_list]
self.legend_names += [date_line.name for date_line in self.date_lines if date_line.name is not None]
self.legend_names += [horizontal_line.name for horizontal_line in self.horizontal_lines if horizontal_line.name is not None]
self.legend_names += [vertical_line.name for vertical_line in self.vertical_lines if vertical_line.name is not None]
if self.ylim: ax.set_ylim(self.ylim)
if (len(self.data_list) > 1 or len(self.date_lines)) and self.display_legend:
ax.legend([line for line in lines if line is not None],
[self.legend_names[i] for i, line in enumerate(lines) if line is not None], loc=self.legend_loc)
if self.log_y:
ax.set_yscale('log')
ax.yaxis.set_major_locator(mtick.AutoLocator())
ax.yaxis.set_minor_locator(mtick.NullLocator())
if self.y_tick_format:
ax.yaxis.set_major_formatter(mtick.StrMethodFormatter(self.y_tick_format))
if self.title: ax.set_title(self.title)
if self.xlabel: ax.set_xlabel(self.xlabel)
if self.ylabel: ax.set_ylabel(self.ylabel)
if self.zlabel: ax.set_zlabel(self.zlabel)
ax.autoscale_view()
class Plot:
'''Top level plot containing a list of subplots to draw'''
def __init__(self,
subplot_list: Sequence[Subplot],
title: str = None,
figsize: Tuple[float, float] = (15, 8),
date_range: Union[Tuple[str, str], Tuple[Optional[np.datetime64], Optional[np.datetime64]]] = None,
date_format: str = None,
sampling_frequency: str = None,
show_grid: bool = True,
show_date_gaps: bool = True,
hspace: Optional[float] = 0.15) -> None:
'''
Args:
subplot_list: List of Subplot objects to draw
title: Title for this plot. Default None
figsize: Figure size. Default (15, 8)
date_range: Tuple of strings or numpy datetime64 limiting timestamps to draw. e.g. ("2018-01-01 14:00", "2018-01-05"). Default None
date_format: Date format to use for x-axis
sampling_frequency: Set this to downsample subplots that have a datetime x axis.
For example, if you have minute bar data, you might want to subsample to hours if the plot is too crowded.
See pandas time frequency strings for possible values. Default None
show_grid: If set to True, show a grid on the subplots. Default True
            show_date_gaps: If set to True, draw a dashed vertical line wherever there is a gap between timestamps.
For example, you may have minute bars and a gap between end of trading day and beginning of next day.
Even if set to True, this will turn itself off if there are too many gaps to avoid clutter. Default True
hspace: Height (vertical) space between subplots. Default 0.15
'''
if isinstance(subplot_list, Subplot): subplot_list = [subplot_list]
assert(len(subplot_list))
self.subplot_list = subplot_list
self.title = title
self.figsize = figsize
self.date_range = strtup2date(date_range)
self.date_format = date_format
self.sampling_frequency = sampling_frequency
self.show_date_gaps = show_date_gaps
self.show_grid = show_grid
self.hspace = hspace
def _get_plot_timestamps(self) -> Optional[np.ndarray]:
timestamps_list = []
for subplot in self.subplot_list:
if not subplot.time_plot: continue
subplot._resample(self.sampling_frequency)
timestamps_list.append(subplot.get_all_timestamps(self.date_range))
if not len(timestamps_list): return None
plot_timestamps = np.array(reduce(np.union1d, timestamps_list))
return plot_timestamps
def draw(self, check_data_size: bool = True) -> Optional[Tuple[mpl.figure.Figure, mpl.axes.Axes]]:
'''Draw the subplots.
Args:
check_data_size: If set to True, will not plot if there are > 100K points to avoid locking up your computer for a long time.
Default True
'''
if not has_display():
print('no display found, cannot plot')
return None
plot_timestamps = self._get_plot_timestamps()
if check_data_size and plot_timestamps is not None and len(plot_timestamps) > 100000:
raise Exception(f'trying to plot large data set with {len(plot_timestamps)} points, reduce date range or turn check_data_size flag off')
date_formatter = None
if plot_timestamps is not None:
date_formatter = get_date_formatter(plot_timestamps, self.date_format)
height_ratios = [subplot.height_ratio for subplot in self.subplot_list]
fig = plt.figure(figsize=self.figsize)
gs = gridspec.GridSpec(len(self.subplot_list), 1, height_ratios=height_ratios, hspace=self.hspace)
axes = []
for i, subplot in enumerate(self.subplot_list):
if subplot.is_3d:
ax = plt.subplot(gs[i], projection='3d')
else:
ax = plt.subplot(gs[i])
axes.append(ax)
time_axes = [axes[i] for i, s in enumerate(self.subplot_list) if s.time_plot]
if len(time_axes):
time_axes[0].get_shared_x_axes().join(*time_axes)
for i, subplot in enumerate(self.subplot_list):
subplot._draw(axes[i], plot_timestamps, date_formatter)
if self.title: axes[0].set_title(self.title)
# We may have added new axes in candlestick plot so get list of axes again
ax_list = fig.axes
for ax in ax_list:
if self.show_grid: ax.grid(linestyle='dotted')
for ax in ax_list:
if ax not in axes: time_axes.append(ax)
for ax in time_axes:
if self.show_date_gaps and plot_timestamps is not None: _draw_date_gap_lines(ax, plot_timestamps)
for ax in ax_list:
ax.autoscale_view()
return fig, ax_list
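# Minimal end-to-end sketch (added for clarity, not part of the original source): a single
# time series subplot drawn with Plot.  test_plot below shows a richer example with trade
# bars and trade markers.
def _example_plot():  # hypothetical helper, for illustration only
    timestamps = np.array(['2018-01-08', '2018-01-09', '2018-01-10', '2018-01-11'], dtype='M8[ns]')
    equity = TimeSeries('equity', timestamps, np.array([1.00e6, 1.01e6, 0.99e6, 1.02e6]))
    subplot = Subplot(equity, title='Equity curve', ylabel='USD')
    plot = Plot([subplot], title='Example strategy')
    return plot.draw()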
def _group_trades_by_reason_code(trades: Sequence[Trade]) -> Mapping[str, List[Trade]]:
trade_groups: MutableMapping[str, List[Trade]] = collections.defaultdict(list)
for trade in trades:
trade_groups[trade.order.reason_code].append(trade)
return trade_groups
def trade_sets_by_reason_code(trades: List[Trade],
marker_props: Mapping[str, Mapping] = ReasonCode.MARKER_PROPERTIES,
remove_missing_properties: bool = True) -> List[TradeSet]:
'''
Returns a list of TradeSet objects. Each TradeSet contains trades with a different reason code. The markers for each TradeSet
are set by looking up marker properties for each reason code using the marker_props argument:
Args:
trades: We look up reason codes using the reason code on the corresponding orders
marker_props: Dictionary from reason code string -> dictionary of marker properties.
See ReasonCode.MARKER_PROPERTIES for example. Default ReasonCode.MARKER_PROPERTIES
        remove_missing_properties: If set, we remove any reason codes that don't have marker properties set.
Default True
'''
trade_groups = _group_trades_by_reason_code(trades)
tradesets = []
for reason_code, trades in trade_groups.items():
if reason_code in marker_props:
mp = marker_props[reason_code]
disp = ScatterPlotAttributes(marker=mp['symbol'], marker_color=mp['color'], marker_size=mp['size'])
tradeset = TradeSet(reason_code, trades, display_attributes=disp)
elif remove_missing_properties:
continue
else:
tradeset = TradeSet(reason_code, trades)
tradesets.append(tradeset)
return tradesets
def test_plot() -> None:
class MockOrder:
def __init__(self, reason_code: str) -> None:
self.reason_code = reason_code
class MockTrade:
def __init__(self, timestamp: np.datetime64, qty: float, price: float, reason_code: str) -> None:
self.timestamp = timestamp
self.qty = qty
self.price = price
self.order = MockOrder(reason_code)
def __repr__(self) -> str:
return f'{self.timestamp} {self.qty} {self.price}'
np.random.seed(0)
timestamps = np.array(['2018-01-08 15:00:00', '2018-01-09 15:00:00', '2018-01-10 15:00:00', '2018-01-11 15:00:00'], dtype='M8[ns]')
pnl_timestamps = np.array(['2018-01-08 15:00:00', '2018-01-09 14:00:00', '2018-01-10 15:00:00', '2018-01-15 15:00:00'], dtype='M8[ns]')
positions = (pnl_timestamps, np.array([0., 5., 0., -10.]))
trade_timestamps = np.array(['2018-01-09 14:00:00', '2018-01-10 15:00:00', '2018-01-15 15:00:00'], dtype='M8[ns]')
trade_price = [9., 10., 9.5]
trade_qty = [5, -5, -10]
reason_codes = [ReasonCode.ENTER_LONG, ReasonCode.EXIT_LONG, ReasonCode.ENTER_SHORT]
trades = [MockTrade(trade_timestamps[i], trade_qty[i], trade_price[i], reason_codes[i]) for i, d in enumerate(trade_timestamps)]
disp = LinePlotAttributes(line_type='--')
tb_series = TradeBarSeries(
'price', timestamps=timestamps,
o=np.array([8.9, 9.1, 9.3, 8.6]),
h=np.array([9.0, 9.3, 9.4, 8.7]),
l=np.array([8.8, 9.0, 9.2, 8.4]), # noqa: E741 # ambiguous l
c=np.array([8.95, 9.2, 9.35, 8.5]),
v=np.array([200, 100, 150, 300]),
        vwap=np.array([8.9, 9.15, 9.3, 8.55]))
from __future__ import division
import os, glob
import numpy as np
import h5py
from skimage.transform import resize, warp
from transforms3d.euler import euler2mat
from transforms3d.affines import compose
import nibabel as nib
def get_random_transformation():
T = [0, np.random.uniform(-8, 8), np.random.uniform(-8, 8)]
R = euler2mat(np.random.uniform(-5, 5) / 180.0 * np.pi, 0, 0, 'sxyz')
Z = [1, np.random.uniform(0.9, 1.1), np.random.uniform(0.9, 1.1)]
A = compose(T, R, Z)
return A
def get_tform_coords(im_size):
coords0, coords1, coords2 = np.mgrid[:im_size[0], :im_size[1], :im_size[2]]
coords = np.array([coords0 - im_size[0] / 2, coords1 - im_size[1] / 2, coords2 - im_size[2] / 2])
return np.append(coords.reshape(3, -1), np.ones((1, np.prod(im_size))), axis=0)
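# Illustrative sketch (added for clarity, not part of the original code): applying a random
# affine to a small volume.  get_tform_coords() returns homogeneous coordinates centred on
# the volume, so the transform from compose() acts about the volume centre, mirroring the
# coordinate handling in read_label() below.
def _example_random_warp():  # hypothetical helper, for illustration only
    im = np.random.rand(16, 16, 16).astype(np.float32)
    A = get_random_transformation()
    coords = np.dot(A, get_tform_coords(im.shape))[:3]
    coords[0] += im.shape[0] / 2
    coords[1] += im.shape[1] / 2
    coords[2] += im.shape[2] / 2
    return warp(im, coords.reshape(3, *im.shape))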
def remove_low_high(im_input):
im_output = im_input
low = np.percentile(im_input, 1)
high = np.percentile(im_output, 99)
im_output[im_input < low] = low
im_output[im_input > high] = high
return im_output
def normalize(im_input):
x_start = im_input.shape[0] // 4
x_range = im_input.shape[0] // 2
y_start = im_input.shape[1] // 4
y_range = im_input.shape[1] // 2
z_start = im_input.shape[2] // 4
z_range = im_input.shape[2] // 2
roi = im_input[x_start : x_start + x_range, y_start : y_start + y_range, z_start : z_start + z_range]
im_output = (im_input - np.mean(roi)) / np.std(roi)
return im_output
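# Illustrative sketch (added for clarity, not part of the original code): normalize() uses the
# central half of the volume along each axis as the reference region, so border/background
# voxels do not skew the mean and std estimates; that central region comes out with zero mean
# and unit std.
def _example_normalize():  # hypothetical helper, for illustration only
    vol = np.random.uniform(0, 100, size=(64, 64, 32)).astype(np.float32)
    vol_norm = normalize(vol)
    roi = vol_norm[16:48, 16:48, 8:24]
    assert abs(np.mean(roi)) < 1e-3 and abs(np.std(roi) - 1.0) < 1e-3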
def read_label(path, is_training=True):
seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
# Crop to 128*128*64
crop_size = (128, 128, 64)
crop = [int((seg.shape[0] - crop_size[0]) / 2), int((seg.shape[1] - crop_size[1]) / 2),
int((seg.shape[2] - crop_size[2]) / 2)]
seg = seg[crop[0] : crop[0] + crop_size[0], crop[1] : crop[1] + crop_size[1], crop[2] : crop[2] + crop_size[2]]
label = np.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3), dtype=np.float32)
label[seg == 1, 0] = 1
label[seg == 2, 1] = 1
label[seg == 4, 2] = 1
final_label = np.empty((16, 16, 16, 3), dtype=np.float32)
for z in range(label.shape[3]):
final_label[..., z] = resize(label[..., z], (16, 16, 16), mode='constant')
# Augmentation
if is_training:
im_size = final_label.shape[:-1]
translation = [np.random.uniform(-2, 2), np.random.uniform(-2, 2), np.random.uniform(-2, 2)]
rotation = euler2mat(0, 0, np.random.uniform(-5, 5) / 180.0 * np.pi, 'sxyz')
scale = [1, 1, 1]
warp_mat = compose(translation, rotation, scale)
tform_coords = get_tform_coords(im_size)
w = np.dot(warp_mat, tform_coords)
w[0] = w[0] + im_size[0] / 2
w[1] = w[1] + im_size[1] / 2
w[2] = w[2] + im_size[2] / 2
warp_coords = w[0:3].reshape(3, im_size[0], im_size[1], im_size[2])
for z in range(label.shape[3]):
final_label[..., z] = warp(final_label[..., z], warp_coords)
return final_label
def read_seg(path):
seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
return seg
def read_image(path, is_training=True):
t1 = nib.load(glob.glob(os.path.join(path, '*_t1_corrected.nii.gz'))[0]).get_data().astype(np.float32)
t1ce = nib.load(glob.glob(os.path.join(path, '*_t1ce_corrected.nii.gz'))[0]).get_data().astype(np.float32)
t2 = nib.load(glob.glob(os.path.join(path, '*_t2.nii.gz'))[0]).get_data().astype(np.float32)
flair = nib.load(glob.glob(os.path.join(path, '*_flair.nii.gz'))[0]).get_data().astype(np.float32)
assert t1.shape == t1ce.shape == t2.shape == flair.shape
if is_training:
seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
assert t1.shape == seg.shape
nchannel = 5
else:
nchannel = 4
image = np.empty((t1.shape[0], t1.shape[1], t1.shape[2], nchannel), dtype=np.float32)
#image[..., 0] = remove_low_high(t1)
#image[..., 1] = remove_low_high(t1ce)
#image[..., 2] = remove_low_high(t2)
#image[..., 3] = remove_low_high(flair)
image[..., 0] = normalize(t1)
image[..., 1] = normalize(t1ce)
image[..., 2] = normalize(t2)
image[..., 3] = normalize(flair)
if is_training:
image[..., 4] = seg
return image
def read_patch(path):
image = np.load(path + '.npy')
seg = image[..., -1]
label = np.zeros((image.shape[0], image.shape[1], image.shape[2], 4), dtype=np.float32)
label[seg == 0, 0] = 1
label[seg == 1, 1] = 1
label[seg == 2, 2] = 1
label[seg == 4, 3] = 1
return image[..., :-1], label
def generate_patch_locations(patches, patch_size, im_size):
nx = round((patches * 8 * im_size[0] * im_size[0] / im_size[1] / im_size[2]) ** (1.0 / 3))
ny = round(nx * im_size[1] / im_size[0])
nz = round(nx * im_size[2] / im_size[0])
x = np.rint(np.linspace(patch_size, im_size[0] - patch_size, num=nx))
y = np.rint(np.linspace(patch_size, im_size[1] - patch_size, num=ny))
z = np.rint(np.linspace(patch_size, im_size[2] - patch_size, num=nz))
return x, y, z
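# Worked example (illustrative, not part of the original code): for a 240x240x155 volume,
# 400 requested patches and patch_size 32, the grid works out to roughly 17 x 17 x 11
# centre locations, each kept at least patch_size voxels away from the volume border.
def _example_patch_locations():  # hypothetical helper, for illustration only
    x, y, z = generate_patch_locations(patches=400, patch_size=32, im_size=(240, 240, 155))
    assert x.min() >= 32 and x.max() <= 240 - 32
    return x, y, z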
def generate_test_locations(patch_size, stride, im_size):
stride_size_x = patch_size[0] / stride
stride_size_y = patch_size[1] / stride
stride_size_z = patch_size[2] / stride
pad_x = (int(patch_size[0] / 2), int(np.ceil(im_size[0] / stride_size_x) * stride_size_x - im_size[0] + patch_size[0] / 2))
    pad_y = (int(patch_size[1] / 2), int(np.ceil(im_size[1] / stride_size_y) * stride_size_y - im_size[1] + patch_size[1] / 2))
import numpy as np
from itertools import combinations
from scipy.special import logsumexp
from scipy.stats import multivariate_normal as gaussian
def extract_logpdfs_array(posterior_predictive_params):
means = []
cov_diags = []
labels = []
for k, k_dict in posterior_predictive_params.items():
means.append(k_dict['mean'])
cov_diags.append(k_dict['cov_diag'])
labels.append(k)
all_logpdfs = []
for mean, cov_diag in zip(means, cov_diags): # For each category.
category_logpdfs = []
for mu, var in zip(mean, cov_diag): # For each dimension.
dimension_logpdf = gaussian(mu, var).logpdf
category_logpdfs.append(dimension_logpdf)
all_logpdfs.append(category_logpdfs)
return all_logpdfs, labels
def extract_logpps_array(u_model, model, teaching_sets):
pp_params = model.posterior_predictive_params
logpdfs_array, labels = extract_logpdfs_array(pp_params)
all_logpps = []
for logpdfs_row in logpdfs_array:
assert len(logpdfs_row) == u_model.shape[0]
logpps_row = []
for logpdf, u_dim in zip(logpdfs_row, u_model):
logpps_row.append(logpdf(u_dim))
all_logpps.append(logpps_row)
all_logpps = np.asarray(all_logpps)[:, teaching_sets]
return np.sum(all_logpps, axis=-1), labels
def normalize_logpps_array(logpps_array):
""" Normalize probabilities for each dimension across categories. """
assert len(logpps_array.shape) == 2
norms = logsumexp(logpps_array, axis=-2)
return logpps_array - norms
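# Worked example (illustrative, not part of the original code): normalising the log
# probabilities of two categories over three teaching sets so that each column sums to
# one in probability space.
def _example_normalize_logpps():  # hypothetical helper, for illustration only
    logpps = np.log(np.array([[0.2, 0.5, 0.9],
                              [0.6, 0.5, 0.3]]))
    normed = normalize_logpps_array(logpps)
    assert np.allclose(np.exp(normed).sum(axis=0), 1.0)
    return normed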
def label_to_idx(label, all_labels):
return all_labels.index(label)
def to_image_space(u_model_vector, teaching_set, model):
A = model.A
m = model.m
relevant_U_dims = model.relevant_U_dims
u_vector = np.zeros(m.shape)
u_vector[relevant_U_dims] = u_model_vector
x_vector = np.matmul(A, u_vector)
    x_vector[teaching_set] += m[teaching_set]
d_vector = model.transform(x_vector, from_space='X', to_space='D')
    d_vector = np.squeeze(d_vector)
    return d_vector
#TODO: make device (cpu/gpu) an input option, default CPU
from __future__ import print_function, division
import csv
import functools
import json
import os
import shutil
import random
import warnings
import numpy as np
import torch
from tqdm import tqdm
from pymatgen.core.structure import Structure
from pymatgen.io import cif
from sklearn import preprocessing
from torch_geometric.data import Data, Dataset, DataLoader
import pandas as pd
from ._knn import knn_graph
from ._load_sets import AtomCustomJSONInitializer
from auglichem.crystal._transforms import (
RotationTransformation,
PerturbStructureTransformation,
RemoveSitesTransformation,
SupercellTransformation,
TranslateSitesTransformation,
CubicSupercellTransformation,
PrimitiveCellTransformation,
SwapAxesTransformation,
)
from auglichem.utils import (
ATOM_LIST,
CHIRALITY_LIST,
BOND_LIST,
BONDDIR_LIST,
random_split,
scaffold_split,
)
from ._load_sets import read_crystal
def collate_pool(dataset_list):
"""
Collate a list of data and return a batch for predicting crystal
properties.
Parameters
----------
dataset_list: list of tuples for each data point.
(atom_fea, nbr_fea, nbr_fea_idx, target)
atom_fea: torch.Tensor shape (n_i, atom_fea_len)
nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)
nbr_fea_idx: torch.LongTensor shape (n_i, M)
target: torch.Tensor shape (1, )
cif_id: str or int
Returns
-------
    N = sum(n_i); N0 = number of crystals in the batch
batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len)
Atom features from atom type
batch_nbr_fea: torch.Tensor shape (N, M, nbr_fea_len)
Bond features of each atom's M neighbors
batch_nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
    target: torch.Tensor shape (N0, 1)
Target value for prediction
batch_cif_ids: list
"""
batch_atom_fea, batch_nbr_fea, batch_nbr_fea_idx = [], [], []
crystal_atom_idx, batch_target = [], []
batch_cif_ids = []
base_idx = 0
for i, ((atom_fea, nbr_fea, nbr_fea_idx), target, cif_id)\
in enumerate(dataset_list):
n_i = atom_fea.shape[0] # number of atoms for this crystal
batch_atom_fea.append(atom_fea)
batch_nbr_fea.append(nbr_fea)
batch_nbr_fea_idx.append(nbr_fea_idx+base_idx)
new_idx = torch.LongTensor(np.arange(n_i)+base_idx)
crystal_atom_idx.append(new_idx)
batch_target.append(target)
batch_cif_ids.append(cif_id)
base_idx += n_i
return (torch.cat(batch_atom_fea, dim=0),
torch.cat(batch_nbr_fea, dim=0),
torch.cat(batch_nbr_fea_idx, dim=0),
crystal_atom_idx),\
torch.stack(batch_target, dim=0),\
batch_cif_ids
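# Illustrative sketch (added for clarity, not part of the original code): collate_pool is
# intended as the collate_fn of a torch DataLoader built over a dataset whose items are
# ((atom_fea, nbr_fea, nbr_fea_idx), target, cif_id) tuples (the CGCNN-style path).
# Note this is torch.utils.data.DataLoader, not the torch_geometric DataLoader imported
# above; `cgcnn_dataset` is a hypothetical stand-in for such a dataset.
def _example_cgcnn_loader(cgcnn_dataset, batch_size=32):  # hypothetical helper, for illustration only
    from torch.utils.data import DataLoader as TorchDataLoader
    loader = TorchDataLoader(cgcnn_dataset, batch_size=batch_size, shuffle=True,
                             collate_fn=collate_pool)
    (atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx), target, cif_ids = next(iter(loader))
    return atom_fea.shape, target.shape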
class CrystalDataset(Dataset):
"""
The CIFData dataset is a wrapper for a dataset where the crystal structures
are stored in the form of CIF files. The dataset should have the following
directory structure:
root_dir
├── id_prop.csv
├── atom_init.json
├── 0.cif
├── 1.cif
├── ...
    id_prop.csv: a CSV file with two columns. The first column records a
    unique ID for each crystal, and the second column records the value of the
    target property.
atom_init.json: a JSON file that stores the initialization vector for each
element.
    ID.cif: a CIF file that records the crystal structure, where ID is the
    unique ID for the crystal.
"""
def __init__(self, dataset, data_path=None, transform=None, id_prop_augment=None,
atom_init_file=None, id_prop_file=None, ari=None,
radius=8, dmin=0, step=0.2,
on_the_fly_augment=False, kfolds=0,
num_neighbors=8, max_num_nbr=12, seed=None, cgcnn=False, data_src=None):
"""
Inputs:
-------
            dataset (str): One of our 5 datasets: lanthanides, perovskites, band_gap,
fermi_energy, or formation_energy.
data_path (str, optional): Path for our data, automatically checks if it is there
and downloads the data if it isn't.
transform (list of AbstractTransformations, optional): The transformations
to do on our CIF files
id_prop_augment (np.array of floats, shape=(N,2), optional):
atom_init_file (str, optional):
id_prop_file (str, optional):
ari (CustomAtomJSONInitializer, optional):
radius (float, optional, default=0):
dmin (float, optional, default=0):
step (float, optional, default=0.2):
            on_the_fly_augment (bool, optional, default=False): Setting to True augments
cif files on-the-fly, like in MoleculeDataset. This feature is
experimental and may significantly slow down run times.
kfolds (int, optional, default=0): Number of folds to use in k-fold cross
validation. Must be >= 2 in order to run.
num_neighbors (int, optional, default=8): Number of neighbors to include for
torch_geometric based models.
max_num_nbr (int, optional, default=12): Maximum number of neighboring atoms used
when building the crystal graph for CGCNN.
            seed (int, optional): Random seed to use for data splitting.
cgcnn (bool, optional, default=False): If using built-in CGCNN model, must be set
to True.
Outputs:
--------
None
"""
super(Dataset, self).__init__()
self.dataset = dataset
self.data_path = data_path
self.data_src = data_src
self.transform = transform
self._augmented = False # To control runaway augmentation
self.num_neighbors = num_neighbors
self.seed = seed
        # If using a custom dataset, need to specify the source directory
if((self.dataset == "custom") and (self.data_src is None)):
error_str = "Need data source directory when using custom data set. "
error_str += "Use data_src=/path/to/data."
raise RuntimeError(error_str)
# After specifying data set
if(id_prop_augment is None):
self.id_prop_file, self.atom_init_file, self.ari, self.data_path, \
self.target, self.task = read_crystal(dataset, data_path, self.data_src)
else:
self.id_prop_file = id_prop_file
self.atom_init_file = atom_init_file
self.ari = ari
self.max_num_nbr, self.radius = max_num_nbr, radius
        assert os.path.exists(self.data_path), 'data_path does not exist!'
        assert os.path.exists(self.id_prop_file), 'id_prop file does not exist!'
if(id_prop_augment is None):
with open(self.id_prop_file) as f:
reader = csv.reader(f)
self.id_prop_augment = [row for row in reader]
else:
self.id_prop_augment = id_prop_augment
assert os.path.exists(self.atom_init_file), 'atom_init.json does not exist!'
self.gdf = lambda dist: self._gaussian_distance(dist, dmin=dmin, dmax=self.radius,
step=step)
        # Seeding used for reproducible transformations
self.reproduce_seeds = list(range(self.__len__()))
np.random.shuffle(self.reproduce_seeds)
self.on_the_fly_augment = on_the_fly_augment
if(self.on_the_fly_augment):
warnings.warn("On-the-fly augmentations for crystals is untested and can lead to memory issues. Use with caution.", category=RuntimeWarning, stacklevel=2)
# Set up for k-fold CV
if(kfolds > 1):
self._k_fold_cv = True
self.kfolds = kfolds
self._k_fold_cross_validation()
elif(kfolds == 1):
raise ValueError("kfolds > 1 to run.")
else:
self._k_fold_cv = False
# Must be true to use built-in CGCNN model
self._cgcnn = cgcnn
# Set atom featurizer
self.atom_featurizer = AtomCustomJSONInitializer(os.path.join(self.data_path,
'atom_init.json'))
def _aug_name(self, transformation):
if(isinstance(transformation, RotationTransformation)):
suffix = '_rotated'
elif(isinstance(transformation, PerturbStructureTransformation)):
suffix = '_perturbed'
elif(isinstance(transformation, RemoveSitesTransformation)):
suffix = '_remove_sites'
elif(isinstance(transformation, SupercellTransformation)):
suffix = '_supercell'
elif(isinstance(transformation, TranslateSitesTransformation)):
suffix = '_translate'
elif(isinstance(transformation, CubicSupercellTransformation)):
suffix = '_cubic_supercell'
elif(isinstance(transformation, PrimitiveCellTransformation)):
suffix = '_primitive_cell'
elif(isinstance(transformation, SwapAxesTransformation)):
suffix = '_swapaxes'
return suffix
def data_augmentation(self, transform=None):
'''
Function call to deliberately augment the data. Transformations are done one at
a time. For example, if we're using the RotationTransformation and
SupercellTransformation, 0.cif will turn into 0.cif, 0_supercell.cif, and
0_rotated.cif. Note: 0_supercell_rotated.cif WILL NOT be created.
input:
-----------------------
transformation (list of AbstractTransformations): The transformations
'''
if(self._augmented):
print("Augmentation has already been done.")
return
if(self.on_the_fly_augment):
print("Augmentation will be done on-the-fly.")
return
# Copy directory and rename it to augmented
if(self._k_fold_cv):
# Copy directory
shutil.copytree(self.data_path,
self.data_path + "_augmented_{}folds".format(self.kfolds),
dirs_exist_ok=True)
# Remove k-fold files from original directory
for i in range(self.kfolds):
os.remove(self.data_path + "/id_prop_train_{}.csv".format(i))
os.remove(self.data_path + "/id_prop_test_{}.csv".format(i))
# Update data path
self.data_path += "_augmented_{}folds".format(self.kfolds)
else:
shutil.copytree(self.data_path, self.data_path + "_augmented", dirs_exist_ok=True)
self.data_path += "_augmented"
self.atom_featurizer = AtomCustomJSONInitializer(os.path.join(self.data_path,
'atom_init.json'))
# Check transforms
if(not isinstance(transform, list)):
transform = [transform]
# Do augmentations
new_id_prop_augment = []
for id_prop in tqdm(self.id_prop_augment):
new_id_prop_augment.append((id_prop[0], id_prop[1]))
# Transform crystal
if(transform == [None] and self.transform is None):
break
for t in transform:
# Get augmented file name
id_name = id_prop[0] + self._aug_name(t)
new_id_prop_augment.append((id_name,id_prop[1]))
# Don't create file if it already exists
if(os.path.exists(self.data_path + '/' + id_name + '.cif')):
continue
try:
seed_idx = np.argwhere(self.id_prop_augment[:,0] == id_prop[0])[0][0]
aug_crystal = t.apply_transformation(
Structure.from_file(os.path.join(self.data_path,
id_prop[0]+'.cif')),
seed=self.reproduce_seeds[seed_idx])
except IndexError:
print(int(id_prop[0]))
print(len(self.reproduce_seeds))
raise
cif.CifWriter(aug_crystal).write_file(self.data_path + '/' + id_name + '.cif')
if(not self._k_fold_cv):
self.id_prop_augment = np.array(new_id_prop_augment)
else:
self.id_prop_augment_all = np.array(new_id_prop_augment)
self._augmented = True
def _updated_train_cifs(self, train_idx, num_transform):
'''
When doing k-fold CV. This function adds the augmented cif names to the train_idx
'''
updated_train_idx = []
for idx in train_idx:
num_idx = int(np.argwhere(self.id_prop_augment[:,0] == idx[0])[0][0])
for jdx in range(num_transform+1):
updated_train_idx.append(self.id_prop_augment_all[(num_transform+1)*num_idx+jdx])
return np.array(updated_train_idx)
def _check_repeats(self, idx1, idx2):
for v in idx1:
try:
assert not(v[0] in idx2[:,0]) # Only checking if cif file id is repeated
except AssertionError:
print("ERROR IN TRAIN/TEST/VALIDATION SPLIT")
print(len(idx1[:,0]))
print(len(idx2[:,0]))
print(v[0], v[0] in idx2[:,0], np.argwhere(idx2[:,0] == v[0])[0][0])
print(idx2[:,0][np.argwhere(idx2[:,0]==v[0])[0][0]])
raise
def _k_fold_cross_validation(self):
'''
k-fold CV data splitting function. Uses class attributes to split into k folds.
Works by shuffling original data then selecting folds one at a time.
'''
# Set seed and shuffle data
np.random.seed(self.seed)
np.random.shuffle(self.id_prop_augment)
frac = 1./self.kfolds
N = len(self.id_prop_augment)
for i in range(self.kfolds):
# Get all idxs
idxs = list(range(N))
# Get train and validation idxs
test_idxs = idxs[int(i*frac*N):int((i+1)*frac*N)]
del idxs[int(i*frac*N):int((i+1)*frac*N)]
# Get train and validation sets
test_set = np.array(self.id_prop_augment)[test_idxs]
train_set = np.array(self.id_prop_augment)[idxs]
self._check_repeats(test_set, train_set)
# Save files
np.savetxt(self.data_path + "/id_prop_test_{}.csv".format(i), test_set.astype(str),
delimiter=',', fmt="%s")
np.savetxt(self.data_path + "/id_prop_train_{}.csv".format(i), train_set.astype(str),
delimiter=',', fmt="%s")
def __len__(self):
return len(self.id_prop_augment)
def _gaussian_distance(self, distances, dmin, dmax, step, var=None):
if var is None:
var = step
self.filter = np.arange(dmin, dmax+step, step)
        return np.exp(-(distances[..., np.newaxis] - self.filter)**2 / var**2)
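# Worked example (illustrative, not part of the original code): the Gaussian distance
# expansion above turns each scalar distance into a vector of Gaussian basis values
# centred on a grid of filter positions, so a (n_atoms, n_nbrs) distance array becomes
# (n_atoms, n_nbrs, n_filters).
def _example_gaussian_distance_expansion():  # hypothetical helper, for illustration only
    dmin, dmax, step, var = 0.0, 8.0, 0.2, 0.2
    filt = np.arange(dmin, dmax + step, step)
    distances = np.array([[1.0, 2.5], [0.3, 7.9]])
    expanded = np.exp(-(distances[..., np.newaxis] - filt) ** 2 / var ** 2)
    assert expanded.shape == (2, 2, len(filt))
    return expanded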
import io
import cv2
import numpy as np
import math
import scipy.stats as ss
from scipy.optimize import least_squares
RECT_SCALE = 1000
bg_R = np.array([150.0 / 255.0, 150.0 / 255.0, 150.0 / 255.0])
def get_average_rgb(image_data):
return np.average(image_data, axis=(0, 1))
def crop_image_by_position_and_rect(cv_image, position, rect):
# img[y: y + h, x: x + w]
height = cv_image.shape[0]
width = cv_image.shape[1]
position_x = position.x * width
position_y = position.y * height
rect_x = width * rect.x / RECT_SCALE
rect_y = height * rect.y / RECT_SCALE
return cv_image[int(position_y):int(position_y) + int(rect_y),
int(position_x):int(position_x) + int(rect_x)]
def read_matrix(path, n_params):
H = None
line_arr = np.array([])
count = 0
with open(path) as f:
f.readline()
for line in f:
if "=" in line:
count += 1
if H is None:
H = line_arr
else:
H = np.vstack((H, line_arr))
                line_arr = np.array([])
import os,re,time,glob
import numpy as np
import pickle as pickle
import matplotlib.pylab as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import scipy
from scipy.signal import fftconvolve
from scipy.ndimage.filters import maximum_filter,minimum_filter,median_filter,gaussian_filter
from scipy import ndimage, stats
from skimage import morphology, restoration, measure
from skimage.segmentation import random_walker
from scipy.ndimage import gaussian_laplace
import cv2
import multiprocessing as mp
from sklearn.decomposition import PCA
from scipy.ndimage.interpolation import map_coordinates
from . import get_img_info, corrections, alignment_tools
from .External import Fitting_v3
from . import _correction_folder,_temp_folder,_distance_zxy,_sigma_zxy,_image_size, _allowed_colors
# generate common colors
# generate my colors
from matplotlib.colors import ListedColormap
# red
Red_colors = np.ones([256,4])
Red_colors[:,1] = np.linspace(1,0,256)
Red_colors[:,2] = np.linspace(1,0,256)
myReds = ListedColormap(Red_colors)
# blue
Blue_colors = np.ones([256,4])
Blue_colors[:,0] = np.linspace(1,0,256)
Blue_colors[:,1] = np.linspace(1,0,256)
myBlues = ListedColormap(Blue_colors)
# green
Green_colors = np.ones([256,4])
Green_colors[:,0] = np.linspace(1,0,256)
Green_colors[:,2] = np.linspace(1,0,256)
myGreens = ListedColormap(Green_colors)
_myCmaps = [myReds, myBlues, myGreens]
def partition_map(list_,map_, enumerate_all=False):
"""
Inputs
takes a list [e1,e2,e3,e4,e5,e6] and a map (a list of indices [0,0,1,0,1,2]). map can be a list of symbols too. ['aa','aa','bb','aa','bb','cc']
Output
returns a sorted list of lists, e.g. [[e1, e2,e4],[e3,e5],[e6]]
"""
list__=np.array(list_)
map__=np.array(map_)
if enumerate_all:
return [list(list__[map__==_i]) for _i in np.arange(0, np.max(map__)+1)]
else:
return [list(list__[map__==element]) for element in np.unique(map__)]
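# Worked example (illustrative, not part of the original code), matching the docstring above:
def _example_partition_map():  # hypothetical helper, for illustration only
    groups = partition_map(['e1', 'e2', 'e3', 'e4', 'e5', 'e6'], [0, 0, 1, 0, 1, 2])
    assert groups == [['e1', 'e2', 'e4'], ['e3', 'e5'], ['e6']]
    return groups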
def old_gauss_ker(sig_xyz=[2,2,2],sxyz=16,xyz_disp=[0,0,0]):
    '''Create a gaussian kernel; return gaussian values within a box of size sxyz, with sigma sig_xyz and sub-pixel displacement xyz_disp'''
dim = len(xyz_disp)
xyz=np.indices([sxyz+1]*dim)
for i in range(len(xyz.shape)-1):
sig_xyz=np.expand_dims(sig_xyz,axis=-1)
xyz_disp=np.expand_dims(xyz_disp,axis=-1)
im_ker = np.exp(-np.sum(((xyz-xyz_disp-sxyz/2.)/sig_xyz**2)**2,axis=0)/2.)
return im_ker
def gauss_ker(sig_xyz=[2,2,2],sxyz=16,xyz_disp=[0,0,0]):
"""Faster version of gaussian kernel"""
dim = len(xyz_disp)
xyz=np.swapaxes(np.indices([sxyz+1]*dim), 0,dim)
return np.exp(-np.sum(((xyz-np.array(xyz_disp)-sxyz/2.)/np.array(sig_xyz)**2)**2,axis=dim)/2.)
def gaussian_kernel_2d(center_xy, sigma_xy=[2,2], radius=8):
"""Function to generate gaussian kernel in 2d space"""
## check inputs
if len(center_xy) != 2:
raise IndexError(f"center_xy should be length=2 list or array")
if len(sigma_xy) != 2:
raise IndexError(f"sigma_xy should be length=2 list or array")
radius = int(radius)
if radius < 3 * max(sigma_xy): # if radius is smaller than 3-sigma, expand
radius = 3*max(sigma_xy)
xy_coords=np.swapaxes(np.indices([radius*2+1]*2), 0, 2)
return np.exp(-np.sum(((xy_coords-np.array(center_xy)-radius)/np.array(sigma_xy)**2)**2,axis=2)/2.)
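# Illustrative sketch (added for clarity, not part of the original code): with center_xy=[0, 0]
# the kernel peaks at the centre of the (2*radius+1) x (2*radius+1) patch; a nonzero center_xy
# shifts the peak relative to that centre.
def _example_gaussian_kernel_2d():  # hypothetical helper, for illustration only
    ker = gaussian_kernel_2d([0, 0], sigma_xy=[2, 2], radius=8)
    assert ker.shape == (17, 17)
    assert np.unravel_index(np.argmax(ker), ker.shape) == (8, 8)
    return ker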
def add_source(im_,pos=[0,0,0],h=200,sig=[2,2,2],size_fold=10):
    '''Impose a gaussian distribution with given position, height and sigma onto an existing image'''
im=np.array(im_,dtype=float)
pos = np.array(pos)
pos_int = np.array(pos,dtype=int)
xyz_disp = -pos_int+pos
im_ker = gauss_ker(sig, int(max(sig)*size_fold), xyz_disp)
im_ker_sz = np.array(im_ker.shape,dtype=int)
    pos_min = np.array(pos_int-im_ker_sz/2, dtype=int)
    pos_max = np.array(pos_min+im_ker_sz, dtype=int)
im_shape = np.array(im.shape)
def in_im(pos__):
        pos_=np.array(pos__,dtype=int)
pos_[pos_>=im_shape]=im_shape[pos_>=im_shape]-1
pos_[pos_<0]=0
return pos_
pos_min_ = in_im(pos_min)
pos_max_ = in_im(pos_max)
pos_min_ker = pos_min_-pos_min
pos_max_ker = im_ker_sz+pos_max_-pos_max
slices_ker = tuple(slice(pm,pM) for pm,pM in zip(pos_min_ker,pos_max_ker))
slices_im = tuple(slice(pm,pM) for pm,pM in zip(pos_min_,pos_max_))
im[slices_im] += im_ker[slices_ker]*h
return im
def subtract_source(im,pfit):
return add_source(im,pos=pfit[1:4],h=-pfit[0],sig=pfit[-3:])
def plus_source(im,pfit):
return add_source(im,pos=pfit[1:4],h=pfit[0],sig=pfit[-3:])
def sphere(center,radius,imshape=None):
"""Returns an int array (size: n x len(center)) with the xyz... coords of a sphere(elipsoid) of radius in imshape"""
radius_=np.array(radius,dtype=float)
if len(radius_.shape)==0:
        radius_ = np.array([radius]*len(center),dtype=int)
xyz = np.array(np.indices(2*radius_+1),dtype=float)
radius__=np.array(radius_,dtype=float)
for i in range(len(xyz.shape)-1):
radius__=np.expand_dims(radius__,axis=-1)
xyz_keep = np.array(np.where(np.sum((xyz/radius__-1)**2,axis=0)<1))
xyz_keep = xyz_keep-np.expand_dims(np.array(radius_,dtype=int),axis=-1)+np.expand_dims(np.array(center,dtype=int),axis=-1)
xyz_keep = xyz_keep.T
if imshape is not None:
xyz_keep=xyz_keep[np.all((xyz_keep>=0)&(xyz_keep<np.expand_dims(imshape,axis=0)),axis=-1)]
return xyz_keep
def grab_block(im,center,block_sizes):
dims = im.shape
slices = []
def in_dim(c,dim):
c_ = c
if c_<0: c_=0
if c_>dim: c_=dim
return c_
for c,block,dim in zip(center,block_sizes,dims):
block_ = int(block/2)
c=int(c)
c_min,c_max = in_dim(c-block_,dim),in_dim(c+block-block_,dim)
slices.append(slice(c_min,c_max))
slices.append(Ellipsis)
return im[slices]
# fit single gaussian
def fitsinglegaussian_fixed_width(data,center,radius=10,n_approx=10,width_zxy=_sigma_zxy):
"""Returns (height, x, y,z, width_x, width_y,width_z,bk)
the gaussian parameters of a 2D distribution found by a fit"""
data_=np.array(data,dtype=float)
dims = np.array(data_.shape)
if center is not None:
center_z,center_x,center_y = center
else:
xyz = np.array(list(map(np.ravel,np.indices(data_.shape))))
data__=data_[xyz[0],xyz[1],xyz[2]]
args_high = np.argsort(data__)[-n_approx:]
center_z,center_x,center_y = np.median(xyz[:,args_high],axis=-1)
xyz = sphere([center_z,center_x,center_y],radius,imshape=dims).T
if len(xyz[0])>0:
data__=data_[xyz[0],xyz[1],xyz[2]]
sorted_data = np.sort(data__)#np.sort(np.ravel(data__))
bk = np.median(sorted_data[:n_approx])
height = (np.median(sorted_data[-n_approx:])-bk)
width_z,width_x,width_y = np.array(width_zxy)
params_ = (height,center_z,center_x,center_y,bk)
def gaussian(height,center_z, center_x, center_y,
bk=0,
width_z=width_zxy[0],
width_x=width_zxy[1],
width_y=width_zxy[2]):
"""Returns a gaussian function with the given parameters"""
width_x_ = np.abs(width_x)
width_y_ = np.abs(width_y)
width_z_ = np.abs(width_z)
height_ = np.abs(height)
bk_ = np.abs(bk)
def gauss(z,x,y):
g = bk_+height_*np.exp(
-(((center_z-z)/width_z_)**2+((center_x-x)/width_x_)**2+
((center_y-y)/width_y_)**2)/2.)
return g
return gauss
def errorfunction(p):
f=gaussian(*p)(*xyz)
g=data__
#err=np.ravel(f-g-g*np.log(f/g))
err=np.ravel(f-g)
return err
p, success = scipy.optimize.leastsq(errorfunction, params_)
p=np.abs(p)
p = np.concatenate([p,width_zxy])
#p[:1:4]+=0.5
return p,success
else:
return None,None
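# Hedged usage sketch (not part of the original code): fits a single fixed-width
# Gaussian around a hypothetical seed position in a 3D image `im`; the returned vector
# follows the order (height, z, x, y, background, width_z, width_x, width_y).
def _example_single_gaussian_fit(im):
    p, success = fitsinglegaussian_fixed_width(im, center=[10, 50, 50], radius=10)
    return p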
def fit_seed_points_base(im, centers, width_z=_sigma_zxy[0], width_xy=_sigma_zxy[1],
radius_fit=5, n_max_iter = 10, max_dist_th=0.25):
'''Basic function used for multiple gaussian fitting, given image:im, seeding_result:centers '''
print("Fitting:" +str(len(centers[0]))+" points")
z,x,y = centers # fitting kernels provided by previous seeding
if len(x)>0:
#estimate height
#gfilt_size=0.75
#filt_size=3
#im_plt = gaussian_filter(im,gfilt_size)
#max_filt = maximum_filter(im_plt,filt_size)
#min_filt = minimum_filter(im_plt,filt_size)
#h = max_filt[z,x,y]-min_filt[z,x,y]
#inds = np.argsort(h)[::-1]
#z,x,y = z[inds],x[inds],y[inds]
zxy = np.array([z,x,y],dtype=int).T
ps = []
im_subtr = np.array(im,dtype=float)
for center in zxy:
p,success = fitsinglegaussian_fixed_width(im_subtr,center,radius=radius_fit,n_approx=10,width_zxy=[width_z,width_xy,width_xy])
if p is not None: # if the fit succeeded, subtract the fitted profile
ps.append(p)
im_subtr = subtract_source(im_subtr,p)
im_add = np.array(im_subtr)
max_dist=np.inf
n_iter = 0
while max_dist > max_dist_th:
ps_1=np.array(ps)
ps_1=ps_1[np.argsort(ps_1[:,0])[::-1]]
ps = []
ps_1_rem=[]
for p_1 in ps_1:
center = p_1[1:4]
im_add = plus_source(im_add,p_1)
p,success = fitsinglegaussian_fixed_width(im_add,center,radius=radius_fit,n_approx=10,width_zxy=[width_z,width_xy,width_xy])
if p is not None:
ps.append(p)
ps_1_rem.append(p_1)
im_add = subtract_source(im_add,p)
ps_2=np.array(ps)
ps_1_rem=np.array(ps_1_rem)
dif = ps_1_rem[:,1:4]-ps_2[:,1:4]
max_dist = np.max(np.sum(dif**2,axis=-1))
n_iter+=1
if n_iter>n_max_iter:
break
return ps_2
else:
return np.array([])
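# Hedged usage sketch (not part of the original code): seeds candidate spots in a
# hypothetical 3D image with get_seed_points_base (defined further below) and refines
# them by iterative Gaussian fitting; the thresholds are illustrative only.
def _example_seed_and_fit(im):
    _seeds = get_seed_points_base(im, th_seed=300)
    _pfits = fit_seed_points_base(im, _seeds, radius_fit=5, n_max_iter=10)
    return _pfits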
## Fit bead centers
def get_STD_centers(im, seeds=None, th_seed=150,
dynamic=False, seed_by_per=False, th_seed_percentile=95,
min_num_seeds=1,
remove_close_pts=True, close_threshold=0.1, fit_radius=5,
sort_by_h=False, save=False, save_folder='', save_name='',
plt_val=False, force=False, verbose=False):
'''Fit beads for one image:
Inputs:
im: image, ndarray
th_seed: threshold for seeding, float (default: 150)
dynamic: whether to do dynamic seeding, bool (default: False)
th_seed_percentile: intensity percentile for seeding, float (default: 95)
remove_close_pts: whether to remove points very close to each other, bool (default: True)
close_threshold: threshold for removing duplicates within a distance, float (default: 0.1)
fit_radius: radius of the neighborhood used for fitting each seed, int (default: 5)
sort_by_h: whether sort fitted points by height, bool (default:False)
plt_val: whether making plot, bool (default: False)
save: whether save fitting result, bool (default: False)
save_folder: full path of save folder, str (default: None)
save_name: full name of save file, str (default: None)
force: whether force fitting despite of saved file, bool (default: False)
verbose: say something!, bool (default: False)
Outputs:
beads: fitted spots with information, n by 4 array'''
import os
import pickle as pickle
if not force and os.path.exists(save_folder+os.sep+save_name) and save_name != '':
if verbose:
print("- loading file:,", save_folder+os.sep+save_name)
beads = pickle.load(open(save_folder+os.sep+save_name, 'rb'))
if verbose:
print("--", len(beads), " of beads loaded.")
return beads
else:
# seeding
if seeds is None:
seeds = get_seed_in_distance(im, center=None, dynamic=dynamic,
th_seed_percentile=th_seed_percentile,
seed_by_per=seed_by_per,
min_dynamic_seeds=min_num_seeds,
gfilt_size=0.75, filt_size=3,
th_seed=th_seed, hot_pix_th=4, verbose=verbose)
# fitting
fitter = Fitting_v3.iter_fit_seed_points(im, seeds.T, radius_fit=5)
fitter.firstfit()
pfits = fitter.ps
#pfits = visual_tools.fit_seed_points_base_fast(im,seeds.T,width_z=1.8*1.5/2,width_xy=1.,radius_fit=5,n_max_iter=3,max_dist_th=0.25,quiet=not verbose)
# get coordinates for fitted beads
if len(pfits) > 0:
if sort_by_h:
_intensity_order = np.argsort(np.array(pfits)[:,0])
beads = np.array(pfits)[np.flipud(_intensity_order), 1:4]
else:
beads = np.array(pfits)[:, 1:4]
# remove very close spots
if remove_close_pts:
remove = np.zeros(len(beads), dtype=bool)
for i, bead in enumerate(beads):
if np.isnan(bead).any() or np.sum(np.sum((beads-bead)**2, axis=1) < close_threshold) > 1:
remove[i] = True
if (bead < 0).any() or (bead > np.array(im.shape)).any():
remove[i] = True
beads = beads[~remove]
else:
beads = None
if verbose:
print(f"- fitting {len(pfits)} points")
if remove_close_pts:
print(f"-- {np.sum(remove)} points removed given smallest distance {close_threshold}")
# make plot if required
if plt_val:
plt.figure()
plt.imshow(np.max(im, 0), interpolation='nearest')
plt.plot(beads[:, -1], beads[:, -2], 'or')
plt.show()
# save to pickle if specified
if save:
if not os.path.exists(save_folder):
os.makedirs(save_folder)
if verbose:
print("-- saving fitted spots to",
save_folder+os.sep+save_name)
pickle.dump(beads[:,-3:], open(save_folder+os.sep+save_name, 'wb'))
return beads
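# Hedged usage sketch (not part of the original code): fits bead centers in a
# hypothetical bead image with a lowered seeding threshold; this relies on
# get_seed_in_distance and Fitting_v3 being available in the module, as in get_STD_centers.
def _example_std_centers(bead_im):
    return get_STD_centers(bead_im, th_seed=150, remove_close_pts=True, verbose=False)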
def get_seed_points_base(im, gfilt_size=0.75, background_gfilt_size=10, filt_size=3,
th_seed=300, hot_pix_th=0, return_h=False):
"""Base function to do seeding"""
# gaussian-filter + max-filter
if gfilt_size:
max_im = gaussian_filter(im,gfilt_size)
else:
max_im = im
# gaussian_filter (large) + min_filter
if background_gfilt_size:
min_im = gaussian_filter(im,background_gfilt_size)
else:
min_im = im
max_filt = np.array(maximum_filter(max_im,filt_size), dtype=np.int64)
min_filt = np.array(minimum_filter(min_im,filt_size), dtype=np.int64)
# get candidate seed points
im_plt2 = (max_filt==max_im) & (min_filt!=min_im) & (min_filt!=0)
z,x,y = np.where(im_plt2)
keep = (max_filt[z,x,y]-min_filt[z,x,y])>th_seed#/np.array(max_filt[z,x,y],dtype=float)>0.5
x,y,z = x[keep],y[keep],z[keep]
h = max_filt[z,x,y]-min_filt[z,x,y]
#get rid of hot pixels
if hot_pix_th>0:
xy_str = [str([x_,y_]) for x_,y_ in zip(x,y)]
xy_str_,cts_ = np.unique(xy_str,return_counts=True)
keep = np.array([xy_str__ not in xy_str_[cts_>hot_pix_th] for xy_str__ in xy_str],dtype=bool)
x,y,z = x[keep],y[keep],z[keep]
h = h[keep]
centers = np.array([z,x,y])
if return_h:
centers = np.array([z,x,y,h])
return centers
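# Hedged usage sketch (not part of the original code): seeds local maxima in a
# hypothetical image and also returns the seed heights, which can be used for ranking.
def _example_seeding_with_heights(im):
    _z, _x, _y, _h = get_seed_points_base(im, gfilt_size=0.75, filt_size=3,
                                          th_seed=300, return_h=True)
    return _z, _x, _y, _h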
def fit_seed_points_base_fast(im,centers,width_z=_sigma_zxy[0],width_xy=_sigma_zxy[1],radius_fit=5,n_max_iter = 10,max_dist_th=0.25, quiet=False):
if not quiet:
print("Fitting:" +str(len(centers[0]))+" points")
z,x,y = centers
if len(x)>0:
zxy = np.array([z,x,y],dtype=int).T
ps = []
im_subtr = np.array(im,dtype=float)
for center in zxy:
p,success = fitsinglegaussian_fixed_width(im_subtr,center,radius=radius_fit,n_approx=5,width_zxy=[width_z,width_xy,width_xy])
if p is not None:
ps.append(p)
im_add = np.array(im_subtr)
max_dist=np.inf
n_iter = 0
while max_dist>max_dist_th:
ps_1=np.array(ps)
ps_1=ps_1[np.argsort(ps_1[:,0])[::-1]]
ps = []
ps_1_rem=[]
for p_1 in ps_1:
center = p_1[1:4]
p,success = fitsinglegaussian_fixed_width(im_add,center,radius=5,n_approx=10,width_zxy=[1.8,1.,1.])
if p is not None:
ps.append(p)
ps_1_rem.append(p_1)
ps_2=np.array(ps)
ps_1_rem=np.array(ps_1_rem)
dif = ps_1_rem[:,1:4]-ps_2[:,1:4]
max_dist = np.max(np.sum(dif**2,axis=-1))
n_iter+=1
if n_iter>n_max_iter:
break
return ps_2
else:
return np.array([])
# fast alignment of fitted items which are bright and sparse (like beads)
def beads_alignment_fast(beads, ref_beads, unique_cutoff=2., check_outlier=True, outlier_sigma=1., verbose=True):
'''beads_alignment_fast, for finding pairs of beads when they are sparse
Inputs:
beads: ndarray of bead coordinates, num_beads by [z,x,y], n-by-3 numpy ndarray
ref_beads: similar coordinates for beads in the reference frame, n-by-3 numpy ndarray
unique_cutoff: a threshold within which pairs are assumed to be unique, float
check_outlier: whether to use Delaunay triangulation neighbors to check for outliers, bool
outlier_sigma: multiple of sigma that determines the threshold when checking outliers, positive float
verbose: whether to say something during alignment, bool
Outputs:
_paired_beads: beads that find their pairs in ref frame, n-by-3 numpy array
_paired_ref_beads: ref_beads that find their pairs (sorted), n-by-3 numpy array
_shifts: 3d shift of beads (bead - ref_bead), n-by-3 numpy array
'''
# initialize
_paired_beads, _paired_ref_beads, _shifts = [], [], []
# loop through all beads in ref frame
for _rb in ref_beads:
_competing_ref_beads = ref_beads[np.sqrt(np.sum((ref_beads - _rb)**2,1)) < unique_cutoff]
if len(_competing_ref_beads) > 1: # in this case, other ref_bead exist within cutoff
continue
else:
_candidate_beads = beads[np.sqrt(np.sum((beads - _rb)**2,1)) < unique_cutoff]
if len(_candidate_beads) == 1: # if unique pairs identified
_paired_beads.append(_candidate_beads[0])
_paired_ref_beads.append(_rb)
_shifts.append(_candidate_beads[0] - _rb)
# convert to numpy arrays
_paired_beads = np.array(_paired_beads)
_paired_ref_beads = np.array(_paired_ref_beads)
_shifts = np.array(_shifts)
# remove suspicious shifts
for _j in range(np.shape(_shifts)[1]):
_shift_keeps = np.abs(_shifts)[:,_j] < np.mean(np.abs(_shifts)[:,_j])+outlier_sigma*np.std(np.abs(_shifts)[:,_j])
# filter beads and shifts
_paired_beads = _paired_beads[_shift_keeps]
_paired_ref_beads = _paired_ref_beads[_shift_keeps]
_shifts = _shifts[_shift_keeps]
# check outlier
if check_outlier:
from scipy.spatial import Delaunay
from mpl_toolkits.mplot3d import Axes3D
# initialize list for shifts calculated by neighboring points
_alter_shifts = []
# calculate Delaunay triangulation for ref_beads
_tri = Delaunay(_paired_ref_beads)
# loop through all beads
for _i in range(_paired_ref_beads.shape[0]):
# initialize flag used to judge whether to keep this point
_keep = True
# extract shift
_shift = _shifts[_i]
# initialize neighboring point ids
_neighbor_ids = []
# find neighbors for this point
for _simplex in _tri.simplices.copy():
if _i in _simplex:
_neighbor_ids.append(_simplex)
_neighbor_ids = np.array(np.unique(_neighbor_ids).astype(int))
_neighbor_ids = _neighbor_ids[_neighbor_ids != _i] # remove itself
_neighbor_ids = _neighbor_ids[_neighbor_ids != -1] # remove error
# calculate alternative shift
_neighbors = _paired_ref_beads[_neighbor_ids,:]
_neighbor_shifts = _shifts[_neighbor_ids,:]
_neighbor_weights = 1/np.sqrt(np.sum((_neighbors-_paired_ref_beads[_i])**2,1))
_alter_shift = np.dot(_neighbor_shifts.T, _neighbor_weights) / np.sum(_neighbor_weights)
_alter_shifts.append(_alter_shift)
#print _i, _alter_shift, _shift
# differences between shifts and alternative shifts
_diff = [np.linalg.norm(_shift-_alter_shift) for _shift,_alter_shift in zip(_shifts, _alter_shifts)]
# determine whether keep this:
print('-- differences in original drift and neighboring drift:', _diff, np.mean(_diff), np.std(_diff))
_keeps = np.array(_diff) < np.mean(_diff)+np.std(_diff)*outlier_sigma
# filter beads and shifts
_paired_beads = _paired_beads[_keeps]
_paired_ref_beads = _paired_ref_beads[_keeps]
_shifts = _shifts[_keeps]
return np.array(_paired_beads), np.array(_paired_ref_beads), np.array(_shifts)
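# Hedged usage sketch (not part of the original code): pairs bead coordinates from a
# moving frame against a reference frame and summarizes the 3D drift as the median shift.
def _example_bead_alignment(beads, ref_beads):
    _paired, _paired_ref, _shifts = beads_alignment_fast(beads, ref_beads,
                                                         unique_cutoff=2., check_outlier=True)
    return np.median(_shifts, axis=0)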
class imshow_mark_3d_v2:
def master_reset(self):
#self.dic_min_max = {}
self.class_ids = []
self.draw_x,self.draw_y,self.draw_z=[],[],[]
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
#load vars
self.load_coords()
self.set_image()
def __init__(self,ims,fig=None,image_names=None,rescz=1.,min_max_default = [None,None], given_dic=None,save_file=None,paramaters={}):
#internalize
#seeding parameters (the 'paramaters' kwarg name is kept for backwards compatibility)
self.gfilt_size = paramaters.get('gfilt_size',0.75)#first gaussian blur with this radius, to avoid false local maxima from camera fluctuations
self.filt_size = paramaters.get('filt_size',3)#local maxima and minima are computed on blocks of this size
self.th_seed = paramaters.get('th_seed',300.)#keep points where the difference between local minimum and maximum exceeds this value
self.hot_pix_th = paramaters.get('hot_pix_th',0)
#fitting parameters
self.width_z = paramaters.get('width_z',1.8*1.5)#fixed width in z # 1.8 presupposes isotropic pixel size
self.width_xy = paramaters.get('width_xy',1.)#fixed width in xy
self.radius_fit = paramaters.get('radius_fit',5)#neighborhood radius used when fitting each seed point
self.paramaters=paramaters
self.ims=ims
self.rescz = rescz
if image_names is None:
self.image_names = ['Image '+str(i+1) for i in range(len(ims))]
else:
self.image_names = image_names
self.save_file = save_file
#define extra vars
self.dic_min_max = {}
self.class_ids = []
self.draw_x,self.draw_y,self.draw_z=[],[],[]
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
self.delete_mode = False
#load vars
self.load_coords(_given_dic=given_dic)
#construct images
self.index_im = 0
self.im_ = self.ims[self.index_im]
self.im_xy = np.max(self.im_,axis=0)
self.im_z = np.max(self.im_,axis=1)
im_z_len = self.im_z.shape[0]
indz=np.array(np.round(np.arange(0,im_z_len,self.rescz)),dtype=int)
self.im_z = self.im_z[indz[indz<im_z_len],...]
#setup plots
if fig is None:
self.f=plt.figure()
else:
self.f=fig
self.ax1,self.ax2 = ImageGrid(self.f, 111, nrows_ncols=(2, 1), axes_pad=0.1)
self.lxy,=self.ax1.plot(self.draw_x, self.draw_y, 'o',
markersize=12,markeredgewidth=1,markeredgecolor='y',markerfacecolor='None')
self.lz,=self.ax2.plot(self.draw_x, self.draw_z, 'o',
markersize=12,markeredgewidth=1,markeredgecolor='y',markerfacecolor='None')
self.imshow_xy = self.ax1.imshow(self.im_xy,interpolation='nearest',cmap='gray')
self.imshow_z = self.ax2.imshow(self.im_z,interpolation='nearest',cmap='gray')
self.min_,self.max_ = min_max_default
if self.min_ is None: self.min_ = np.min(self.im_)
if self.max_ is None: self.max_ = np.max(self.im_)
self.imshow_xy.set_clim(self.min_,self.max_)
self.imshow_z.set_clim(self.min_,self.max_)
self.ax1.callbacks.connect('ylim_changed', self.xy_on_lims_change)
self.ax2.callbacks.connect('ylim_changed', self.z_on_lims_change)
self.f.suptitle(self.image_names[self.index_im])
#connect mouse and keyboard
cid = self.f.canvas.mpl_connect('button_press_event', self.onclick)
cid2 = self.f.canvas.mpl_connect('key_press_event', self.press)
cid3 = self.f.canvas.mpl_connect('key_release_event', self.release)
self.set_image()
if fig is None:
plt.show()
def onclick(self,event):
if event.button==3:
#print "click"
if event.inaxes is self.ax1:
if self.delete_mode:
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
#print x_min,x_max,y_min,y_max,z_min,z_max
#print x_,y_,z_
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep_class = (np.array(self.class_ids)==self.index_im)&(np.isnan(self.draw_x)==False)
keep = keep_in_window&keep_class
if np.sum(keep)>0:
keep_ind = np.arange(len(keep))[keep]
coords_xy_class = list(zip(np.array(self.draw_x)[keep],
np.array(self.draw_y)[keep]))
difs = np.array(coords_xy_class)-np.array([[event.xdata,event.ydata]])
ind_= np.argmin(np.sum(np.abs(difs),axis=-1))
self.draw_x.pop(keep_ind[ind_])
self.draw_y.pop(keep_ind[ind_])
self.draw_z.pop(keep_ind[ind_])
self.class_ids.pop(keep_ind[ind_])
print(ind_)
else:
print('test')
else:
if event.xdata is not None and event.ydata is not None:
self.draw_x.append(event.xdata)
self.draw_y.append(event.ydata)
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.draw_z.append((z_min+z_max)/2.)
self.class_ids.append(self.index_im)
if event.inaxes is self.ax2:
if event.xdata is not None and event.ydata is not None:
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep_class = (np.array(self.class_ids)==self.index_im)&(np.isnan(self.draw_x)==False)
keep = keep_in_window&keep_class
if np.sum(keep)>0:
keep_ind = np.arange(len(keep))[keep]
coords_x = np.array(self.draw_x)[keep]
ind_ = np.argmin(np.abs(coords_x-event.xdata))
self.draw_z[keep_ind[ind_]]=event.ydata
self.update_point_plot()
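# Keyboard shortcuts handled by press/release below: 'a'/'d' switch to the previous/next
# image, 's' saves the projections, 'x' auto-scales the contrast to the current view,
# 't' seeds points, 'y' fits the current seeds, 'q' seeds and fits all images,
# 'n' keeps only points inside the nucleus mask (if provided), 'delete' removes the
# last point, and holding 'shift' enables delete mode for right-clicks.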
def press(self,event):
if event.key== 'd':
self.index_im = (self.index_im+1)%len(self.ims)
self.set_image()
if event.key== 'a':
self.index_im = (self.index_im-1)%len(self.ims)
self.set_image()
if event.key=='s':
self.save_ims()
if event.key== 'x':
self.auto_scale()
if event.key== 't':
self.get_seed_points()
if event.key== 'n':
self.handle_in_nucleus()
if event.key== 'q':
prev_im = self.index_im
for self.index_im in range(len(self.ims)):
self.set_image()
self.get_seed_points()
self.fit_seed_points()
self.index_im = prev_im
self.set_image()
if event.key== 'y':
self.fit_seed_points()
if event.key == 'delete':
self.draw_x.pop(-1)
self.draw_y.pop(-1)
self.draw_z.pop(-1)
self.class_ids.pop(-1)
self.update_point_plot()
if event.key == 'shift':
self.delete_mode = True
def release(self, event):
if event.key == 'shift':
self.delete_mode = False
def populate_draw_xyz(self,flip=False):
if len(self.coords)>0:
self.draw_x,self.draw_y,self.draw_z = list(zip(*self.coords))
if flip: self.draw_x,self.draw_y,self.draw_z = list(map(list,[self.draw_y,self.draw_x,self.draw_z]))
else: self.draw_x,self.draw_y,self.draw_z = list(map(list,[self.draw_x,self.draw_y,self.draw_z]))
else:
self.draw_x,self.draw_y,self.draw_z = [],[],[]
def create_text(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.texts = []
i_ims = np.zeros(len(self.ims),dtype=int)
for (xyz,c_id) in zip(self.coords,self.class_ids):
i_ims[c_id]+=1
if c_id==self.index_im:
if not np.isnan(xyz[0]):
if z_min<xyz[2] and z_max>xyz[2] and y_min<xyz[0] and y_max>xyz[0] and x_min<xyz[1] and x_max>xyz[1]:
text_ = str(i_ims[c_id])
color_='r'
if hasattr(self,'dec_text'):
key_dec = tuple(list(np.array(xyz,dtype=int))+[c_id])
if key_dec in self.dec_text:
text_=self.dec_text[key_dec]['text']
color_='b'
self.texts.append(self.ax1.text(xyz[0],xyz[1],text_,color=color_))
self.texts.append(self.ax2.text(xyz[0],xyz[2],text_,color=color_))
def update_point_plot(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.coords = list(zip(self.draw_x,self.draw_y,self.draw_z))
x_,y_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
#print x_min,x_max,y_min,y_max,z_min,z_max
#print x_,y_,z_
keep_class = np.array(self.class_ids)==self.index_im
keep_in_window = (x_>y_min)&(x_<y_max)&(y_>x_min)&(y_<x_max)&(z_>z_min)&(z_<z_max)
keep = keep_class&keep_in_window
self.lxy.set_xdata(x_[keep])
self.lxy.set_ydata(y_[keep])
self.lz.set_xdata(x_[keep])
self.lz.set_ydata(z_[keep])
self.save_coords()
self.remove_text()
self.create_text()
self.f.canvas.draw()
def remove_text(self):
if not hasattr(self,'texts'): self.texts = []
for txt in self.texts:
txt.remove()
def load_coords(self, _given_dic=None):
save_file = self.save_file
if _given_dic:
save_dic = _given_dic
elif save_file is not None and os.path.exists(save_file):
with open(save_file,'rb') as fid:
save_dic = pickle.load(fid)
else:
return False
# load information from save_dic
self.coords,self.class_ids = save_dic['coords'],save_dic['class_ids']
if 'pfits' in save_dic:
self.pfits_save = save_dic['pfits']
if 'dec_text' in save_dic:
self.dec_text=save_dic['dec_text']
self.populate_draw_xyz()#coords to plot list
def save_coords(self):
save_file = self.save_file
if save_file is not None:
if not os.path.exists(os.path.dirname(save_file)):
os.makedirs(os.path.dirname(save_file))
fid = open(save_file,'wb')
self.pfits_save = getattr(self,'pfits_save',{})
self.dec_text = getattr(self,'dec_text',{})
save_dic = {'coords':self.coords,'class_ids':self.class_ids,'pfits':self.pfits_save,'dec_text':self.dec_text}
pickle.dump(save_dic,fid)
fid.close()
def auto_scale(self):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
im_chop = self.im_[z_min:z_max,x_min:x_max,y_min:y_max,...]
min_,max_ = np.min(im_chop),np.max(im_chop)
self.imshow_xy.set_clim(min_,max_)
self.imshow_z.set_clim(min_,max_)
self.dic_min_max[self.index_im] = [min_,max_]
self.f.canvas.draw()
def del_ext(self,str_):
"Deletes extention"
if os.path.basename(str_).count('.')>0:
return '.'.join(str_.split('.')[:-1])
else:
return str_
def save_ims(self):
import imageio  # scipy.misc.imsave was removed from SciPy; use imageio.imwrite instead
save_file = self.save_file
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
for index_im,im_ in enumerate(self.ims):
im_chop = im_[self.get_z_ind(),x_min:x_max,y_min:y_max,...]
im_xy = np.max(im_chop,axis=0)
im_z = np.max(im_chop,axis=1)
if index_im in self.dic_min_max:
min_,max_ = self.dic_min_max[index_im]
im_xy = minmax(im_xy,min_=min_,max_=max_)
im_z = minmax(im_z,min_=min_,max_=max_)
else:
min_,max_ = self.min_,self.max_
im_xy = minmax(im_xy,min_=min_,max_=max_)
im_z = minmax(im_z,min_=min_,max_=max_)
if save_file is not None:
if not os.path.exists(os.path.dirname(save_file)):
os.makedirs(os.path.dirname(save_file))
save_image = self.del_ext(save_file)+'_'+self.image_names[index_im]
# assuming minmax() returns values scaled to [0,1], convert to 8-bit before saving
imageio.imwrite(save_image+'_xy.png', np.array(np.clip(im_xy,0,1)*255, dtype=np.uint8))
imageio.imwrite(save_image+'_z.png', np.array(np.clip(im_z,0,1)*255, dtype=np.uint8))
def set_image(self):
self.im_ = self.ims[self.index_im]
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.im_sm = self.im_[z_min:z_max,x_min:x_max,y_min:y_max]
self.im_xy = np.max(self.im_[z_min:z_max,:,...],axis=0)
self.imshow_xy.set_data(self.im_xy)
self.im_z = np.max(self.im_[:,x_min:x_max,...],axis=1)
self.im_z = self.im_z[self.get_z_ind(),:]
self.imshow_z.set_data(self.im_z)
if self.index_im in self.dic_min_max:
min_,max_ = self.dic_min_max[self.index_im]
self.imshow_xy.set_clim(min_,max_)
self.imshow_z.set_clim(min_,max_)
self.update_point_plot()
self.f.suptitle(self.image_names[self.index_im])
self.f.canvas.draw()
def get_limits(self):
y_min,y_max = self.ax1.get_xlim()
x_min,x_max = self.ax1.get_ylim()[::-1]
x_min = max(int(x_min),0)
x_max = min(int(x_max),self.im_.shape[1])
y_min = max(int(y_min),0)
y_max = min(int(y_max),self.im_.shape[2])
z_min,z_max = np.array(self.ax2.get_ylim()[::-1])*self.rescz
z_min = max(int(z_min),0)
z_max = min(int(z_max),self.im_.shape[0])
return z_min,z_max,x_min,x_max,y_min,y_max
def get_z_ind(self):
im_z_len = self.im_z.shape[0]
indz=np.array(np.round(np.arange(0,im_z_len,self.rescz)),dtype=int)
return indz[indz<im_z_len]
def xy_on_lims_change(self,ax):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.im_sm = self.im_[z_min:z_max,x_min:x_max,y_min:y_max]
self.im_z = np.max(self.im_[:,x_min:x_max,...],axis=1)
self.im_z = self.im_z[self.get_z_ind(),:]
self.imshow_z.set_data(self.im_z)
self.update_point_plot()
def z_on_lims_change(self,ax):
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
self.im_sm = self.im_[z_min:z_max,x_min:x_max,y_min:y_max]
self.im_xy = np.max(self.im_[z_min:z_max,:,...],axis=0)
self.imshow_xy.set_data(self.im_xy)
self.update_point_plot()
def fit_seed_points(self):
#get default paramaters from self
width_z = self.width_z
width_xy = self.width_xy
radius_fit = self.radius_fit
im = self.im_sm
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
y_,x_,z_ = list(map(np.array,[self.draw_x,self.draw_y,self.draw_z]))
keep_class = np.array(self.class_ids)==self.index_im
keep_in_window = (x_>x_min)&(x_<x_max)&(y_>y_min)&(y_<y_max)&(z_>z_min)&(z_<z_max)
keep = keep_class&keep_in_window
xyzguess = np.array([z_[keep]-z_min,x_[keep]-x_min,y_[keep]-y_min],dtype=int)
self.pfits = fit_seed_points_base(im,xyzguess,width_z=width_z,width_xy=width_xy,radius_fit=3,n_max_iter = 15,max_dist_th=0.25)
if len(self.pfits)>0:
self.pfits[:,1:4]+=[[z_min,x_min,y_min]]
#update graph and points
keep = np.array(self.class_ids)!=self.index_im
self.class_ids,self.draw_z,self.draw_x,self.draw_y = [list(np.array(x)[keep]) for x in [self.class_ids,self.draw_z,self.draw_x,self.draw_y]]
if not hasattr(self,'pfits_save'):
self.pfits_save={}
self.pfits_save[self.index_im]=self.pfits
centers_0,centers_1,centers_2 = self.pfits[:,1:4].T
self.draw_z.extend(centers_0)
self.draw_x.extend(centers_2)
self.draw_y.extend(centers_1)
self.class_ids.extend([self.index_im]*len(centers_0))
self.update_point_plot()
def get_seed_points(self):
#get default paramaters from self
gfilt_size = self.gfilt_size
filt_size = self.filt_size
th_seed = self.th_seed
hot_pix_th = self.hot_pix_th
im = self.im_sm
centers = get_seed_points_base(im,gfilt_size=gfilt_size,filt_size=filt_size,th_seed=th_seed,hot_pix_th=hot_pix_th)
z_min,z_max,x_min,x_max,y_min,y_max = self.get_limits()
keep = np.array(self.class_ids)!=self.index_im
self.class_ids,self.draw_z,self.draw_x,self.draw_y = [list(np.array(x)[keep]) for x in [self.class_ids,self.draw_z,self.draw_x,self.draw_y]]
self.draw_z.extend(centers[0]+z_min)
self.draw_x.extend(centers[2]+y_min)
self.draw_y.extend(centers[1]+x_min)
self.class_ids.extend([self.index_im]*len(centers[0]))
self.update_point_plot()
def handle_in_nucleus(self):
if hasattr(self,'nucl_x'):
i_im = self.index_im
class_ids = np.array(self.class_ids)
Y,X,Z = np.array(self.draw_x,dtype=int),np.array(self.draw_y,dtype=int),np.array(self.draw_z,dtype=int)
keep = class_ids==i_im
Y,X,Z=Y[keep],X[keep],Z[keep]
nucl_ = np.array([self.nucl_x,self.nucl_y,self.nucl_z],dtype=int).T
draw_x,draw_y,draw_z=[],[],[]
for x,y,z in zip(X,Y,Z):
if np.any(np.sum(np.abs(nucl_-[[x,y,z]]),axis=-1)==0):
draw_z.append(z)
draw_x.append(y)
draw_y.append(x)
keep = np.array(self.class_ids)!=self.index_im
self.class_ids,self.draw_z,self.draw_x,self.draw_y = [list(np.array(x)[keep]) for x in [self.class_ids,self.draw_z,self.draw_x,self.draw_y]]
self.draw_z.extend(draw_z)
self.draw_x.extend(draw_x)
self.draw_y.extend(draw_y)
self.class_ids.extend([self.index_im]*len(draw_x))
self.update_point_plot()
class Reader:
# Close the file on cleanup.
def __del__(self):
if self.fileptr:
self.fileptr.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
if self.fileptr:
self.fileptr.close()
# Average multiple frames in a movie.
def averageFrames(self, start = False, end = False, verbose = False):
if (not start):
start = 0
if (not end):
end = self.number_frames
length = end - start
average = np.zeros((self.image_width, self.image_height), float)
for i in range(length):
if verbose and ((i%10)==0):
print(" processing frame:", i, " of", self.number_frames)
average += self.loadAFrame(i + start)
average = average/float(length)
return average
# returns the film name
def filmFilename(self):
return self.filename
# returns the film size
def filmSize(self):
return [self.image_width, self.image_height, self.number_frames]
# returns the picture x,y location, if available
def filmLocation(self):
if hasattr(self, "stage_x"):
return [self.stage_x, self.stage_y]
else:
return [0.0, 0.0]
# returns the film focus lock target
def lockTarget(self):
if hasattr(self, "lock_target"):
return self.lock_target
else:
return 0.0
# returns the scale used to display the film when
# the picture was taken.
def filmScale(self):
if hasattr(self, "scalemin") and hasattr(self, "scalemax"):
return [self.scalemin, self.scalemax]
else:
return [100, 2000]
# Dax reader class. This is a Zhuang lab custom format.
#
def batch_load_dax(filename):
_im = DaxReader(filename).loadAll()
return _im
class DaxReader(Reader):
# dax specific initialization
def __init__(self, filename, swap_axis=False, verbose = 0):
import os,re
# save the filenames
self.filename = filename
dirname = os.path.dirname(filename)
if (len(dirname) > 0):
dirname = dirname + "/"
self.inf_filename = dirname + os.path.splitext(os.path.basename(filename))[0] + ".inf"
# swap_axis
self.swap_axis = swap_axis
# defaults
self.image_height = None
self.image_width = None
# extract the movie information from the associated inf file
size_re = re.compile(r'frame dimensions = ([\d]+) x ([\d]+)')
length_re = re.compile(r'number of frames = ([\d]+)')
endian_re = re.compile(r' (big|little) endian')
stagex_re = re.compile(r'Stage X = ([\d\.\-]+)')
stagey_re = re.compile(r'Stage Y = ([\d\.\-]+)')
lock_target_re = re.compile(r'Lock Target = ([\d\.\-]+)')
scalemax_re = re.compile(r'scalemax = ([\d\.\-]+)')
scalemin_re = re.compile(r'scalemin = ([\d\.\-]+)')
inf_file = open(self.inf_filename, "r")
while 1:
line = inf_file.readline()
if not line: break
m = size_re.match(line)
if m:
self.image_height = int(m.group(1))
self.image_width = int(m.group(2))
m = length_re.match(line)
if m:
self.number_frames = int(m.group(1))
m = endian_re.search(line)
if m:
if m.group(1) == "big":
self.bigendian = 1
else:
self.bigendian = 0
m = stagex_re.match(line)
if m:
self.stage_x = float(m.group(1))
m = stagey_re.match(line)
if m:
self.stage_y = float(m.group(1))
m = lock_target_re.match(line)
if m:
self.lock_target = float(m.group(1))
m = scalemax_re.match(line)
if m:
self.scalemax = int(m.group(1))
m = scalemin_re.match(line)
if m:
self.scalemin = int(m.group(1))
inf_file.close()
# set defaults, probably correct, but warn the user
# that they couldn't be determined from the inf file.
if not self.image_height:
print("Could not determine image size, assuming 256x256.")
self.image_height = 256
self.image_width = 256
# open the dax file
if os.path.exists(filename):
self.fileptr = open(filename, "rb")
else:
self.fileptr = 0
if verbose:
print("dax data not found", filename)
# Create and return a memory map the dax file
def loadMap(self):
if os.path.exists(self.filename):
if self.bigendian:
self.image_map = np.memmap(self.filename, dtype='>u2', mode='r', shape=(self.number_frames,self.image_width, self.image_height))
else:
self.image_map = np.memmap(self.filename, dtype='uint16', mode='r', shape=(self.number_frames,self.image_width, self.image_height))
return self.image_map
# load a frame & return it as a np array
def loadAFrame(self, frame_number):
if self.fileptr:
assert frame_number >= 0, "frame_number must be greater than or equal to 0"
assert frame_number < self.number_frames, "frame number must be less than " + str(self.number_frames)
self.fileptr.seek(frame_number * self.image_height * self.image_width * 2)
image_data = np.fromfile(self.fileptr, dtype='uint16', count = self.image_height * self.image_width)
if self.swap_axis:
image_data = np.transpose(np.reshape(image_data, [self.image_width, self.image_height]))
else:
image_data = np.reshape(image_data, [self.image_width, self.image_height])
if self.bigendian:
image_data.byteswap(True)
return image_data
# load the full movie and return it as a np array
def loadAll(self):
image_data = np.fromfile(self.fileptr, dtype='uint16', count = -1)
if self.swap_axis:
image_data = np.swapaxes(np.reshape(image_data, [self.number_frames,self.image_width, self.image_height]),1,2)
else:
image_data = np.reshape(image_data, [self.number_frames,self.image_width, self.image_height])
if self.bigendian:
image_data.byteswap(True)
return image_data
def close(self):
if self.fileptr.closed:
print(f"file {self.filename} has been closed.")
else:
self.fileptr.close()
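# Hedged usage sketch (not part of the original code): opens a hypothetical .dax movie,
# reads its first frame and queries the film size; this assumes the accompanying .inf
# metadata file is present and complete.
def _example_read_dax(dax_filename):
    with DaxReader(dax_filename) as _reader:
        _first_frame = _reader.loadAFrame(0)
        _width, _height, _n_frames = _reader.filmSize()
    return _first_frame, _n_frames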
## segmentation with DAPI
def DAPI_segmentation(ims, names,
cap_percentile=0.5,
illumination_correction=True,
illumination_correction_channel=405,
correction_folder=_correction_folder,
merge_layer_num = 11,
denoise_window = 5,
log_window = 13,
signal_cap_ratio = 0.15,
cell_min_size=1000,
shape_ratio_threshold = 0.030,
remove_fov_boundary = 40,
make_plot=False,
verbose=True):
"""cell segmentation for DAPI images with pooling and convolution layers
Inputs:
ims: list of images
names: list of names, same length as ims
cap_percentile: removing top and bottom percentile in each image, float from 0-100 (default: 0.5)
illumination_correction: whether correct illumination for each field of view, bool (default: True)
illumination_correction_channel: which color channel to correct illumination for each field of view, int or str (default: 405)
correction_folder: full directory that contains such correction files, string (default: )
merge_layer_num: number of z-stack layers to merge, int (default: 11)
denoise_window: window size used for bilateral denoising method, int (default: 5)
log_window: window size for laplacian-gaussian filter, int (default: 13)
signal_cap_ratio: intensity ratio considered as signal if intensity over max intensity is larger than this, float between 0-1 (default: 0.15)
cell_min_size: smallest object size allowed as nucleus, int (default: 1000 for 2D)
shape_ratio_threshold: min threshold for: area size of one label / (contour length of a label)^2, float (default: 0.030)
remove_fov_boundary: if a label is closer to the fov boundary than this number of pixels, remove it, int (default: 40)
make_plot: whether making plots for checking purpose, bool
verbose: whether say something during the process, bool
Output:
_ft_seg_labels: list of segmentation label matrices, one per input image"""
# imports
from scipy import ndimage
from skimage import morphology
from scipy import stats
from skimage import restoration, measure
from ImageAnalysis3.corrections import Illumination_correction
from skimage.segmentation import random_walker
from scipy.ndimage import gaussian_laplace
# check whether input is a list of images or just one image
if isinstance(ims, list):
if verbose:
print("Start segmenting list of images")
_ims = ims
_names = names
else:
if verbose:
print("Start segmenting one image")
_ims = [ims]
_names = [names]
# check input length
if len(_names) != len(_ims):
raise ValueError('input images and names length not compatible!')
# illumination correction
if illumination_correction:
_ims = [corrections.Illumination_correction(_im, illumination_correction_channel,
correction_folder=correction_folder,
verbose=verbose) for _im in _ims]
# rescale image to 0-1 gray scale
_limits = [stats.scoreatpercentile(_im, (cap_percentile, 100.-cap_percentile)).astype(float) for _im in _ims]
_norm_ims = [(_im-np.min(_limit))/(np.max(_limit)-np.min(_limit)) for _im,_limit in zip(_ims, _limits)]
for _im in _norm_ims:
_im[_im < 0] = 0
_im[_im > 1] = 1
# find the layer that on focus
_focus_layers = [np.argmin(np.array([np.sum(_layer > signal_cap_ratio) for _layer in _im])) for _im in _norm_ims]
# stack images close to this focal layer
if verbose:
print('- find focal plane and slice')
_stack_ims = []
for _im, _layer in zip(_norm_ims, _focus_layers):
if _im.shape[0] - _layer < np.ceil((merge_layer_num-1)/2):
_stack_lims = [_im.shape[0]-merge_layer_num, _im.shape[0]]
elif _layer < np.floor((merge_layer_num-1)/2):
_stack_lims = [0, merge_layer_num]
else:
_stack_lims = [_layer-np.ceil((merge_layer_num-1)/2), _layer+np.floor((merge_layer_num-1)/2)]
_stack_lims = np.array(_stack_lims, dtype=int)
# extract image
_stack_im = np.zeros([np.max(_stack_lims)-np.min(_stack_lims), np.shape(_im)[1], np.shape(_im)[2]])
# denoise and merge
if denoise_window:
for _i,_l in enumerate(range(np.min(_stack_lims), np.max(_stack_lims))):
_stack_im[_i] = restoration.denoise_bilateral(_im[_l], win_size=int(denoise_window), mode='edge', multichannel=False)
else:
for _i,_l in enumerate(range(np.min(_stack_lims), np.max(_stack_lims))):
_stack_im[_i] = _im[_l]
_stack_im = np.mean(_stack_im, axis=0)
_stack_ims.append(_stack_im)
# laplace of gaussian filter
if verbose:
print("- apply by laplace-of-gaussian filter")
_conv_ims = [gaussian_laplace(_im, log_window) for _im in _stack_ims]
# binarize the image
_supercell_masks = [(_cim < -1e-6) *( _sim > signal_cap_ratio) for _cim, _sim in zip(_conv_ims, _stack_ims)]
_supercell_masks = [ndimage.binary_dilation(_im, structure=morphology.disk(4)) for _im in _supercell_masks]
_supercell_masks = [ndimage.binary_erosion(_im, structure=morphology.disk(12)) for _im in _supercell_masks]
_supercell_masks = [ndimage.binary_fill_holes(_im, structure=morphology.disk(3)) for _im in _supercell_masks]
# acquire labels
if verbose:
print("- acquire labels")
_open_objects = [morphology.opening(_im, morphology.disk(3)) for _im in _supercell_masks]
_close_objects = [morphology.closing(_open, morphology.disk(3)) for _open in _open_objects]
_close_objects = [morphology.remove_small_objects(_close, 2000) for _close in _close_objects]
_bboxes = [ndimage.find_objects(_close) for _close in _close_objects]
_masks = [_close[_bbox[0]] for _bbox, _close in zip(_bboxes, _close_objects)]
_labels = []
for _close,_sim in zip(_close_objects,_stack_ims):
_label, _num = ndimage.label(_close)
_label[(_sim > signal_cap_ratio)*(_label==0)] = 0
_label[(_sim <= signal_cap_ratio)*(_label==0)] = -1
_labels.append(_label)
# random walker segmentation
if verbose:
print ("- random walker segmentation!")
_seg_labels = [random_walker(_im, _label, beta=1, mode='bf') for _im, _label in zip(_stack_ims, _labels)]
# remove bad labels by shape ratio: A(x)/I(x)^2
if verbose:
print ("- remove failed labels by shape ratio: A(x)/I(x)^2")
_ft_seg_labels = []
_contours = []
for _i, _seg_label in enumerate(_seg_labels):
if verbose:
print ("- screen labels in field of view:", names[_i])
_failed_labels = []
for _l in range(np.max(_seg_label)):
_contour = measure.find_contours(np.array(_seg_label==_l+1, dtype=int), 0)[0]
_length = np.sum(np.sqrt(np.sum((_contour[1:] - _contour[:-1])**2, axis=1)))
_size = np.sum(_seg_label==_l+1)
_center = np.round(ndimage.measurements.center_of_mass(_seg_label==_l+1))
_shape_ratio = _size/_length**2
if _shape_ratio < shape_ratio_threshold:
_seg_label[_seg_label==_l+1] = -1
_failed_labels.append(_l+1)
if verbose:
print("-- fail by shape_ratio, label", _l+1, 'contour length:', _length, 'size:', _size, 'shape_ratio:',_size/_length**2)
continue
for _coord,_dim in zip(_center[-2:], _seg_label.shape[-2:]):
if _coord < remove_fov_boundary or _coord > _dim - remove_fov_boundary:
_seg_label[_seg_label==_l+1] = -1
_failed_labels.append(_l+1)
if verbose:
print("-- fail by center_coordinate, label:", _l+1, "center of this nucleus:", _center[-2:])
break
_lb = 1
while _lb <= np.max(_seg_label):
if np.sum(_seg_label == _lb) == 0:
print ("-- remove", _lb)
_seg_label[_seg_label>_lb] -= 1
else:
print ("-- pass", _lb)
_lb += 1
_ft_seg_labels.append(_seg_label)
# plot
if make_plot:
for _seg_label, _name in zip(_ft_seg_labels, _names):
plt.figure()
plt.imshow(_seg_label)
plt.title(_name)
plt.colorbar()
plt.show()
# return segmentation results
return _ft_seg_labels
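# Hedged usage sketch (not part of the original code): segments a single hypothetical
# DAPI z-stack; illumination correction is switched off here so the sketch does not
# depend on the external correction files.
def _example_dapi_segmentation(dapi_im, fov_name):
    _labels = DAPI_segmentation(dapi_im, fov_name, illumination_correction=False,
                                cell_min_size=1000, make_plot=False, verbose=False)
    return _labels[0]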
# segmentation with convolution of DAPI images
def DAPI_convoluted_segmentation(filenames, correction_channel=405,
num_threads=12, cap_percentile=1,
single_im_size=_image_size, all_channels=_allowed_colors,
num_buffer_frames=10, num_empty_frames=1,
illumination_correction=True, illumination_correction_channel=405,
correction_folder=_correction_folder,
merge_layer_num=11, denoise_window=5, mft_size=25, glft_size=30,
max_conv_th=0, min_boundary_th=0.48, signal_cap_ratio=0.20,
max_cell_size=40000, min_cell_size=5000, min_shape_ratio=0.035,
max_iter=4, shrink_percent=15,
dialation_dim=4, random_walker_beta=0.1, remove_fov_boundary=50,
save=True, save_folder=None, force=False,
save_npy=True, save_postfix="_segmentation",
make_plot=False, return_images=False, verbose=True):
"""cell segmentation for DAPI images with pooling and convolution layers
Inputs:
filenames: list of .dax filenames to be segmented
correction_channel: color channel used for segmentation, int or str (default: 405)
cap_percentile: removing top and bottom percentile in each image, float from 0-100 (default: 1)
num_buffer_frames: number of buffer frames, int (default: 10)
num_empty_frames: number of empty frames, int (default: 1)
illumination_correction: whether to correct illumination for each field of view, bool (default: True)
illumination_correction_channel: which color channel to correct illumination for each field of view, int or str (default: 405)
correction_folder: full directory that contains such correction files, string
merge_layer_num: number of z-stack layers to merge, int (default: 11)
denoise_window: window size used for bilateral denoising method, int (default: 5)
mft_size: size of max-min filters to get cell boundaries, int (default: 25)
glft_size: window size for laplacian-gaussian filter, int (default: 30)
binarize image:
max_conv_th: maximum convolution threshold, float (default: 0)
min_boundary_th: minimal boundary im threshold, float (default: 0.48)
signal_cap_ratio: intensity ratio considered as signal if intensity over max intensity is larger than this, float between 0-1 (default: 0.20)
max_cell_size: upper limit for objects, otherwise they undergo extra screening, int (default: 40000)
min_cell_size: smallest object size allowed as nucleus, int (default: 5000 for 2D)
min_shape_ratio: min threshold for: area size of one label / (contour length of a label)^2, float (default: 0.035)
max_iter: maximum iterations allowed in splitting shapes, int (default: 4)
shrink_percent: percentage of label areas removed during splitting, float (0-100, default: 15)
dialation_dim: dimension for dilation after splitting objects, int (default: 4)
random_walker_beta: beta used for random walker segmentation algorithm, float (default: 0.1)
remove_fov_boundary: if a label is closer to the fov boundary than this number of pixels, remove it, int (default: 50)
make_plot: whether making plots for checking purpose, bool
verbose: whether say something during the process, bool
Output:
_seg_labels: list of segmentation label matrices, one per field of view"""
## import images
if not isinstance(filenames, list):
filenames = [filenames]
## load segmentation if already existed:
if save_folder is None:
save_folder = os.path.dirname(os.path.dirname(filenames[0]))
save_folder = os.path.join(save_folder, 'Analysis', 'segmentation')
if not os.path.exists(save_folder): # create folder if not exists
os.makedirs(save_folder)
if save_npy:
save_filenames = [os.path.join(save_folder, os.path.basename(_fl).replace('.dax', save_postfix +'.npy')) for _fl in filenames]
else:
save_filenames = [os.path.join(save_folder, os.path.basename(_fl).replace('.dax', save_postfix +'.pkl')) for _fl in filenames]
# decide if directly load
_direct_load_flags = [True for _fl in save_filenames if os.path.exists(_fl) and not force]
if len(_direct_load_flags) == len(filenames) and not force:
if verbose:
if len(filenames) == 1:
print(f"-- directly load segmentation result from:{save_filenames[0]}")
else:
print(f"-- directly load segmentation result from folder:{save_folder}, load_npy:{save_npy}")
# load segmentation labels
if save_npy:
_seg_labels = [np.load(_fl) for _fl in save_filenames]
else:
_seg_labels = [pickle.load(open(_fl, 'rb')) for _fl in save_filenames]
# return
if return_images:
if verbose:
print(f"- loading {len(filenames)} images for output")
_load_args = [(_fl, correction_channel, None, None, 20,
single_im_size, all_channels,
num_buffer_frames,num_empty_frames,
np.zeros(3), correction_folder) for _fl in filenames]
_load_pool = mp.Pool(num_threads)
_ims = _load_pool.starmap(corrections.correct_single_image, _load_args, chunksize=1)
_load_pool.close()
_load_pool.join()
_load_pool.terminate()
return _seg_labels, _ims
else:
return _seg_labels
else:
if verbose:
print(f"- loading {len(filenames)} images for segmentation")
_load_args = [(_fl, correction_channel, None, None, 20,
single_im_size, all_channels,
num_buffer_frames,num_empty_frames,
np.zeros(3), correction_folder) for _fl in filenames]
_load_pool = mp.Pool(num_threads)
_ims = _load_pool.starmap(corrections.correct_single_image, _load_args, chunksize=1)
_load_pool.close()
_load_pool.join()
_load_pool.terminate()
## rescaling and stack
# rescale image to 0-1 gray scale
_limits = [stats.scoreatpercentile(_im, (cap_percentile, 100.-cap_percentile)).astype(float) for _im in _ims]
_norm_ims = [(_im-np.min(_limit))/(np.max(_limit)-np.min(_limit)) for _im,_limit in zip(_ims, _limits)]
for _im in _norm_ims:
_im[_im < 0] = 0
_im[_im > 1] = 1
# find the layer that on focus
_focus_layers = [np.argmin(np.array([np.sum(_layer > signal_cap_ratio) for _layer in _im])) for _im in _norm_ims]
# stack images close to this focal layer
if verbose:
print('-- find focal plane and slice')
_stack_ims = []
for _im, _layer in zip(_norm_ims, _focus_layers):
if _im.shape[0] - _layer < np.ceil((merge_layer_num-1)/2):
_stack_lims = [_im.shape[0]-merge_layer_num, _im.shape[0]]
elif _layer < np.floor((merge_layer_num-1)/2):
_stack_lims = [0, merge_layer_num]
else:
_stack_lims = [_layer-np.ceil((merge_layer_num-1)/2), _layer+np.floor((merge_layer_num-1)/2)]
_stack_lims = np.array(_stack_lims, dtype=int)
# extract image
_stack_im = np.zeros([np.max(_stack_lims)-np.min(_stack_lims), np.shape(_im)[1], np.shape(_im)[2]])
# denoise and merge
if denoise_window:
for _i,_l in enumerate(range(np.min(_stack_lims), np.max(_stack_lims))):
_stack_im[_i] = restoration.denoise_bilateral(_im[_l], win_size=int(denoise_window), mode='edge', multichannel=False)
else:
for _i,_l in enumerate(range(np.min(_stack_lims), np.max(_stack_lims))):
_stack_im[_i] = _im[_l]
_stack_im = np.mean(_stack_im, axis=0)
_stack_ims.append(_stack_im)
## Get boundaries of cells and apply Gaussian-Laplacian filter
# get boundaries of cells
_diff_ims = [2*ndimage.filters.maximum_filter(_stack_im, mft_size)-ndimage.filters.minimum_filter(_stack_im, mft_size) for _stack_im in _stack_ims]
# laplace of gaussian filter
if verbose:
print("- apply by laplace-of-gaussian filter")
_conv_ims = [gaussian_laplace(_im, glft_size) for _im in _diff_ims]
## get rough labels
# binarize the image
_supercell_masks = [(_cim < max_conv_th) *( _sim > min_boundary_th) for _cim, _sim in zip(_conv_ims, _diff_ims)]
# erosion and dialation
_supercell_masks = [ndimage.binary_erosion(_im, structure=morphology.disk(3)) for _im in _supercell_masks]
_supercell_masks = [ndimage.binary_dilation(_im, structure=morphology.disk(5)) for _im in _supercell_masks]
# filling holes
_supercell_masks = [ndimage.binary_fill_holes(_im, structure=morphology.disk(4)) for _im in _supercell_masks]
# acquire labels
if verbose:
print("- acquire labels")
_open_objects = [morphology.opening(_im, morphology.disk(3)) for _im in _supercell_masks]
_close_objects = [morphology.closing(_open, morphology.disk(3)) for _open in _open_objects]
_close_objects = [morphology.remove_small_objects(_close, min_cell_size) for _close in _close_objects]
# labeling
_labels = [ np.array(ndimage.label(_close)[0], dtype=int) for _close in _close_objects]
## Tuning labels
def _label_binary_im(_im, obj_size=3):
'''Given a binary image, find labels for all isolated objects, using a disk of size obj_size for opening/closing'''
# make sure image is binary
_bim = np.array(_im > 0, dtype=int)
# find objects
_open = morphology.opening(_bim, morphology.disk(obj_size))
_close = morphology.closing(_open, morphology.disk(obj_size))
# label objects
_label, _num = ndimage.label(_close.astype(bool))
# return
return _label, _num
def _check_label(_label, _id, _min_shape_ratio, _max_size, verbose=False):
"""Check whether the label is qualified as a cell"""
# get features
_length,_size,_center,_ratio = _get_label_features(_label, _id)
if _ratio < _min_shape_ratio:
if verbose:
print(f"--- {_ratio} is smaller than minimum shape ratio, failed")
return False
if _size > _max_size:
if verbose:
print(f"--- {_size} is larger than maximum shape size, failed")
return False
return True
def _get_label_features(_label, _id):
"""Given a label and corresponding label id, return four features of this label"""
# get features
_contours = measure.find_contours(np.array(_label==_id, dtype=int), 0)
if len(_contours) > 0:
_length = np.sum(np.sqrt(np.sum((np.roll(_contours[0],1,axis=0) - _contours[0])**2, axis=1)))
else:
_length = 0
_size = np.sum(_label==_id)
_center = np.round(ndimage.measurements.center_of_mass(_label==_id))
_shape_ratio = _size/_length**2
return _length, _size, _center, _shape_ratio
def _split_single_label(_stack_im, _conv_im, _label, _id,
min_size=min_cell_size, shrink_percent=shrink_percent,
erosion_dim=2, dialation_dim=dialation_dim):
"""Function to split suspicious labels and validate"""
if shrink_percent > 50 or shrink_percent < 0:
raise ValueError(f"Wrong shrink_percent kwd ({shrink_percent}) is given, should be in [0,50]")
# get features
_length,_size,_center,_ratio = _get_label_features(_label, _id)
if _size < 2*min_size: # adjust shrink percentage if shape is small
shrink_percent = shrink_percent * 0.8
_mask = np.array(_label == _id, dtype=int)
_mask *= np.array(_stack_im > stats.scoreatpercentile(_stack_im[_label==_id], shrink_percent), dtype=int)
#_mask *= np.array(_conv_im < stats.scoreatpercentile(_conv_im[_label==_id], 100-2*shrink_percent), dtype=int)
_mask = ndimage.binary_erosion(_mask, structure=morphology.disk(erosion_dim))
_mask = morphology.remove_small_objects(_mask.astype(bool), min_size)
_new_label, _num = _label_binary_im(_mask, 3)
for _l in range(_num):
_single_label = np.array(_new_label==_l+1, dtype=int)
_single_label = ndimage.binary_dilation(_single_label, structure=morphology.disk(int(dialation_dim/2)))
_new_label[_single_label>0] = _l+1
return _new_label, _num
def _iterative_split_labels(_stack_im, _conv_im, _label, max_iter=3,
min_shape_ratio=min_shape_ratio, max_size=max_cell_size,
min_size=min_cell_size, shrink_percent=15,
erosion_dim=2, dialation_dim=10,
verbose=False):
"""Function to iteratively split labels within one fov"""
_single_labels = [np.array(_label==_i+1,dtype=int) for _i in range(int(np.max(_label))) if np.sum(np.array(_label==_i+1,dtype=int))>0]
_iter_counts = [0 for _i in range(len(_single_labels))]
_final_label = np.zeros(np.shape(_label), dtype=int)
# start selecting labels
while(len(_single_labels)) > 0:
_sg_label = _single_labels.pop(0)
_iter_ct = _iter_counts.pop(0)
if verbose:
print(f"- Remaining labels:{len(_single_labels)}, iter_num:{_iter_ct}")
# if this cell passes the filter
if _check_label(_sg_label, 1, min_shape_ratio, max_size, verbose=verbose):
if verbose:
print(f"-- saving label: {np.max(_final_label)+1}")
_save_label = ndimage.binary_dilation(_sg_label, structure=morphology.disk(int(dialation_dim/2)))
_save_label = ndimage.binary_fill_holes(_save_label, structure=morphology.disk(int(dialation_dim/2)))
if np.sum(_save_label==1) > min_size:
if verbose:
print('save1', _get_label_features(_save_label, 1))
_final_label[_save_label==1] = np.max(_final_label)+1
continue
# not pass, try to split
else:
_new_label, _num = _split_single_label(_stack_im, _conv_im, _sg_label, 1,
min_size=min_size*(1-shrink_percent/100)**_iter_ct,
shrink_percent=shrink_percent,
erosion_dim=erosion_dim, dialation_dim=dialation_dim)
for _i in range(_num):
_cand_label = np.array(_new_label==_i+1, dtype=np.int)
if _check_label(_cand_label, 1, min_shape_ratio*0.9**_iter_ct, max_size, verbose=verbose):
if verbose:
print(f"-- saving label: {np.max(_final_label)+1}")
_save_label = ndimage.binary_dilation(_cand_label, structure=morphology.disk(int(dialation_dim/2+1)))
_save_label = ndimage.binary_fill_holes(_save_label, structure=morphology.disk(int(dialation_dim/2)))
if np.sum(_save_label == 1) > min_size:
if verbose:
print('save2', _get_label_features(_save_label, 1))
_final_label[_save_label==1] = np.max(_final_label)+1
elif _iter_ct > max_iter:
if verbose:
print("--- Exceeding max-iteration count, skip.")
continue
else:
if verbose:
print("--- Append this cell back to pool")
_single_labels.append(_cand_label)
_iter_counts.append(_iter_ct+1)
return _final_label
# initialize updated labels and call functions
if verbose:
print("- start iterative segmentation")
_seg_labels = []
for _i, (_sim, _cim, _label) in enumerate(zip(_stack_ims, _conv_ims, _labels)):
_updated_label = _iterative_split_labels(_sim, _cim, _label, max_iter=max_iter,
min_shape_ratio=min_shape_ratio, shrink_percent=shrink_percent,
max_size=max_cell_size, min_size=min_cell_size,
dialation_dim=dialation_dim, verbose=verbose)
for _l in range(int(np.max(_updated_label))):
_, _, _center, _ = _get_label_features(_updated_label, _l+1)
if _center[0] < remove_fov_boundary or _center[1] < remove_fov_boundary or _center[0] >= _updated_label.shape[0]-remove_fov_boundary or _center[1] >= _updated_label.shape[1]-remove_fov_boundary:
if verbose:
print(f"-- Remove im:{_i}, label {_l+1} for center coordiate too close to edge.")
_updated_label[_updated_label==_l+1] = 0
# relabel
_relabel_id = 1
_seg_label = np.zeros(np.shape(_updated_label), dtype=int)
for _l in range(int(np.max(_updated_label))):
if np.sum(np.array(_updated_label == _l+1,dtype=np.int)) > 0:
_seg_label[_updated_label==_l+1] = _relabel_id
_relabel_id += 1
# label background
_dialated_mask = ndimage.binary_dilation(np.array(_seg_label>0, dtype=int), structure=morphology.disk(int(dialation_dim/2)))
_seg_label[(_seg_label==0)*(_dialated_mask==0)] = -1
# save
_seg_labels.append(_seg_label)
## random walker segmentation
if random_walker_beta:
if verbose:
print ("- random walker segmentation!")
_seg_labels = [random_walker(_im, _label, beta=random_walker_beta, mode='bf') for _im, _label in zip(_stack_ims, _seg_labels)]
## plot
if make_plot:
for _seg_label, _name in zip(_seg_labels, filenames):
plt.figure()
plt.imshow(_seg_label)
plt.title(_name)
plt.colorbar()
plt.show()
## save
if save:
if save_npy:
for _fl, _lb in zip(save_filenames, _seg_labels):
np.save(_fl, _lb)
else:
for _fl, _lb in zip(save_filenames, _seg_labels):
pickle.dump(_lb, open(_fl, 'wb'))
if return_images:
return _seg_labels, _ims
else:
return _seg_labels
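# Hedged usage sketch (not part of the original code): runs the convolution-based
# segmentation on a hypothetical list of .dax files and saves the labels as .npy;
# the folder name and thread count are illustrative only.
def _example_convoluted_segmentation(dax_filenames, seg_folder):
    _labels = DAPI_convoluted_segmentation(dax_filenames, correction_channel=405,
                                           num_threads=4, save=True, save_folder=seg_folder,
                                           save_npy=True, verbose=False)
    return _labels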
# merge images to generate "chromosome"
def generate_chromosome_from_dic(im_dic, merging_channel, color_dic, bead_label='beads',
merge_num=10, ref_frame=0, fft_dim=125, verbose=True):
'''Function to generate "chromosomes" by merging first several regions
Given:
im_dic: dictionary of images loaded by get_img_info.split_channels_by_image, dic
merging_channel: use which channel to merge as chromosome, -1 means all channels except beads, int
color_dic: dictionary of color usage loaded by get_img_info.Load_Color_Usage, dic
merge_num: number of images to be merged, int (default: 10)
ref_frame: which frame is used as reference, non-negative int (default: 0)
fft_dim: dimension for FFT, positive int (default: 125)
verbose: say something!, bool (default: True)
Return:
_mean_im: merged image, 3d-array
_rough_dfts: drifts calculated by FFT, list of 1d-arrays
'''
import numpy as np
import os
from ImageAnalysis3.corrections import fast_translate, fftalign
# initialize mean_image as chromosome
_mean_im=[]
_rough_dfts = []
# get ref frame
_ref_name = sorted(list(im_dic.items()), key=lambda k_v: int(k_v[0].split('H')[1].split('R')[0]))[ref_frame][0]
_ref_ims = sorted(list(im_dic.items()), key=lambda k_v1: int(k_v1[0].split('H')[1].split('R')[0]))[ref_frame][1]
if bead_label not in color_dic[_ref_name.split(os.sep)[0]]:
raise ValueError('wrong ref frame, no beads exist in this hybe.')
for _i, _label in enumerate(color_dic[_ref_name.split(os.sep)[0]]):
# check bead label
if bead_label == _label:
_ref_bead = _ref_ims[_i]
break
# loop through all images for this field of view
for _name, _ims in sorted(list(im_dic.items()), key=lambda k_v2: int(k_v2[0].split('H')[1].split('R')[0])):
if len(_rough_dfts) >= merge_num: # stop if drifts for merge_num images have already been calculated
break
if _name == _ref_name: # pass the ref frame
continue
if bead_label in color_dic[_name.split(os.sep)[0]]:
#if verbose:
# print "processing image:", _name
# extract bead image
for _i, _label in enumerate(color_dic[_name.split(os.sep)[0]]):
# check bead label
if bead_label == _label:
_bead = _ims[_i]
break
# calculate drift quickly with FFT
_rough_dft = fftalign(_ref_bead, _bead)
_rough_dfts.append(_rough_dft)
# roughly align image and save
if merging_channel >=0 and merging_channel < len(_ims): # if merging_channel is provided properly
_corr_im = fast_translate(_ims[merging_channel],-_rough_dft)
_mean_im.append(_corr_im)
else: # if merging_channel is not provided etc:
for _i, _label in enumerate(color_dic[_name.split(os.sep)[0]]):
if bead_label != _label and _label != '':
_corr_im = fast_translate(_ims[_i],-_rough_dft)
_mean_im.append(_corr_im)
if verbose:
print('- number of images to calculate mean: '+str(len(_mean_im))+'\n- number of FFT drift corrections: '+str(len(_rough_dfts)))
print("- drifts are: \n", _rough_dfts)
_mean_im = np.mean(_mean_im,0)
return _mean_im, _rough_dfts
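# Hedged usage sketch (not part of the original code): merges the first ten imaging
# rounds of a hypothetical field of view into a "chromosome" image; im_dic and color_dic
# are assumed to come from get_img_info as described in the docstring above.
def _example_generate_chromosome(im_dic, color_dic):
    _mean_im, _drifts = generate_chromosome_from_dic(im_dic, merging_channel=-1,
                                                     color_dic=color_dic, bead_label='beads',
                                                     merge_num=10, ref_frame=0)
    return _mean_im, _drifts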
# crop cells based on DAPI segmentation result
def crop_cell(im, segmentation_label, drift=None, extend_dim=20, overlap_threshold = 0.1, verbose=True):
'''basic function to crop image into small ones according to segmentation_label
Inputs:
im: single nd-image, numpy.ndarray
segmentation_label: 2D or 3D segmentation label, each cell has a unique id, numpy.ndarray
drift: drift to apply to the cropping, relative to the frame with DAPI, 1darray (default: None, no drift applied)
extend_dim: number of pixels to expand the crop by, int (default: 20)
overlap_threshold: upper limit of how much the cropped image may include other labels, float<1 (default: 0.1)
verbose: say something during processing!, bool (default: True)
Outputs:
_crop_ims: list of images that has been cropped
'''
# imports
from scipy.ndimage.interpolation import shift
# check dimension
_im_dim = np.shape(im)
_label_dim = np.shape(segmentation_label)
if drift is not None:
if len(drift) != len(im.shape):
            raise ValueError('drift dimension and image dimension do not match!')
# initialize cropped image list
_crop_ims = []
for _l in range(int(np.max(segmentation_label))):
#print _l
if len(_label_dim) == 3: # 3D
            _limits = np.zeros([len(_label_dim),2], dtype=int) # initialize matrix to save cropping limit
_binary_label = segmentation_label == _l+1 # extract binary image
for _m in range(len(_label_dim)):
_1d_label = _binary_label.sum(_m) > 0
_has_label=False
for _n in range(len(_1d_label)):
if _1d_label[_n] and not _has_label:
_limits[_m,0] = max(_n-extend_dim, 0)
_has_label = True
elif not _1d_label[_n] and _has_label:
_limits[_m,1] = min(_n+extend_dim, _im_dim[_m])
_has_label = False
if _has_label:
_limits[_m,1] = _im_dim[_m]
# crop image and save to _crop_ims
if drift is None:
_crop_ims.append(im[_limits[0,0]:_limits[0,1], _limits[2,0]:_limits[2,1], _limits[1,0]:_limits[1,1]])
else: # do drift correction first and crop
# define a new drift limits to do cropping
                _drift_limits = np.zeros(_limits.shape, dtype=int)
for _m, _dim, _d in zip(list(range(len(_label_dim))), _im_dim[-len(_label_dim):], drift[[0,2,1]]):
_drift_limits[_m, 0] = max(_limits[_m, 0]-np.ceil(np.max(np.abs(_d))), 0)
_drift_limits[_m, 1] = min(_limits[_m, 1]+np.ceil(np.max(np.abs(_d))), _dim)
#print _drift_limits
# crop image for pre-correction
_pre_im = im[_drift_limits[0,0]:_drift_limits[0,1],_drift_limits[2,0]:_drift_limits[2,1],_drift_limits[1,0]:_drift_limits[1,1]]
# drift correction
_post_im = shift(_pre_im, - drift)
# re-crop
_limit_diffs = _limits - _drift_limits
for _m in range(len(_label_dim)):
if _limit_diffs[_m,1] == 0:
_limit_diffs[_m,1] = _limits[_m,1] - _limits[_m,0]
                _limit_diffs = _limit_diffs.astype(int)
#print _limit_diffs
_crop_ims.append(_post_im[_limit_diffs[0,0]:_limit_diffs[0,0]+_limits[0,1]-_limits[0,0],\
_limit_diffs[2,0]:_limit_diffs[2,0]+_limits[2,1]-_limits[2,0],\
_limit_diffs[1,0]:_limit_diffs[1,0]+_limits[1,1]-_limits[1,0]])
else: # 2D
            _limits = np.zeros([len(_label_dim),2], dtype=int) # initialize matrix to save cropping limit
_binary_label = segmentation_label == _l+1 # extract binary image
for _m in range(len(_label_dim)):
_1d_label = _binary_label.sum(_m) > 0
_has_label=False
for _n in range(len(_1d_label)):
if _1d_label[_n] and not _has_label:
_limits[_m,0] = max(_n-extend_dim, 0)
_has_label = True
elif not _1d_label[_n] and _has_label:
_limits[_m,1] = min(_n+extend_dim, _im_dim[1+_m])
_has_label = False
if _has_label: # if label touch boundary
_limits[_m,1] = _im_dim[1+_m]
#print _limits
# crop image and save to _crop_ims
if drift is None:
_crop_ims.append(im[:,_limits[1,0]:_limits[1,1],_limits[0,0]:_limits[0,1]])
else: # do drift correction first and crop
# define a new drift limits to do cropping
                _drift_limits = np.zeros(_limits.shape, dtype=int)
for _m, _dim in zip(list(range(len(_label_dim))), _im_dim[-len(_label_dim):]):
_drift_limits[_m, 0] = max(_limits[_m, 0]-np.ceil(np.abs(drift[2-_m])), 0)
_drift_limits[_m, 1] = min(_limits[_m, 1]+np.ceil(np.abs(drift[2-_m])), _dim)
#print _drift_limits
# crop image for pre-correction
_pre_im = im[:,_drift_limits[1,0]:_drift_limits[1,1],_drift_limits[0,0]:_drift_limits[0,1]]
# drift correction
_post_im = shift(_pre_im, -drift)
# re-crop
                _limit_diffs = (_limits - _drift_limits).astype(int)
#print _limit_diffs
_crop_ims.append(_post_im[:,_limit_diffs[1,0]:_limit_diffs[1,0]+_limits[1,1]-_limits[1,0],_limit_diffs[0,0]:_limit_diffs[0,0]+_limits[0,1]-_limits[0,0]])
return _crop_ims
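# --- Illustrative sketch (not part of the original module) ---
# A compact 2D illustration of the cropping-limit logic inside crop_cell: for
# one label id, find the padded bounding box along each axis. The toy label
# image and padding below are assumptions chosen only for demonstration.
def _demo_label_limits(extend_dim=2):
    import numpy as np
    label = np.zeros((20, 20), dtype=int)
    label[5:9, 7:12] = 1                                   # one segmented "cell" with id 1
    binary = (label == 1)
    limits = np.zeros((binary.ndim, 2), dtype=int)
    for ax in range(binary.ndim):
        idx = np.nonzero(binary.any(axis=1 - ax))[0]       # positions along this axis containing the label
        limits[ax, 0] = max(idx.min() - extend_dim, 0)
        limits[ax, 1] = min(idx.max() + 1 + extend_dim, binary.shape[ax])
    return limits                                          # here: rows 3..11, cols 5..14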
# get a limited number of seed points within a radius of a center
def get_seed_in_distance(im, center=None, num_seeds=0, seed_radius=30,
gfilt_size=0.75, background_gfilt_size=10, filt_size=3,
seed_by_per=False, th_seed_percentile=95,
th_seed=300,
dynamic=True, dynamic_iters=10, min_dynamic_seeds=2,
distance_to_edge=1, hot_pix_th=4,
return_h=False, verbose=False):
    '''Get seed points within a given distance of a center coordinate
Inputs:
im: image, 3D-array
center: center coordinate to get seeds nearby, 1d array / list of 3
        num_seeds: maximum number of seeds kept within radius, 0 means keep all, int (default: 0)
        seed_radius: distance of seed points to center, float (default: 30)
        gfilt_size: sigma of gaussian filter applied to image before seeding, float (default: 0.75)
        filt_size: size of the window used to get local maxima and minima, int (default: 3)
        th_seed_percentile: intensity percentile of the whole image used as seeding threshold, float (default: 95)
        hot_pix_th: threshold for hot pixels, int (default: 4)
return_h: whether return height of seeds, bool (default: False)
Outputs:
_seeds: z,x,y coordinates of seeds, 3 by n matrix
n = num_seed
if return height is true, return h,z,x,y instead.
'''
from scipy.stats import scoreatpercentile
from scipy.spatial.distance import cdist
# check input
if center is not None and len(center) != 3:
raise ValueError('wrong input dimension of center!')
_dim = np.shape(im)
_im = im.copy()
# seeding threshold
if seed_by_per:
        _im_ints = _im[np.isnan(_im)==False].astype(float)
_th_seed = scoreatpercentile(_im_ints, th_seed_percentile) - \
scoreatpercentile(_im_ints, 100-th_seed_percentile)
else:
_th_seed = th_seed
if verbose:
print(f"-- seeding with threshold: {_th_seed}, per={th_seed_percentile}")
# start seeding
if center is not None:
        _center = np.array(center, dtype=float)
        _limits = np.zeros([2, 3], dtype=int)
        _limits[0, 1:] = np.array([np.max([x, y]) for x, y in zip(
            np.zeros(2), _center[1:]-seed_radius)], dtype=int)
        _limits[0, 0] = np.array(
            np.max([0, _center[0]-seed_radius/2]), dtype=int)
        _limits[1, 1:] = np.array([np.min([x, y]) for x, y in zip(
            _dim[1:], _center[1:]+seed_radius)], dtype=int)
        _limits[1, 0] = np.array(
            np.min([_dim[0], _center[0]+seed_radius/2]), dtype=int)
_local_center = _center - _limits[0]
# crop im
_cim = _im[_limits[0, 0]:_limits[1, 0], _limits[0, 1]:_limits[1, 1], _limits[0, 2]:_limits[1, 2]]
if dynamic:
_dynamic_range = np.linspace(1, 1 / dynamic_iters, dynamic_iters)
for _dy_ratio in _dynamic_range:
_dynamic_th = _th_seed * _dy_ratio
#print(_dynamic_th)
# get candidate seeds
_cand_seeds = get_seed_points_base(_cim, gfilt_size=gfilt_size, background_gfilt_size=background_gfilt_size,
filt_size=filt_size, th_seed=_dynamic_th, hot_pix_th=hot_pix_th, return_h=True)
# keep seed within distance
_distance = cdist(_cand_seeds[:3].transpose(), _local_center[np.newaxis, :3]).transpose()[0]
_keep = _distance < seed_radius
_seeds = _cand_seeds[:, _keep]
_seeds[:3, :] += _limits[0][:, np.newaxis]
if len(_seeds.shape) == 2:
if num_seeds > 0 and _seeds.shape[1] >= min(num_seeds, min_dynamic_seeds):
break
elif num_seeds == 0 and _seeds.shape[1] >= min_dynamic_seeds:
break
else:
# get candidate seeds
_seeds = get_seed_points_base(_cim, gfilt_size=gfilt_size, filt_size=filt_size,
th_seed=th_seed, hot_pix_th=hot_pix_th, return_h=True)
else:
# get candidate seeds
_seeds = get_seed_points_base(_im, gfilt_size=gfilt_size, filt_size=filt_size,
th_seed=_th_seed, hot_pix_th=hot_pix_th, return_h=True)
# remove seeds out of boundary
#_keep = np.sum(, axis=0)
# if limited seeds reported, report top n
if _seeds.shape[1] > 1:
_intensity_order = np.argsort(_seeds[-1])
_seeds = _seeds[:, np.flipud(_intensity_order[-num_seeds:])]
# if not return height, remove height
if not return_h:
_seeds = _seeds[:3].transpose()
else:
_seeds = _seeds[:4].transpose()
return _seeds
# fit single gaussian with varying width given prior
def fit_single_gaussian(im, center_zxy, counted_indices=None,
width_zxy=[1.35, 1.9, 1.9], fit_radius=5, n_approx=10,
height_sensitivity=100., expect_intensity=800.,
weight_sigma=1000.,
th_to_end=1e-6):
""" Function to fit single gaussian with given prior
Inputs:
im: image, 3d-array
center_zxy: center coordinate of seed, 1d-array or list of 3
counted_indices: z,x,y indices for pixels to be counted, np.ndarray, length=3
        width_zxy: prior width of gaussian fit, 1d-array or list of 3 (default: [1.35, 1.9, 1.9])
        fit_radius: radius allowed for fitting, float (default: 5)
        n_approx: number of pixels used for approximation, int (default: 10)
        height_sensitivity: grant the height parameter extra sensitivity compared to others, float (default: 100)
        expect_intensity: lower limit of the penalty function applied to fitting, float (default: 800)
        weight_sigma: weight of the L1 norm penalty applied to widths, float (default: 1000)
Outputs:
        p.x, p.success: fitted parameters and whether the fit succeeded
        p.x is (height, z, x, y, background, width_z, width_x, width_y),
        the gaussian parameters of a 3D distribution found by the fit"""
    import scipy.optimize
    _im = np.array(im, dtype=np.float32)
dims = np.array(_im.shape)
# dynamic adjust height_sensitivity
if np.max(_im) < height_sensitivity:
height_sensitivity = np.ceil(np.max(_im)) * 0.5
if np.max(_im) < expect_intensity:
expect_intensity = np.max(_im) * 0.1
if len(center_zxy) == 3:
center_z, center_x, center_y = center_zxy
else:
raise ValueError(
"Wrong input for kwd center_zxy, should be of length=3")
if counted_indices is not None and len(counted_indices) != 3:
raise ValueError(
"Length of counted_indices should be 3, for z,x,y coordinates")
elif counted_indices is not None:
zxy = counted_indices
else: # get affected coordinates de novo
total_zxy = (np.indices([2*fit_radius+1]*3) + center_zxy[:,
np.newaxis, np.newaxis, np.newaxis] - fit_radius).reshape(3, -1)
keep = (total_zxy >= 0).all(0) * (total_zxy[0] < _im.shape[0]) * (
total_zxy[1] < _im.shape[1]) * (total_zxy[2] < _im.shape[2])
zxy = total_zxy[:, keep]
if len(zxy[0]) > 0:
_used_im = _im[zxy[0], zxy[1], zxy[2]]
sorted_im = np.sort(_used_im) # np.sort(np.ravel(_used_im))
bk = np.median(sorted_im[:n_approx])
if bk < 0:
bk = 0
height = (np.median(sorted_im[-n_approx:])-bk) / height_sensitivity
if height < 0:
height = 0
width_z, width_x, width_y = np.array(width_zxy)
params_ = (height, center_z, center_x, center_y,
bk, width_z, width_x, width_y)
def gaussian(height, center_z, center_x, center_y,
bk=0,
width_z=width_zxy[0],
width_x=width_zxy[1],
width_y=width_zxy[2]):
"""Returns a gaussian function with the given parameters"""
width_x_ = np.abs(width_x)
width_y_ = np.abs(width_y)
width_z_ = np.abs(width_z)
height_ = np.abs(height)
bk_ = np.abs(bk)
def gauss(z, x, y):
g = bk_ + height_ * height_sensitivity * np.exp(
-(((center_z-z)/width_z_)**2 +
((center_x-x)/width_x_)**2 +
((center_y-y)/width_y_)**2)/2.)
return g
return gauss
def errorfunction(p):
f = gaussian(*p)(*zxy)
g = _used_im
#err=np.ravel(f-g-g*np.log(f/g))
err = np.ravel(f-g) \
+ weight_sigma * np.linalg.norm(p[-3:]-width_zxy, 1)
return err
p = scipy.optimize.least_squares(errorfunction, params_, bounds=(
0, np.inf), ftol=th_to_end, xtol=th_to_end, gtol=th_to_end/10.)
p.x[0] *= height_sensitivity
return p.x, p.success
else:
return None, None
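# --- Illustrative sketch (not part of the original module) ---
# Fit one synthetic 3D Gaussian spot with fit_single_gaussian above. The image,
# spot position, widths and background are assumptions made purely for
# illustration; the returned parameter vector follows the params_ order used in
# the function: (height, z, x, y, background, width_z, width_x, width_y).
def _demo_fit_single_gaussian():
    zz, xx, yy = np.indices((21, 21, 21)).astype(float)
    true_center = np.array([10., 9., 11.])
    spot = 50. + 900. * np.exp(-(((zz - true_center[0]) / 1.4) ** 2 +
                                 ((xx - true_center[1]) / 1.9) ** 2 +
                                 ((yy - true_center[2]) / 1.9) ** 2) / 2.)
    # integer seed at the brightest pixel; the fit refines the sub-pixel center
    params, success = fit_single_gaussian(spot, np.array([10, 9, 11]))
    return params, success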
# Multi gaussian fitting
def fit_multi_gaussian(im, seeds, width_zxy = [1.5, 2, 2], fit_radius=5,
height_sensitivity=100., expect_intensity=500., expect_weight=1000.,
th_to_end=1e-7,
n_max_iter=10, max_dist_th=0.25, min_height=100.0,
return_im=False, verbose=True):
""" Function to fit multiple gaussians (with given prior)
Inputs:
im: image, 3d-array
        seeds: seed points to fit, one (z, x, y[, h]) row per seed, 2d-array
        width_zxy: prior width of gaussian fit, 1d-array or list of 3 (default: [1.5, 2, 2])
        fit_radius: radius allowed for fitting, float (default: 5)
        height_sensitivity: grant the height parameter extra sensitivity compared to others, float (default: 100)
        expect_intensity: lower limit of the penalty function applied to fitting, float (default: 500)
        expect_weight: weight of the L1 norm penalty applied to widths, float (default: 1000)
        n_max_iter: max iteration count for re-fitting existing points, int (default: 10)
        max_dist_th: maximum allowed distance between original fit and re-fit, float (default: 0.25)
        min_height: minimal height required for fitted spots, float (default: 100.)
return_im: whether return images of every single fitting, bool (default: False)
verbose: whether say something, bool (default: True)
Outputs:
        ps: array of fitted parameters, one row per successfully fitted seed;
        each row is (height, z, x, y, background, width_z, width_x, width_y),
        the gaussian parameters of a 3D distribution found by the fit"""
if verbose:
print(f"-- Multi-Fitting:{len(seeds)} points")
# adjust min_height:
if np.max(im) * 0.1 < min_height:
min_height = np.max(im)*0.05
# seeds
_seeds = seeds
if len(_seeds) > 0:
# initialize
ps = []
sub_ims = []
        im_subtr = np.array(im,dtype=float)
# loop through seeds
for _seed in _seeds:
p, success = fit_single_gaussian(im_subtr,_seed[:3],
height_sensitivity=height_sensitivity,
expect_intensity=expect_intensity,
weight_sigma=expect_weight,
fit_radius=fit_radius,
width_zxy=width_zxy,
th_to_end=th_to_end)
            if p is not None and success: # if the fit succeeded, record it and subtract the fitted profile
ps.append(p)
sub_ims.append(im_subtr)
im_subtr = subtract_source(im_subtr,p)
return np.array(ps)
print("do something")
# recheck fitting
im_add = np.array(im_subtr)
max_dist=np.inf
n_iter = 0
while max_dist > max_dist_th:
            ps_1 = np.array(ps)
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
from PIL import Image
from evaluate import trans_error
from drawBox import draw
from Reader import Reader
from Math import get_R
mpl.use('QT5Agg')
def get_location_1(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Method 1: the center of the 2D bbox coincides with the center of the 3D bbox.
    Only a single center-point correspondence exists, so the problem is hard to constrain.
    If the Z value is replaced with the ground truth, the result is acceptable; Z is on a much larger scale than X and Y.
"""
R = get_R(rotation_x, rotation_y)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
h, w, l = dimension[0], dimension[1], dimension[2]
constraints = [0, -h/2, 0]
corners = [(xmin+xmax)/2, (ymin+ymax)/2]
# create pre M (the term with I and the R*X)
M = np.zeros([4, 4])
for i in range(0, 4):
M[i][i] = 1
# create A, b
    A = np.zeros([2, 3], dtype=float)
b = np.zeros([2, 1])
RX = np.dot(R, constraints)
M[:3, 3] = RX.reshape(3)
M = np.dot(proj_matrix, M)
A[0, :] = M[0, :3] - corners[0] * M[2, :3] # [540 0 960] - 1116[0 0 1]
b[0] = corners[0] * M[2, 3] - M[0, 3]
A[1, :] = M[1, :3] - corners[1] * M[2, :3] # [540 0 960] - 1116[0 0 1]
b[1] = corners[1] * M[2, 3] - M[1, 3]
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# loc = [loc[0][0], loc[1][0] + dimension[0] / 2, loc[2][0]]
loc = [loc[0][0], loc[1][0], loc[2][0]]
return loc
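# --- Illustrative sketch (not part of the original file) ---
# Calling get_location_1 with a synthetic KITTI-style 3x4 projection matrix,
# 2D box and object dimensions. All numbers are made up for illustration, and
# Math.get_R must be importable exactly as at the top of this file.
def _demo_get_location_1():
    proj_matrix = np.array([[721.5, 0.0, 609.6, 44.9],
                            [0.0, 721.5, 172.9, 0.2],
                            [0.0, 0.0, 1.0, 0.003]])
    box_2d = [500, 150, 700, 300]           # xmin, ymin, xmax, ymax in pixels
    dimension = [1.5, 1.6, 3.9]             # height, width, length in metres
    loc = get_location_1(box_2d, dimension, rotation_x=0.0, rotation_y=0.1,
                         rotation_z=0.0, proj_matrix=proj_matrix)
    return loc                              # estimated (x, y, z) in camera coordinates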
def get_location_2(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Use the 2D box enclosing the 3D box as the constraint.
"""
R = get_R(rotation_x, rotation_y, rotation_z)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
# left top right bottom
box_corners = [xmin, ymin, xmax, ymax]
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
    # using a different coord system; in the original dataset the first entry is height, the second is width (side to side), the third is length (front to back)
dx = dimension[2] / 2 # length
dy = dimension[0] / 2 # height
dz = dimension[1] / 2 # width
left_mult = 1
right_mult = -1
switch_mult = -1 # -1
for i in (-2, 0):
left_constraints.append([left_mult * dx, i*dy, -switch_mult * dz])
for i in (-2, 0):
right_constraints.append([right_mult * dx, i*dy, switch_mult * dz])
for i in (-1, 1):
for j in (-1, 1):
top_constraints.append([i*dx, -dy*2, j*dz])
for i in (-1, 1):
for j in (-1, 1):
bottom_constraints.append([i*dx, 0, j*dz])
# now, 64 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(
set(tuple(i) for i in x)), constraints)
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
count = 0
for constraint in constraints:
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
                    X_array = [Xa, Xb, Xc, Xd]  # 4 constraints corresponding to the left/top/right/bottom edges, shape=(4, 3)
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
Mb = np.copy(pre_M)
Mc = np.copy(pre_M)
Md = np.copy(pre_M)
                    M_array = [Ma, Mb, Mc, Md]  # four 4x4 matrices with ones on the diagonal
# create A, b
                    A = np.zeros([4, 3], dtype=float)
                    b = np.zeros([4, 1])
                    # for each of the constraints (left/top/right/bottom)
indicies = [0, 1, 0, 1]
for row, index in enumerate(indicies):
X = X_array[row]
M = M_array[row] # 一个对角是1的4*4方阵 .shape = 4*4
# create M for corner Xx
                        RX = np.dot(R, X)  # the constraint point rotated into the camera frame, shape=(3,)
                        # diagonal stays 1; the first three rows of the last column hold RX in camera coordinates, shape=(4, 4)
                        M[:3, 3] = RX.reshape(3)
                        # project onto the image plane, shape=(3, 4); the first three columns come from the projection matrix, the last column is the image-plane x, y, 1
M = np.dot(proj_matrix, M)
A[row, :] = M[index, :3] - box_corners[row] * M[2, :3]
b[row] = box_corners[row] * M[2, 3] - M[index, 3]
# solve here with least squares, since over fit will get some error
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# found a better estimation
if error < best_error:
count += 1 # for debugging
best_loc = loc
best_error = error
best_X = X_array
best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
return best_loc
def get_location_3(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Use the 2D box enclosing the 3D box as the constraint, with additional left and right constraints.
"""
R = get_R(rotation_x, rotation_y, rotation_z)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
box_corners = [xmin, ymin, xmax, ymax]
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
    # using a different coord system; in the original dataset the first entry is height, the second is width (side to side), the third is length (front to back)
dx = dimension[2] / 2 # length
dy = dimension[0] / 2 # height
dz = dimension[1] / 2 # width
for i in (-1, 1):
for j in (-1, 1):
for k in (-2, 0):
left_constraints.append([i * dx, k * dy, j * dz])
for i in (-1, 1):
for j in (-1, 1):
for k in (-2, 0):
right_constraints.append([i * dx, k * dy, j * dz])
# top and bottom are easy, just the top and bottom of car
for i in (-1, 1):
for j in (-1, 1):
top_constraints.append([i*dx, -dy*2, j*dz])
for i in (-1, 1):
for j in (-1, 1):
bottom_constraints.append([i*dx, 0, j*dz])
# now, 64 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(
set(tuple(i) for i in x)), constraints)
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
count = 0
for constraint in constraints:
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
                    X_array = [Xa, Xb, Xc, Xd]  # 4 constraints corresponding to the left/top/right/bottom edges, shape=(4, 3)
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
Mb = np.copy(pre_M)
Mc = np.copy(pre_M)
Md = np.copy(pre_M)
                    M_array = [Ma, Mb, Mc, Md]  # four 4x4 matrices with ones on the diagonal
# create A, b
                    A = np.zeros([4, 3], dtype=float)
                    b = np.zeros([4, 1])
                    # for each of the constraints (left/top/right/bottom)
indicies = [0, 1, 0, 1]
for row, index in enumerate(indicies):
X = X_array[row]
M = M_array[row] # 一个对角是1的4*4方阵 .shape = 4*4
# create M for corner Xx
                        RX = np.dot(R, X)  # the constraint point rotated into the camera frame, shape=(3,)
                        # diagonal stays 1; the first three rows of the last column hold RX in camera coordinates, shape=(4, 4)
                        M[:3, 3] = RX.reshape(3)
                        # project onto the image plane, shape=(3, 4); the first three columns come from the projection matrix, the last column is the image-plane x, y, 1
M = np.dot(proj_matrix, M)
A[row, :] = M[index, :3] - box_corners[row] * M[2, :3]
b[row] = box_corners[row] * M[2, 3] - M[index, 3]
# solve here with least squares, since over fit will get some error
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# found a better estimation
if error < best_error:
count += 1 # for debugging
best_loc = loc
best_error = error
best_X = X_array
best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
return best_loc
def get_location_4(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Use the 2D box enclosing the 3D box as the constraint.
    With the coordinate system fixed, the number of constraints is 64 - 30 = 34.
    After also considering swapping length and width, the final number of constraints is 34 * 2 = 68.
    The 000 special case is considered.
"""
R = get_R(rotation_x, rotation_y, rotation_z)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
# left top right bottom
box_corners = [xmin, ymin, xmax, ymax]
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
left_constraints_2 = []
right_constraints_2 = []
top_constraints_2 = []
bottom_constraints_2 = []
dx = dimension[2] / 2 # length
dy = dimension[0] / 2 # height
dz = dimension[1] / 2 # width
left_mult = -1
right_mult = 1
for i in (-2, 0):
for j in (-1, 1):
left_constraints.append([left_mult * dx, i*dy, j * dz])
for i in (-2, 0):
for j in (-1, 1):
right_constraints.append([right_mult * dx, i*dy, j * dz])
    # consider swapping length and width
for i in (-2, 0):
for j in (-1, 1):
left_constraints_2.append([left_mult * dz, i*dy, j * dx])
for i in (-2, 0):
for j in (-1, 1):
right_constraints_2.append([right_mult * dz, i*dy, j * dx])
for i in (-1, 1):
top_constraints.append([i*dx, -dy*2, dz])
for i in (-1, 1):
bottom_constraints.append([i*dx, 0, -dz])
    # consider swapping length and width
for i in (-1, 1):
top_constraints_2.append([i*dz, -dy*2, dx])
for i in (-1, 1):
bottom_constraints_2.append([i*dz, 0, -dx])
# now, 128 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
for left in left_constraints_2:
for top in top_constraints_2:
for right in right_constraints_2:
for bottom in bottom_constraints_2:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(
set(tuple(i) for i in x)), constraints)
# print(len(list(constraints)))
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
count = 0
for constraint in constraints:
# print('constraint:',constraint)
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
        X_array = [Xa, Xb, Xc, Xd]  # 4 constraints corresponding to the left/top/right/bottom edges, shape=(4, 3)
# print('X_array:',X_array)
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
Mb = np.copy(pre_M)
Mc = np.copy(pre_M)
Md = np.copy(pre_M)
        M_array = [Ma, Mb, Mc, Md]  # four 4x4 matrices with ones on the diagonal
# create A, b
        A = np.zeros([4, 3], dtype=float)
        b = np.zeros([4, 1])
        # for each of the constraints (left/top/right/bottom)
indicies = [0, 1, 0, 1]
for row, index in enumerate(indicies):
X = X_array[row]
            # X_array holds the four constraints for top, bottom, left and right;
            # X is one point in the object (world) coordinate system, shape=(3,)
M = M_array[row] # 一个对角是1的4*4方阵 .shape = 4*4
# create M for corner Xx
            RX = np.dot(R, X)  # the constraint point rotated into the camera frame, shape=(3,)
            # diagonal stays 1; the first three rows of the last column hold RX in camera coordinates, shape=(4, 4)
            M[:3, 3] = RX.reshape(3)
            # project onto the image plane, shape=(3, 4); the first three columns come from the projection matrix, the last column is the image-plane x, y, 1
M = np.dot(proj_matrix, M)
A[row, :] = M[index, :3] - box_corners[row] * M[2, :3]
b[row] = box_corners[row] * M[2, 3] - M[index, 3]
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# found a better estimation
if error < best_error:
count += 1 # for debugging
best_loc = loc
best_error = error
best_X = X_array
best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
return best_loc
def get_location_5(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Use the 2D box enclosing the 3D box as the constraint.
    With the coordinate system fixed, the number of constraints is 64 - 30 = 34.
    After also considering swapping length and width, the final number of constraints is 34 * 2 = 68.
    The 000 special case is not considered.
"""
R = get_R(rotation_x, rotation_y, rotation_z)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
# left top right bottom
box_corners = [xmin, ymin, xmax, ymax]
# print('box_corners:',box_corners)
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
left_constraints_2 = []
right_constraints_2 = []
top_constraints_2 = []
bottom_constraints_2 = []
dx = dimension[2] / 2 # length
dy = dimension[0] / 2 # height
dz = dimension[1] / 2 # width
left_mult = -1
right_mult = 1
for j in (-1, 1):
left_constraints.append([left_mult * dx, -2*dy, j * dz])
for j in (-2, 0):
right_constraints.append([right_mult * dx, j*dy, -dz])
left_constraints.append([left_mult * dx, 0, dz])
    # consider swapping length and width
for j in (-1, 1):
left_constraints_2.append([left_mult * dz, -2*dy, j * dx])
for j in (-2, 0):
right_constraints.append([right_mult * dz, j*dy, -dx])
left_constraints_2.append([left_mult * dz, 0, dx])
# top and bottom are easy, just the top and bottom of car
for i in (-1, 1):
top_constraints.append([i*dx, -dy*2, dz])
for i in (-1, 1):
bottom_constraints.append([i*dx, 0, -dz])
    # consider swapping length and width
for i in (-1, 1):
top_constraints_2.append([i*dz, -dy*2, dx])
for i in (-1, 1):
bottom_constraints_2.append([i*dz, 0, -dx])
# now, 64 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
for left in left_constraints_2:
for top in top_constraints_2:
for right in right_constraints_2:
for bottom in bottom_constraints_2:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(
set(tuple(i) for i in x)), constraints)
# print(len(list(constraints)))
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
count = 0
for constraint in constraints:
# print('constraint:',constraint)
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
        X_array = [Xa, Xb, Xc, Xd]  # 4 constraints corresponding to the left/top/right/bottom edges, shape=(4, 3)
# print('X_array:',X_array)
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
Mb = np.copy(pre_M)
Mc = np.copy(pre_M)
Md = np.copy(pre_M)
        M_array = [Ma, Mb, Mc, Md]  # four 4x4 matrices with ones on the diagonal
# create A, b
        A = np.zeros([4, 3], dtype=float)
        b = np.zeros([4, 1])
        # for each of the constraints (left/top/right/bottom)
indicies = [0, 1, 0, 1]
for row, index in enumerate(indicies):
X = X_array[row]
M = M_array[row] # 一个对角是1的4*4方阵 .shape = 4*4
# create M for corner Xx
            RX = np.dot(R, X)  # the constraint point rotated into the camera frame, shape=(3,)
            # diagonal stays 1; the first three rows of the last column hold RX in camera coordinates, shape=(4, 4)
            M[:3, 3] = RX.reshape(3)
            # project onto the image plane, shape=(3, 4); the first three columns come from the projection matrix, the last column is the image-plane x, y, 1
M = np.dot(proj_matrix, M)
A[row, :] = M[index, :3] - box_corners[row] * M[2, :3]
b[row] = box_corners[row] * M[2, 3] - M[index, 3]
# solve here with least squares, since over fit will get some error
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# found a better estimation
if error < best_error:
count += 1 # for debugging
best_loc = loc
best_error = error
best_X = X_array
best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
return best_loc
def get_location_6(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
"""
    Use the 2D box enclosing the 3D box as the constraint.
    With the coordinate system fixed, the number of constraints is 64 - 30 = 34.
    After also considering swapping length and width, the final number of constraints is 34 * 2 = 68.
    The 000 special case is not considered.
    The center of the 2D box is also the center of the 3D box.
"""
R = get_R(rotation_x, rotation_y, rotation_z)
# format 2d corners
xmin = box_2d[0]
ymin = box_2d[1]
xmax = box_2d[2]
ymax = box_2d[3]
# left top right bottom
box_corners = [xmin, ymin, xmax, ymax]
# print('box_corners:',box_corners)
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
left_constraints_2 = []
right_constraints_2 = []
top_constraints_2 = []
bottom_constraints_2 = []
    # using a different coord system; in the original dataset the first entry is height, the second is width (side to side), the third is length (front to back)
dx = dimension[2] / 2 # length
dy = dimension[0] / 2 # height
dz = dimension[1] / 2 # width
left_mult = -1
right_mult = 1
for j in (-1, 1):
left_constraints.append([left_mult * dx, -2*dy, j * dz])
for j in (-2, 0):
right_constraints.append([right_mult * dx, j*dy, -dz])
left_constraints.append([left_mult * dx, 0, dz])
    # consider swapping length and width
for j in (-1, 1):
left_constraints_2.append([left_mult * dz, -2*dy, j * dx])
for j in (-2, 0):
right_constraints.append([right_mult * dz, j*dy, -dx])
left_constraints_2.append([left_mult * dz, 0, dx])
# top and bottom are easy, just the top and bottom of car
for i in (-1, 1):
top_constraints.append([i*dx, -dy*2, dz])
for i in (-1, 1):
bottom_constraints.append([i*dx, 0, -dz])
    # consider swapping length and width
for i in (-1, 1):
top_constraints_2.append([i*dz, -dy*2, dx])
for i in (-1, 1):
bottom_constraints_2.append([i*dz, 0, -dx])
# now, 64 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
for left in left_constraints_2:
for top in top_constraints_2:
for right in right_constraints_2:
for bottom in bottom_constraints_2:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(
set(tuple(i) for i in x)), constraints)
# print(len(list(constraints)))
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
    # constraint 2: the center point
constraints_center = [0, dy, 0]
corners = [(xmin+xmax)/2, (ymin+ymax)/2]
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
    # constraint 1
count = 0
for constraint in constraints:
# print('constraint:',constraint)
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
        X_array = [Xa, Xb, Xc, Xd]  # 4 constraints corresponding to the left/top/right/bottom edges, shape=(4, 3)
# print('X_array:',X_array)
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
        Mb = np.copy(pre_M)
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
from typing import Union, Iterable, List
import numpy as np
from scipy.stats import norm
from scipy.interpolate import interp1d, griddata
import matplotlib.pyplot as plt
import tikzplotlib
from netcal.metrics import _Miscalibration
class ReliabilityDiagram(object):
"""
Plot Confidence Histogram and Reliability Diagram to visualize miscalibration.
On classification, plot the gaps between average confidence and observed accuracy bin-wise over the confidence
space [1]_, [2]_.
On detection, plot the miscalibration w.r.t. the additional regression information provided (1-D or 2-D) [3]_.
Parameters
----------
bins : int or iterable, default: 10
Number of bins used by the ACE/ECE/MCE.
On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).
If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).
equal_intervals : bool, optional, default: True
        If True, the bins have the same width. If False, the bins are split to equalize
the number of samples in each bin.
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
If True, the input array 'X' is treated as a box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
fmin : float, optional, default: None
Minimum value for scale color.
fmax : float, optional, default: None
Maximum value for scale color.
metric : str, default: 'ECE'
Metric to measure miscalibration. Might be either 'ECE', 'ACE' or 'MCE'.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>:
"On Calibration of Modern Neural Networks."
Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.
`Get source online <https://arxiv.org/abs/1706.04599>`_
.. [2] <NAME> and <NAME>:
“Predicting good probabilities with supervised learning.”
Proceedings of the 22nd International Conference on Machine Learning, 2005, pp. 625–632.
`Get source online <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_
.. [3] <NAME>, <NAME>, <NAME> and <NAME>:
"Multivariate Confidence Calibration for Object Detection."
The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.
`Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_
"""
def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,
detection: bool = False, sample_threshold: int = 1,
fmin: float = None, fmax: float = None,
metric: str = 'ECE', **kwargs):
""" Constructor. For detailed parameter documentation view classdocs. """
self.bins = bins
self.detection = detection
self.sample_threshold = sample_threshold
self.fmin = fmin
self.fmax = fmax
self.metric = metric
if 'feature_names' in kwargs:
self.feature_names = kwargs['feature_names']
if 'title_suffix' in kwargs:
self.title_suffix = kwargs['title_suffix']
self._miscalibration = _Miscalibration(bins=bins, equal_intervals=equal_intervals,
detection=detection, sample_threshold=sample_threshold)
def plot(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],
batched: bool = False, uncertainty: str = None, filename: str = None, tikz: bool = False,
title_suffix: str = None, feature_names: List[str] = None, **save_args) -> Union[plt.Figure, str]:
"""
Reliability diagram to visualize miscalibration. This could be either in classical way for confidences only
or w.r.t. additional properties (like x/y-coordinates of detection boxes, width, height, etc.). The additional
properties get binned. Afterwards, the miscalibration will be calculated for each bin. This is
visualized as a 2-D plots.
Parameters
----------
X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
            If 3-D, interpret the first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If this is an iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
On detection, this array must have 2 dimensions with number of additional box features in last dim.
y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])
NumPy array with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).
            If 3-D, interpret the first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
batched : bool, optional, default: False
Multiple predictions can be evaluated at once (e.g. cross-validation examinations) using batched-mode.
All predictions given by X and y are separately evaluated and their results are averaged afterwards
for visualization.
        uncertainty : str, optional, default: None
Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods
that output an ensemble of predictions per sample. Choose one of the following options:
            - flatten: treat everything as a separate prediction - this option will yield a slightly better
calibration performance but without the visualization of a prediction interval.
- mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample
(mean) with a standard deviation that is visualized.
filename : str, optional, default: None
Optional filename to save the plotted figure.
tikz : bool, optional, default: False
If True, use 'tikzplotlib' package to return tikz-code for Latex rather than a Matplotlib figure.
title_suffix : str, optional, default: None
Suffix for plot title.
feature_names : list, optional, default: None
Names of the additional features that are attached to the axes of a reliability diagram.
**save_args : args
Additional arguments passed to 'matplotlib.pyplot.Figure.savefig' function if 'tikz' is False.
If 'tikz' is True, the argument are passed to 'tikzplotlib.get_tikz_code' function.
Returns
-------
matplotlib.pyplot.Figure if 'tikz' is False else str with tikz code.
Raises
------
AttributeError
- If parameter metric is not string or string is not 'ACE', 'ECE' or 'MCE'
- If parameter 'feature_names' is set but length does not fit to second dim of X
- If no ground truth samples are provided
- If length of bins parameter does not match the number of features given by X
- If more than 3 feature dimensions (including confidence) are provided
"""
# assign deprecated constructor parameter to title_suffix and feature_names
if hasattr(self, 'title_suffix') and title_suffix is None:
title_suffix = self.title_suffix
if hasattr(self, 'feature_names') and feature_names is None:
feature_names = self.feature_names
# check if metric is correct
if not isinstance(self.metric, str):
raise AttributeError('Parameter \'metric\' must be string with either \'ece\', \'ace\' or \'mce\'.')
# check metrics parameter
if self.metric.lower() not in ['ece', 'ace', 'mce']:
raise AttributeError('Parameter \'metric\' must be string with either \'ece\', \'ace\' or \'mce\'.')
else:
self.metric = self.metric.lower()
# perform checks and prepare input data
X, matched, sample_uncertainty, bin_bounds, num_features = self._miscalibration.prepare(X, y, batched, uncertainty)
if num_features > 3:
raise AttributeError("Diagram is not defined for more than 2 additional feature dimensions.")
histograms = []
for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):
batch_histograms = self._miscalibration.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])
histograms.append(batch_histograms[:-1])
# no additional dimensions? compute standard reliability diagram
if num_features == 1:
fig = self.__plot_confidence_histogram(X, matched, histograms, bin_bounds, title_suffix)
# one additional feature? compute 1D-plot
elif num_features == 2:
fig = self.__plot_1d(histograms, bin_bounds, title_suffix, feature_names)
# two additional features? compute 2D plot
elif num_features == 3:
fig = self.__plot_2d(histograms, bin_bounds, title_suffix, feature_names)
# number of dimensions exceeds 3? quit
else:
raise AttributeError("Diagram is not defined for more than 2 additional feature dimensions.")
# if tikz is true, create tikz code from matplotlib figure
if tikz:
# get tikz code for our specific figure and also pass filename to store possible bitmaps
tikz_fig = tikzplotlib.get_tikz_code(fig, filepath=filename, **save_args)
# close matplotlib figure when tikz figure is requested to save memory
plt.close(fig)
fig = tikz_fig
# save figure either as matplotlib PNG or as tikz output file
if filename is not None:
if tikz:
with open(filename, "w") as open_file:
open_file.write(fig)
else:
fig.savefig(filename, **save_args)
return fig
@classmethod
def __interpolate_grid(cls, metric_map: np.ndarray) -> np.ndarray:
""" Interpolate missing values in a 2D-grid using the mean of the data. The interpolation is done inplace. """
# get all NaNs
nans = np.isnan(metric_map)
x = lambda z: z.nonzero()
# get mean of the remaining values and interpolate missing by the mean
mean = float(np.mean(metric_map[~nans]))
metric_map[nans] = griddata(x(~nans), metric_map[~nans], x(nans), method='cubic', fill_value=mean)
return metric_map
def __plot_confidence_histogram(self, X: List[np.ndarray], matched: List[np.ndarray], histograms: List[np.ndarray],
bin_bounds: List, title_suffix: str = None) -> plt.Figure:
""" Plot confidence histogram and reliability diagram to visualize miscalibration for condidences only. """
# get number of bins (self.bins has not been processed yet)
n_bins = len(bin_bounds[0][0])-1
median_confidence = [(bounds[0][1:] + bounds[0][:-1]) * 0.5 for bounds in bin_bounds]
mean_acc, mean_conf = [], []
for batch_X, batch_matched, batch_hist, batch_median in zip(X, matched, histograms, median_confidence):
acc_hist, conf_hist, _, num_samples_hist = batch_hist
empty_bins, = np.nonzero(num_samples_hist == 0)
# calculate overall mean accuracy and confidence
mean_acc.append(np.mean(batch_matched))
mean_conf.append(np.mean(batch_X))
# set empty bins to median bin value
acc_hist[empty_bins] = batch_median[empty_bins]
conf_hist[empty_bins] = batch_median[empty_bins]
# convert num_samples to relative afterwards (inplace denoted by [:])
num_samples_hist[:] = num_samples_hist / np.sum(num_samples_hist)
# get mean histograms and values over all batches
acc = np.mean([hist[0] for hist in histograms], axis=0)
conf = np.mean([hist[1] for hist in histograms], axis=0)
uncertainty = np.sqrt(np.mean([hist[2] for hist in histograms], axis=0))
num_samples = np.mean([hist[3] for hist in histograms], axis=0)
mean_acc = np.mean(mean_acc)
mean_conf = np.mean(mean_conf)
median_confidence = np.mean(median_confidence, axis=0)
bar_width = np.mean([np.diff(bounds[0]) for bounds in bin_bounds], axis=0)
# compute credible interval of uncertainty
p = 0.05
z_score = norm.ppf(1. - (p / 2))
uncertainty = z_score * uncertainty
# if no uncertainty is given, set variable uncertainty to None in order to prevent drawing error bars
        if np.count_nonzero(uncertainty) == 0:
            uncertainty = None
import numpy as np
import torch
import torch.autograd as autograd
def weighted_mse(pred, target, weight=None):
if weight is None:
return torch.mean((pred - target) ** 2)
else:
return torch.mean(weight * (pred - target) ** 2)
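# --- Illustrative sketch (not part of the original module) ---
# Tiny check of weighted_mse above on made-up tensors: without weights it is a
# plain MSE, with weights each squared error is scaled before averaging.
def _demo_weighted_mse():
    pred = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.0, 2.0])
    weight = torch.tensor([1.0, 0.0, 2.0])
    plain = weighted_mse(pred, target)              # mean of [0.25, 0.0, 1.0]
    weighted = weighted_mse(pred, target, weight)   # mean of [0.25, 0.0, 2.0]
    return plain, weighted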
def get_3dboundary_points(num_x, # number of points on x axis
num_y, # number of points on y axis
num_t, # number of points on t axis
bot=(0, 0, 0), # lower bound
top=(1.0, 1.0, 1.0) # upper bound
):
x_top, y_top, t_top = top
x_bot, y_bot, t_bot = bot
x_arr = np.linspace(x_bot, x_top, num=num_x, endpoint=False)
y_arr = np.linspace(y_bot, y_top, num=num_y, endpoint=False)
xx, yy = np.meshgrid(x_arr, y_arr, indexing='ij')
xarr = np.ravel(xx)
yarr = np.ravel(yy)
tarr = np.ones_like(xarr) * t_bot
point0 = np.stack([xarr, yarr, tarr], axis=0).T # (SxSx1, 3), boundary on t=0
t_arr = np.linspace(t_bot, t_top, num=num_t)
yy, tt = np.meshgrid(y_arr, t_arr, indexing='ij')
yarr = np.ravel(yy)
tarr = np.ravel(tt)
xarr = np.ones_like(yarr) * x_bot
point2 = np.stack([xarr, yarr, tarr], axis=0).T # (1xSxT, 3), boundary on x=0
xarr = np.ones_like(yarr) * x_top
point3 = np.stack([xarr, yarr, tarr], axis=0).T # (1xSxT, 3), boundary on x=2pi
xx, tt = np.meshgrid(x_arr, t_arr, indexing='ij')
xarr = np.ravel(xx)
tarr = np.ravel(tt)
yarr = np.ones_like(xarr) * y_bot
point4 = np.stack([xarr, yarr, tarr], axis=0).T # (128x1x65, 3), boundary on y=0
yarr = np.ones_like(xarr) * y_top
point5 = np.stack([xarr, yarr, tarr], axis=0).T # (128x1x65, 3), boundary on y=2pi
points = np.concatenate([point0,
point2, point3,
point4, point5],
axis=0)
return points
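# --- Illustrative sketch (not part of the original module) ---
# Sampling boundary points of the space-time cube with made-up counts. The
# returned array stacks the t=0 plane and the four spatial walls, so it has
# num_x*num_y + 2*num_y*num_t + 2*num_x*num_t rows of (x, y, t) coordinates.
def _demo_boundary_points():
    pts = get_3dboundary_points(num_x=8, num_y=8, num_t=5)
    assert pts.shape == (8 * 8 + 2 * 8 * 5 + 2 * 8 * 5, 3)
    return pts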
def get_3dboundary(value):
boundary0 = value[0, :, :, 0:1] # 128x128x1, boundary on t=0
# boundary1 = value[0, :, :, -1:] # 128x128x1, boundary on t=0.5
boundary2 = value[0, 0:1, :, :] # 1x128x65, boundary on x=0
boundary3 = value[0, -1:, :, :] # 1x128x65, boundary on x=1
boundary4 = value[0, :, 0:1, :] # 128x1x65, boundary on y=0
boundary5 = value[0, :, -1:, :] # 128x1x65, boundary on y=1
part0 = np.ravel(boundary0)
# part1 = np.ravel(boundary1)
part2 = np.ravel(boundary2)
part3 = np.ravel(boundary3)
part4 = np.ravel(boundary4)
part5 = np.ravel(boundary5)
boundary = np.concatenate([part0,
part2, part3,
part4, part5],
axis=0)[:, np.newaxis]
return boundary
def get_xytgrid(S, T, bot=[0, 0, 0], top=[1, 1, 1]):
'''
Args:
S: number of points on each spatial domain
T: number of points on temporal domain including endpoint
bot: list or tuple, lower bound on each dimension
top: list or tuple, upper bound on each dimension
Returns:
(S * S * T, 3) array
'''
x_arr = np.linspace(bot[0], top[0], num=S, endpoint=False)
y_arr = np.linspace(bot[1], top[1], num=S, endpoint=False)
t_arr = np.linspace(bot[2], top[2], num=T)
xgrid, ygrid, tgrid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij')
xaxis = np.ravel(xgrid)
yaxis = np.ravel(ygrid)
taxis = np.ravel(tgrid)
points = np.stack([xaxis, yaxis, taxis], axis=0).T
return points
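# --- Illustrative sketch (not part of the original module) ---
# Build a small space-time grid with get_xytgrid; per its docstring the result
# has shape (S * S * T, 3). The sizes are made up for illustration.
def _demo_xytgrid():
    grid = get_xytgrid(S=16, T=9)
    assert grid.shape == (16 * 16 * 9, 3)
    return grid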
def get_2dgird(num=31):
    x = np.linspace(-1, 1, num)
import numpy as np
frame = np.random.rand(64, 64, 3)
from copy import deepcopy
from ELDAmwl.utils.wrapper import scipy_reduce_wrapper
from scipy.integrate import cumulative_trapezoid
from scipy.stats import sem
import numpy as np
import xarray as xr
def rolling_mean_sem(data, level):
"""calculate rolling mean and stderror of mean"""
means = data.rolling(level=level).reduce(np.mean)
# the use of scipy_reduce_wrapper is needed to deal with incompatible axis types
sems = data.rolling(level=level).reduce(scipy_reduce_wrapper(sem))
return means, sems
def calc_rolling_means_sems(ds, w_width):
ww0 = w_width[0, 0]
# calculate rolling means, std errs of mean, and rel sem
# if window_width are equal for all time slices,
# get means and sems at once
if np.all(w_width == ww0):
means, sems = rolling_mean_sem(ds.data, ww0)
# else do it for each time slice separately
else:
m_list = []
s_list = []
for t in range(ds.dims.time):
mean, sems = rolling_mean_sem(ds.data[t], w_width[t, 0])
m_list.append(mean)
s_list.append(sems)
means = xr.concat(m_list, 'time')
sems = xr.concat(s_list, 'time')
return means, sems
def find_minimum_window(means, sems, w_width, error_threshold):
rel_sem = sems / means
# find all means with rel_sem < error threshold:
# rel_sem.where(rel_sem.data < error_threshold)
# => rel_sem values and nans
# rel_sem.where(rel_sem.data < error_threshold) / rel_sem
# => ones and nans
# valid_means = means and nans
valid_means = (rel_sem.where(rel_sem.data < error_threshold) / rel_sem * means)
# min_idx is the last bin of rolling window with smallest mean
win_last_idx = np.nanargmin(valid_means.data, axis=1)
win_first_idx = (win_last_idx[:] - w_width[:, 0])
return win_first_idx, win_last_idx
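# --- Illustrative sketch (not part of the original module) ---
# A toy run of find_minimum_window above on a single time slice: bins whose
# relative standard error exceeds the threshold are masked out, then the
# rolling window ending at the smallest remaining mean is returned. The
# DataArrays and window width below are assumptions for demonstration only.
def _demo_find_minimum_window():
    means = xr.DataArray(np.array([[5., 4., 3., 2., 2.5, 6.]]), dims=('time', 'level'))
    sems = xr.DataArray(np.array([[0.1, 0.1, 0.1, 0.1, 2.0, 0.1]]), dims=('time', 'level'))
    w_width = np.array([[2]])
    first_idx, last_idx = find_minimum_window(means, sems, w_width, error_threshold=0.1)
    return first_idx, last_idx   # here: window ends at bin 3, starts at bin 1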
def closest_bin(data, error=None, first_bin=None, last_bin=None, search_value=None):
"""
finds the bin which has the value closest to the search value
Args:
data(np.array) : 1-dimensional vertical profile of the data
error(np.array): vertical profile of absolute data errors. Default = None
if provided: if the closest bin is not equal to the search value within its error, returns None.
first_bin, last_bin (int): first and last bin of the profile, where the search shall be done.
Default: None => the complete profile is used
search_value (float): Default=None. if not provided, the mean value of the profile is used
returns:
idx (int): the index which has the value closest to the search value
"""
if first_bin is None:
first_bin = 0
if last_bin is None:
last_bin = data.size
_data = data[first_bin:last_bin]
if search_value is not None:
_search_value = search_value
else:
_search_value = np.nanmean(_data)
diff = np.absolute(_data - _search_value)
min_idx = np.argmin(diff)
if first_bin is not None:
result = first_bin + min_idx
else:
result = min_idx
if error is not None:
        if abs(data[result] - _search_value) > error[result]:
result = None
return result
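# --- Illustrative sketch (not part of the original module) ---
# Find the bin of a made-up profile whose value is closest to a search value;
# all numbers are assumptions for demonstration only.
def _demo_closest_bin():
    profile = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
    return closest_bin(profile, search_value=5.0)   # -> 2, since 4.0 is closest to 5.0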
def integral_profile(data,
range_axis=None,
extrapolate_ovl_factor=None,
first_bin=None,
last_bin=None):
"""
calculates the vertical integral of a profile
uses the scipy.integrate.cumulative_trapezoid method.
if there are nan values, they are removed before integration and the resulting cumulative integral
will be interpolated to the original range axis.
Args:
data (ndarray, 1 dimensional): the ydata to be integrated
range_axis (ndarray, 1 dimensional): the xdata.
first_bin (int, optional): (default = 0) the first bin of the integration
last_bin (int, optional): (default = ydata.size) the last bin of the integration.
if last_bin < first_bin, the integration direction is reversed
extrapolate_ovl_factor (float, optional): (default = None) if not None, the profile is extrapolated towards
the ground by inserting a new data point with values
range_new = 0, data_new = data[0] * extrapolate_ovl_factor
    Returns:
        ndarray with the cumulative integral of the profile
    """
ydata = deepcopy(data)
xdata = deepcopy(range_axis)
if last_bin is None:
lb = ydata.size
else:
lb = last_bin
if first_bin is None:
fb = 0
else:
fb = first_bin
# if integration direction is downward -> flip data arrays and exchange fb, lb
reverse = False
if lb < fb:
reverse = True
xdata = np.flip(xdata)
ydata = np.flip(ydata)
lb = ydata.size - lb
fb = ydata.size - fb - 1
# use only profile parts between first_bin and last_bin
ydata = ydata[fb:lb]
xdata = xdata[fb:lb]
# remove nan data points
fill_nan = False
orig_xdata = None
if np.any(np.isnan(ydata)):
fill_nan = True
orig_xdata = xdata
nan_idxs = np.where(np.isnan(ydata))
ydata = np.delete(ydata, nan_idxs)
xdata = np.delete(xdata, nan_idxs)
# fill the overlap region with extrapolated values
# this is done by inserting an additional point at
# the beginning (if ascending range axis) or end (if descending range axis) of xdata and ydata arrays
# the insert_pos is 0 or -1, respectively.
# the new point has the values xdata= 0 and ydata = ydata[insert_pos] * extrapolate_ovl_factor
# insert_pos = None
if extrapolate_ovl_factor is not None:
# if the range axis is ascending, insert at position 0
if xdata[0] < xdata[-1]:
xdata = np.insert(xdata, 0, np.array([0]))
ydata = np.insert(ydata, 0, np.array(ydata[0]) * extrapolate_ovl_factor)
# if range axis is descending, append at the end
else:
xdata = np.append(xdata, np.array([0]))
ydata = np.append(ydata, np.array(ydata[-1]) * extrapolate_ovl_factor)
# calculate cumulative integral
result = cumulative_trapezoid(ydata, x=xdata, initial=0)
# add half first bin
if ydata.size > 1:
result = result + ydata[0] * (xdata[1] - xdata[0]) / 2
else:
result = np.array([np.nan])
# if integration direction is downward -> flip result and xdata
# note: the integral is usually negative because the differential x axis is negative
if reverse:
        result = np.flip(result)
import copy
from pathlib import Path
import numpy as np
import pytest
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.se2 import SE2
from argoverse.utils.sim2 import Sim2
TEST_DATA_ROOT = Path(__file__).resolve().parent / "test_data"
def test_constructor() -> None:
"""Sim(2) to perform p_b = bSa * p_a"""
bRa = np.eye(2)
bta = np.array([1, 2])
bsa = 3.0
bSa = Sim2(R=bRa, t=bta, s=bsa)
assert isinstance(bSa, Sim2)
assert np.allclose(bSa.R_, bRa)
assert np.allclose(bSa.t_, bta)
assert np.allclose(bSa.s_, bsa)
def test_is_eq() -> None:
"""Ensure object equality works properly (are equal)."""
    bSa = Sim2(R=np.eye(2), t=np.array([1, 2]), s=3.0)
"""
Implement principal-component-analysis tools.
.. include common links, assuming primary doc root is up one directory
.. include:: ../links.rst
"""
from IPython import embed
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from pypeit import msgs
from pypeit import utils
def pca_decomposition(vectors, npca=None, pca_explained_var=99.0, mean=None):
r"""
    Perform principal component analysis (PCA) for a set of 1D vectors.
The vectors are first passed to an unconstrained PCA to determine
the growth curve of the accounted variance as a function of the
PCA component. If specifying a number of PCA components to use
(see `npca`), this yields the percentage of the variance
accounted for in the analysis. If instead specifying the target
variance percentage (see `pca_explained_var`), this is used to
determine the number of PCA components to use in the final
analysis.
.. note::
This is a fully generalized convenience function for a
specific use of `sklearn.decomposition.PCA`_. When used
within PypeIt, the vectors to decompose (see, e.g.,
:class:`pypeit.edgetrace.EdgeTracePCA`) typically have the
length of the spectral axis. This means that, within PypeIt,
arrays are typically transposed when passed to this function.
Args:
vectors (`numpy.ndarray`_):
A 2D array with vectors to analyze with shape
:math:`(N_{\rm vec}, N_{\rm pix})`. All vectors must be
the same length and cannot be masked.
        npca (:obj:`int`, optional):
The number of PCA components to keep, which must be less
than :math:`N_{\rm vec}`. If `npca==nvec`, no PCA
compression occurs. If None, `npca` is automatically
determined by calculating the minimum number of
components required to explain a given percentage of
variance in the data. (see `pca_explained_var`).
pca_explained_var (:obj:`float`, optional):
The percentage (i.e., not the fraction) of the variance
in the data accounted for by the PCA used to truncate the
number of PCA coefficients to keep (see `npca`). Ignored
if `npca` is provided directly.
mean (`numpy.ndarray`_, optional):
The mean value of each vector to subtract from the data
before performing the PCA. If None, this is determined
directly from the data. Shape must be :math:`N_{\rm
vec}`.
Returns:
Returns four `numpy.ndarray`_ objects:
- The coefficients of each PCA component, `coeffs`. Shape
is :math:`(N_{\rm vec},N_{\rm comp})`.
- The PCA component vectors, `components`. Shape is
:math:`(N_{\rm comp},N_{\rm pix})`.
- The mean offset of each PCA for each pixel, `pca_mean`.
Shape is :math:`(N_{\rm pix},)`.
- The mean offset applied to each vector before the PCA,
`vec_mean`. Shape is :math:`(N_{\rm vec},)`.
To reconstruct the PCA representation of the input vectors, compute::
np.dot(coeffs, components) + pca_mean[None,:] + vec_mean[:,None]
"""
# Check input
if vectors.ndim != 2:
raise ValueError('Input trace data must be a 2D array')
nvec = vectors.shape[0]
if nvec < 2:
raise ValueError('There must be at least 2 vectors for the PCA analysis.')
# Take out the mean value of each vector
if mean is None:
mean = np.mean(vectors, axis=1)
vec_pca = vectors - mean[:,None]
# Perform unconstrained PCA of the vectors
pca = PCA()
pca.fit(vec_pca)
# Compute the cumulative distribution of the variance explained by the PCA components.
# TODO: Why round to 6 decimals? Why work in percentages?
var_growth = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=6) * 100)
# Number of components for a full decomposition
npca_tot = var_growth.size
msgs.info('The unconstrained PCA yields {0} components.'.format(npca_tot))
if npca is None:
# Assign the number of components to use based on the variance
# percentage
if pca_explained_var is None:
raise ValueError('Must provide percentage explained variance.')
npca = int(np.ceil(np.interp(pca_explained_var, var_growth, np.arange(npca_tot)+1))) \
if var_growth[0] < pca_explained_var else 1
elif npca_tot < npca:
raise ValueError('Too few vectors for a PCA of the requested dimensionality. '
                         'The full (uncompressed) PCA has {0} component(s)'.format(npca_tot)
+ ', which is less than the requested {0} component(s).'.format(npca)
+ ' Lower the number of requested PCA component(s) or turn off the PCA.')
msgs.info('PCA will include {0} component(s), '.format(npca)
+ 'containing {0:.3f}% of the total variance.'.format(var_growth[npca-1]))
# Determine the PCA coefficients with the revised number of
# components, and return the results
pca = PCA(n_components=npca)
pca_coeffs = pca.fit_transform(vec_pca)
return pca_coeffs, pca.components_, pca.mean_, mean
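# --- Illustrative sketch (not part of the original module) ---
# Decompose a small synthetic set of vectors with pca_decomposition and rebuild
# them with the reconstruction formula quoted in the docstring above. The
# synthetic rank-2 data and the choice of npca=2 are assumptions made purely
# for illustration.
def _demo_pca_decomposition():
    rng = np.random.default_rng(0)
    basis = rng.normal(size=(2, 50))
    vectors = rng.normal(size=(20, 2)) @ basis + rng.normal(scale=0.01, size=(20, 50))
    coeffs, components, pca_mean, vec_mean = pca_decomposition(vectors, npca=2)
    recon = np.dot(coeffs, components) + pca_mean[None, :] + vec_mean[:, None]
    max_err = np.max(np.abs(recon - vectors))   # small, since 2 components capture the data
    return coeffs.shape, components.shape, max_err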
def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legendre', lower=3.0,
upper=3.0, maxrej=1, maxiter=25, coo=None, minx=None, maxx=None,
debug=False):
r"""
Fit a parameterized function to a set of PCA coefficients,
primarily for the purpose of predicting coefficients at
intermediate locations.
The coefficients of each PCA component are fit by a low-order
polynomial, where the abscissa is set by the `coo` argument (see
:func:`pypeit.utils.robust_polyfit_djs`).
.. note::
This is a general function, not really specific to the PCA;
and is really just a wrapper for
:func:`pypeit.utils.robust_polyfit_djs`.
Args:
coeff (`numpy.ndarray`_):
PCA component coefficients. If the PCA decomposition used
:math:`N_{\rm comp}` components for :math:`N_{\rm vec}`
vectors, the shape of this array must be :math:`(N_{\rm
vec}, N_{\rm comp})`. The array can be 1D with shape
:math:`(N_{\rm vec},)` if there was only one PCA
component.
order (:obj:`int`, `numpy.ndarray`_):
The order, :math:`o`, of the function used to fit the PCA
coefficients. Can be a single number for all PCA
components, or an array with an order specific to each
component. If the latter, the shape must be
:math:`(N_{\rm comp},)`.
ivar (`numpy.ndarray`_, optional):
Inverse variance in the PCA coefficients to use during
the fit; see the `invvar` parameter of
:func:`pypeit.utils.robust_polyfit_djs`. If None, fit is
not error weighted. If a vector with shape :math:`(N_{\rm
vec},)`, the same error will be assumed for all PCA
components (i.e., `ivar` will be expanded to match the
shape of `coeff`). If a 2D array, the shape must match
`coeff`.
weights (`numpy.ndarray`_, optional):
Weights to apply to the PCA coefficients during the fit;
see the `weights` parameter of
:func:`pypeit.utils.robust_polyfit_djs`. If None, the
weights are uniform. If a vector with shape
:math:`(N_{\rm vec},)`, the same weights will be assumed
for all PCA components (i.e., `weights` will be expanded
to match the shape of `coeff`). If a 2D array, the shape
must match `coeff`.
function (:obj:`str`, optional):
Type of function used to fit the data.
lower (:obj:`float`, optional):
Number of standard deviations used for rejecting data
**below** the mean residual. If None, no rejection is
performed. See :func:`utils.robust_polyfit_djs`.
upper (:obj:`float`, optional):
Number of standard deviations used for rejecting data
**above** the mean residual. If None, no rejection is
performed. See :func:`utils.robust_polyfit_djs`.
maxrej (:obj:`int`, optional):
Maximum number of points to reject during fit iterations.
See :func:`utils.robust_polyfit_djs`.
maxiter (:obj:`int`, optional):
            Maximum number of rejection iterations allowed. To force
no rejection iterations, set to 0.
coo (`numpy.ndarray`_, optional):
Floating-point array with the independent coordinates to
use when fitting the PCA coefficients. If None, simply
uses a running number. Shape must be :math:`(N_{\rm
vec},)`.
minx, maxx (:obj:`float`, optional):
Minimum and maximum values used to rescale the
independent axis data. If None, the minimum and maximum
values of `coo` are used. See
:func:`utils.robust_polyfit_djs`.
debug (:obj:`bool`, optional):
Show plots useful for debugging.
Returns:
Returns four objects:
- A boolean `numpy.ndarray`_ masking data (`coeff`) that
were rejected during the polynomial fitting. Shape is the
same as the input `coeff`.
- A `list` of `numpy.ndarray`_ objects (or a single
`numpy.ndarray`_), one per PCA component where the length
of the 1D array is the number of coefficients fit to the
PCA-component coefficients. The number of function
coefficients is typically :math:`N_{\rm coeff} = o+1`.
- The minimum and maximum coordinate values used to rescale
the abscissa during the fitting.
"""
# Check the input
# - Get the shape of the input data to fit
_coeff = np.asarray(coeff)
if _coeff.ndim == 1:
_coeff = np.expand_dims(_coeff, 1)
if _coeff.ndim != 2:
        raise ValueError('Array with coefficients cannot be more than 2D')
nvec, npca = _coeff.shape
    # - Check the inverse variance
    _ivar = None if ivar is None else np.asarray(ivar)
    if _ivar is not None and _ivar.ndim == 1:
        # Expand a 1D error vector so the same errors apply to every PCA
        # component, matching the behavior described in the docstring.
        _ivar = np.tile(_ivar, (_coeff.shape[1], 1)).T
    if _ivar is not None and _ivar.shape != _coeff.shape:
        raise ValueError('Inverse variance array does not match input coefficients.')
# - Check the weights
_weights = np.ones(_coeff.shape, dtype=float) if weights is None else np.asarray(weights)
if _weights.ndim == 1:
_weights = np.tile(_weights, (_coeff.shape[1],1)).T
if _weights.shape != _coeff.shape:
raise ValueError('Weights array does not match input coefficients.')
# - Set the abscissa of the data if not provided and check its
# shape
if coo is None:
coo = np.arange(nvec, dtype=float)
if coo.size != nvec:
raise ValueError('Vector coordinates have incorrect shape.')
# - Check the order of the functions to fit
_order = np.atleast_1d(order)
if _order.size == 1:
_order = np.full(npca, order, dtype=int)
if _order.size != npca:
raise ValueError('Function order must be a single number or one number per PCA component.')
# - Force the values of minx and maxx if they're not provided directly
if minx is None:
minx = np.amin(coo)
if maxx is None:
maxx = np.amax(coo)
# Instantiate the output
coeff_used = np.ones(_coeff.shape, dtype=bool)
fit_coeff = [None]*npca
# TODO: This fitting is fast. Maybe we should determine the best
# order for each PCA component, up to some maximum, by comparing
# reduction in chi-square vs added number of parameters?
# Fit the coefficients of each PCA component so that they can be
# interpolated to other coordinates.
inmask = np.ones_like(coo, dtype=bool)
for i in range(npca):
coeff_used[:,i], fit_coeff[i] \
= utils.robust_polyfit_djs(coo, _coeff[:,i], _order[i], inmask=inmask,
invvar=None if _ivar is None else _ivar[:,i],
weights=_weights[:,i], function=function,
maxiter=maxiter, lower=lower, upper=upper,
maxrej=maxrej, sticky=False, use_mad=_ivar is None,
minx=minx, maxx=maxx)
if debug:
# Visually check the fits
xvec = np.linspace(np.amin(coo), np.amax(coo), num=100)
rejected = np.invert(coeff_used[:,i]) & inmask
plt.scatter(coo[inmask], _coeff[inmask,i], marker='.', color='k', s=100,
facecolor='none', label='pca coeff')
plt.scatter(coo[np.invert(inmask)], _coeff[np.invert(inmask),i], marker='.',
color='orange', s=100, facecolor='none',
label='pca coeff, masked from previous')
if np.any(rejected):
plt.scatter(coo[rejected], _coeff[rejected,i], marker='x', color='C3', s=80,
label='robust_polyfit_djs rejected')
plt.plot(xvec, utils.func_val(fit_coeff[i], xvec, function, minx=minx, maxx=maxx),
linestyle='--', color='C0',
label='Polynomial fit of order={0}'.format(_order[i]))
plt.xlabel('Trace Coordinate', fontsize=14)
plt.ylabel('PCA Coefficient', fontsize=14)
plt.title('PCA Fit for Dimension #{0}/{1}'.format(i+1, npca))
plt.legend()
plt.show()
inmask=coeff_used[:,i]
# Return arrays that match the shape of the input data
if coeff.ndim == 1:
return np.invert(coeff_used)[0], fit_coeff[0], minx, maxx
return
|
np.invert(coeff_used)
|
numpy.invert
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from rlgraph.components.policies import Policy, SharedValueFunctionPolicy, DuelingPolicy
from rlgraph.spaces import *
from rlgraph.tests import ComponentTest
from rlgraph.tests.test_util import config_from_path
from rlgraph.utils import softmax, relu
class TestPoliciesOnContainerActions(unittest.TestCase):
def test_policy_for_discrete_container_action_space(self):
# state_space.
state_space = FloatBox(shape=(4,), add_batch_rank=True)
# Container action space.
action_space = dict(
type="dict",
a=IntBox(2),
b=IntBox(3),
add_batch_rank=True
)
flat_float_action_space = dict(
type="dict",
a=FloatBox(shape=(2,)),
b=FloatBox(shape=(3,)),
add_batch_rank=True
)
policy = Policy(network_spec=config_from_path("configs/test_simple_nn.json"), action_space=action_space)
test = ComponentTest(
component=policy,
input_spaces=dict(
nn_input=state_space,
actions=action_space,
probabilities=flat_float_action_space,
logits=flat_float_action_space
),
action_space=action_space
)
policy_params = test.read_variable_values(policy.variables)
# Some NN inputs (batch size=2).
states = state_space.sample(2)
# Raw NN-output.
expected_nn_output = np.matmul(states, policy_params["policy/test-network/hidden-layer/dense/kernel"])
test.test(("get_nn_output", states), expected_outputs=dict(output=expected_nn_output), decimals=6)
# Raw action layers' output.
expected_action_layer_outputs = dict(
a=np.matmul(expected_nn_output, policy_params["policy/action-adapter-0/action-network/action-layer/dense/kernel"]),
b=np.matmul(expected_nn_output, policy_params["policy/action-adapter-1/action-network/action-layer/dense/kernel"])
)
test.test(("get_action_layer_output", states), expected_outputs=dict(output=expected_action_layer_outputs),
decimals=5)
# Logits, parameters (probs) and skip log-probs (numerically unstable for small probs).
expected_probabilities_output = dict(
a=np.array(softmax(expected_action_layer_outputs["a"], axis=-1), dtype=np.float32),
b=np.array(softmax(expected_action_layer_outputs["b"], axis=-1), dtype=np.float32)
)
test.test(("get_logits_probabilities_log_probs", states, ["logits", "probabilities"]), expected_outputs=dict(
logits=expected_action_layer_outputs, probabilities=expected_probabilities_output
), decimals=5)
print("Probs: {}".format(expected_probabilities_output))
expected_actions = dict(
a=np.argmax(expected_action_layer_outputs["a"], axis=-1),
b=np.argmax(expected_action_layer_outputs["b"], axis=-1)
)
test.test(("get_action", states), expected_outputs=dict(action=expected_actions))
# Stochastic sample.
out = test.test(("get_stochastic_action", states), expected_outputs=None) # dict(action=expected_actions))
self.assertTrue(out["action"]["a"].dtype == np.int32)
self.assertTrue(out["action"]["a"].shape == (2,))
self.assertTrue(out["action"]["b"].dtype == np.int32)
self.assertTrue(out["action"]["b"].shape == (2,))
# Deterministic sample.
test.test(("get_deterministic_action", states), expected_outputs=None) # dict(action=expected_actions))
self.assertTrue(out["action"]["a"].dtype == np.int32)
self.assertTrue(out["action"]["a"].shape == (2,))
self.assertTrue(out["action"]["b"].dtype == np.int32)
self.assertTrue(out["action"]["b"].shape == (2,))
# Distribution's entropy.
out = test.test(("get_entropy", states), expected_outputs=None) # dict(entropy=expected_h), decimals=3)
self.assertTrue(out["entropy"]["a"].dtype == np.float32)
self.assertTrue(out["entropy"]["a"].shape == (2,))
self.assertTrue(out["entropy"]["b"].dtype == np.float32)
self.assertTrue(out["entropy"]["b"].shape == (2,))
# Action log-probs.
expected_action_log_prob_output = dict(
a=np.log(np.array([expected_probabilities_output["a"][0][expected_actions["a"][0]],
expected_probabilities_output["a"][1][expected_actions["a"][1]]])),
b=np.log(np.array([expected_probabilities_output["b"][0][expected_actions["b"][0]],
expected_probabilities_output["b"][1][expected_actions["b"][1]]])),
)
test.test(
("get_action_log_probs", [states, expected_actions]), expected_outputs=dict(
action_log_probs=expected_action_log_prob_output, logits=expected_action_layer_outputs
), decimals=5
)
def test_shared_value_function_policy_for_discrete_container_action_space(self):
# state_space (NN is a simple single fc-layer relu network (2 units), random biases, random weights).
state_space = FloatBox(shape=(5,), add_batch_rank=True)
# action_space (complex nested container action space).
action_space = dict(
type="dict",
a=IntBox(2),
b=Dict(b1=IntBox(3), b2=IntBox(4)),
add_batch_rank=True
)
flat_float_action_space = dict(
type="dict",
a=FloatBox(shape=(2,)),
b=Dict(b1=FloatBox(shape=(3,)), b2=FloatBox(shape=(4,))),
add_batch_rank=True
)
# Policy with baseline action adapter.
shared_value_function_policy = SharedValueFunctionPolicy(
network_spec=config_from_path("configs/test_lrelu_nn.json"),
action_space=action_space
)
test = ComponentTest(
component=shared_value_function_policy,
input_spaces=dict(
nn_input=state_space,
actions=action_space,
probabilities=flat_float_action_space,
logits=flat_float_action_space
),
action_space=action_space,
)
policy_params = test.read_variable_values(shared_value_function_policy.variables)
base_scope = "shared-value-function-policy/action-adapter-"
# Some NN inputs (batch size=2).
states = state_space.sample(size=2)
# Raw NN-output.
expected_nn_output = relu(np.matmul(
states, policy_params["shared-value-function-policy/test-network/hidden-layer/dense/kernel"]
), 0.1)
test.test(("get_nn_output", states), expected_outputs=dict(output=expected_nn_output), decimals=5)
# Raw action layers' output.
expected_action_layer_outputs = dict(
a=np.matmul(expected_nn_output, policy_params[base_scope + "0/action-network/action-layer/dense/kernel"]),
b=dict(b1=np.matmul(expected_nn_output, policy_params[base_scope + "1/action-network/action-layer/dense/kernel"]),
b2=np.matmul(expected_nn_output, policy_params[base_scope + "2/action-network/action-layer/dense/kernel"]))
)
test.test(("get_action_layer_output", states), expected_outputs=dict(output=expected_action_layer_outputs),
decimals=5)
# State-values.
expected_state_value_output = np.matmul(
expected_nn_output, policy_params["shared-value-function-policy/value-function-node/dense-layer/dense/kernel"]
)
test.test(("get_state_values", states), expected_outputs=dict(state_values=expected_state_value_output),
decimals=5)
# logits-values: One for each action-choice per item in the batch (simply take the remaining out nodes).
test.test(("get_state_values_logits_probabilities_log_probs", states, ["state_values", "logits"]),
expected_outputs=dict(state_values=expected_state_value_output, logits=expected_action_layer_outputs),
decimals=5)
# Parameter (probabilities). Softmaxed logits.
expected_probabilities_output = dict(
a=softmax(expected_action_layer_outputs["a"], axis=-1),
b=dict(
b1=softmax(expected_action_layer_outputs["b"]["b1"], axis=-1),
b2=softmax(expected_action_layer_outputs["b"]["b2"], axis=-1)
)
)
test.test(("get_logits_probabilities_log_probs", states, ["logits", "probabilities"]), expected_outputs=dict(
logits=expected_action_layer_outputs,
probabilities=expected_probabilities_output
), decimals=5)
print("Probs: {}".format(expected_probabilities_output))
# Action sample.
expected_actions = dict(
a=np.argmax(expected_action_layer_outputs["a"], axis=-1),
b=dict(
b1=np.argmax(expected_action_layer_outputs["b"]["b1"], axis=-1),
b2=np.argmax(expected_action_layer_outputs["b"]["b2"], axis=-1)
)
)
test.test(("get_action", states), expected_outputs=dict(action=expected_actions))
# Stochastic sample.
out = test.test(("get_stochastic_action", states), expected_outputs=None)
self.assertTrue(out["action"]["a"].dtype == np.int32)
self.assertTrue(out["action"]["a"].shape == (2,))
self.assertTrue(out["action"]["b"]["b1"].dtype == np.int32)
self.assertTrue(out["action"]["b"]["b1"].shape == (2,))
self.assertTrue(out["action"]["b"]["b2"].dtype == np.int32)
self.assertTrue(out["action"]["b"]["b2"].shape == (2,))
# Deterministic sample.
out = test.test(("get_deterministic_action", states), expected_outputs=None)
self.assertTrue(out["action"]["a"].dtype == np.int32)
self.assertTrue(out["action"]["a"].shape == (2,))
self.assertTrue(out["action"]["b"]["b1"].dtype == np.int32)
self.assertTrue(out["action"]["b"]["b1"].shape == (2,))
self.assertTrue(out["action"]["b"]["b2"].dtype == np.int32)
self.assertTrue(out["action"]["b"]["b2"].shape == (2,))
# Distribution's entropy.
out = test.test(("get_entropy", states), expected_outputs=None)
self.assertTrue(out["entropy"]["a"].dtype == np.float32)
self.assertTrue(out["entropy"]["a"].shape == (2,))
self.assertTrue(out["entropy"]["b"]["b1"].dtype == np.float32)
self.assertTrue(out["entropy"]["b"]["b1"].shape == (2,))
self.assertTrue(out["entropy"]["b"]["b2"].dtype == np.float32)
self.assertTrue(out["entropy"]["b"]["b2"].shape == (2,))
def test_shared_value_function_policy_for_discrete_container_action_space_with_time_rank_folding(self):
# state_space (NN is a simple single fc-layer relu network (2 units), random biases, random weights).
state_space = FloatBox(shape=(6,), add_batch_rank=True, add_time_rank=True)
# Action_space.
action_space = Tuple(
IntBox(2),
IntBox(3),
Dict(
a=IntBox(4),
),
add_batch_rank=True,
add_time_rank=True
)
flat_float_action_space = Tuple(
FloatBox(shape=(2,)),
FloatBox(shape=(3,)),
Dict(
a=FloatBox(shape=(4,)),
),
add_batch_rank=True,
add_time_rank=True
)
# Policy with baseline action adapter AND batch-apply over the entire policy (NN + ActionAdapter + distr.).
network_spec = config_from_path("configs/test_lrelu_nn.json")
network_spec["fold_time_rank"] = True
shared_value_function_policy = SharedValueFunctionPolicy(
network_spec=network_spec,
action_adapter_spec=dict(unfold_time_rank=True),
action_space=action_space,
value_unfold_time_rank=True
)
test = ComponentTest(
component=shared_value_function_policy,
input_spaces=dict(
nn_input=state_space,
actions=action_space,
probabilities=flat_float_action_space,
logits=flat_float_action_space
),
action_space=action_space,
)
policy_params = test.read_variable_values(shared_value_function_policy.variables)
base_scope = "shared-value-function-policy/action-adapter-"
# Some NN inputs.
states = state_space.sample(size=(2, 3))
states_folded = np.reshape(states, newshape=(6, 6))
# Raw NN-output (still folded).
expected_nn_output = relu(np.matmul(
states_folded, policy_params["shared-value-function-policy/test-network/hidden-layer/dense/kernel"]
), 0.1)
test.test(("get_nn_output", states), expected_outputs=dict(output=expected_nn_output), decimals=5)
        # Raw action layers' output (one array per action component; still time-rank folded, so each has 6 = 2 batch x 3 time rows).
expected_action_layer_output = tuple([
np.matmul(expected_nn_output, policy_params[base_scope + "0/action-network/action-layer/dense/kernel"]),
np.matmul(expected_nn_output, policy_params[base_scope + "1/action-network/action-layer/dense/kernel"]),
dict(
a=np.matmul(expected_nn_output, policy_params[base_scope + "2/action-network/action-layer/dense/kernel"])
)
])
expected_action_layer_output_unfolded = tuple([
np.reshape(expected_action_layer_output[0], newshape=(2, 3, 2)),
np.reshape(expected_action_layer_output[1], newshape=(2, 3, 3)),
dict(
a=np.reshape(expected_action_layer_output[2]["a"], newshape=(2, 3, 4))
)
])
test.test(("get_action_layer_output", states),
expected_outputs=dict(output=expected_action_layer_output_unfolded),
decimals=5)
# State-values: One for each item in the batch.
expected_state_value_output = np.matmul(
expected_nn_output,
policy_params["shared-value-function-policy/value-function-node/dense-layer/dense/kernel"]
)
expected_state_value_output_unfolded = np.reshape(expected_state_value_output, newshape=(2, 3, 1))
test.test(("get_state_values", states),
expected_outputs=dict(state_values=expected_state_value_output_unfolded), decimals=5)
test.test(
("get_state_values_logits_probabilities_log_probs", states, ["state_values", "logits"]),
expected_outputs=dict(
state_values=expected_state_value_output_unfolded, logits=expected_action_layer_output_unfolded
), decimals=5
)
# Parameter (probabilities). Softmaxed logits.
expected_probabilities_output = tuple([
softmax(expected_action_layer_output_unfolded[0], axis=-1),
softmax(expected_action_layer_output_unfolded[1], axis=-1),
dict(
a=softmax(expected_action_layer_output_unfolded[2]["a"], axis=-1)
)
])
test.test(("get_logits_probabilities_log_probs", states, ["logits", "probabilities"]),
expected_outputs=dict(
logits=expected_action_layer_output_unfolded,
probabilities=expected_probabilities_output
), decimals=5)
print("Probs: {}".format(expected_probabilities_output))
expected_actions = tuple([
np.argmax(expected_action_layer_output_unfolded[0], axis=-1),
np.argmax(expected_action_layer_output_unfolded[1], axis=-1),
dict(
a=
|
np.argmax(expected_action_layer_output_unfolded[2]["a"], axis=-1)
|
numpy.argmax
|
# -*- coding: utf-8 -*-
"""
The module contains the Somoclu class that trains and visualizes
self-organizing maps and emergent self-organizing maps.
Created on Sun July 26 15:07:47 2015
@author: <NAME>
"""
from __future__ import division, print_function
import sys
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
import numpy as np
from scipy.spatial.distance import cdist
try:
import seaborn as sns
from sklearn.metrics.pairwise import pairwise_distances
have_heatmap = True
except ImportError:
have_heatmap = False
try:
from .somoclu_wrap import train as wrap_train
except ImportError:
print("Warning: the binary library cannot be imported. You cannot train "
"maps, but you can load and analyze ones that you have already saved.")
if sys.platform.startswith('win'):
print("If you installed Somoclu with pip on Windows, this typically "
"means missing DLLs. Please refer to the documentation.")
elif sys.platform.startswith('darwin'):
print("If you installed Somoclu with pip on macOS, this typically "
"means missing a linked library. If you compiled Somoclu with "
"GCC, please make sure you have set DYLD_LIBRARY_PATH to include "
"the GCC path. For more information, please refer to the "
"documentation.")
else:
print("The problem occurs because either compilation failed when you "
"installed Somoclu or a path is missing from the dependencies "
"when you are trying to import it. Please refer to the "
"documentation to see your options.")
def is_pos_real(s):
""" Returns True if s is a positive real.
"""
try:
return (float(s) > 0)
except ValueError:
return False
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param vect_distance: Optional parameter to specify the vector distance function:
* "euclidean": Euclidean (default)
* "norm-inf": infinite norm (max absolute distance among components)
* "norm-p": p-th root of sum of absolute differences ^ p (only supported by kerneltype 0)
:type vect_distance: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0, vect_distance="euclidean"):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._vect_distance = vect_distance
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix, self._vect_distance)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # A 1D vector cannot be unpacked into two values, so take its length.
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if not (self._vect_distance == "euclidean" or self._vect_distance == "norm-inf"
or (self._vect_distance[:5] == "norm-" and is_pos_real(self._vect_distance[5:]))):
raise Exception("Invalid parameter for vect_distance: " +
self._vect_distance)
if (self._vect_distance[:5] == "norm-" and self._kernel_type != 0):
raise Exception("Invalid parameter for vect_distance: " +
self._vect_distance + " when using kernel_type: " + self._kernel_type)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernelTye: " +
self._kernel_type)
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except:
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distance between the data and the codebook nodes.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(
self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate(
(am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map, order='F'):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:param order: order of returned numpy array, 'F' for column-major
(Fortran-style) or 'C' for row-major (C-style).
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
if order == 'F':
return np.vstack((X, Y)).T
elif order == 'C':
return np.vstack((Y, X)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
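# --- Illustrative usage sketch (editor's addition, not part of Somoclu) ---
# The class docstring above documents the constructor and training options; the
# sketch below ties the main calls together on random data. The map size, epoch
# count, and data are made up, and `train` requires the compiled wrapper to be
# importable. The function is only defined, never called on import.
def _somoclu_usage_sketch():
    data = np.float32(np.random.rand(100, 3))       # 100 vectors, 3 dimensions
    som = Somoclu(n_columns=20, n_rows=15, maptype="planar",
                  gridtype="rectangular")
    som.train(data, epochs=10)                       # fills codebook, bmus, umatrix
    som.cluster()                                    # default: K-means on the codebook
    activation = som.get_surface_state()             # node distances for each vector
    bmus = som.get_bmus(activation)                  # best matching unit per vector
    return bmus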
def _check_cooling_parameters(radiuscooling, scalecooling):
"""Helper function to verify the cooling parameters of the training.
"""
if radiuscooling != "linear" and radiuscooling != "exponential":
raise Exception("Invalid parameter for radiuscooling: " +
radiuscooling)
if scalecooling != "linear" and scalecooling != "exponential":
raise Exception("Invalid parameter for scalecooling: " +
scalecooling)
def _hexplot(matrix, fig, colormap):
"""Internal function to plot a hexagonal map.
"""
umatrix_min = matrix.min()
umatrix_max = matrix.max()
n_rows, n_columns = matrix.shape
cmap = plt.get_cmap(colormap)
offsets = np.zeros((n_columns * n_rows, 2))
facecolors = []
for row in range(n_rows):
for col in range(n_columns):
if row % 2 == 0:
offsets[row * n_columns + col] = [col +
0.5, 2 * n_rows - 2 * row]
facecolors.append(cmap((matrix[row, col] - umatrix_min) /
(umatrix_max) * 255))
else:
offsets[row * n_columns + col] = [col, 2 * n_rows - 2 * row]
facecolors.append(cmap((matrix[row, col] - umatrix_min) /
(umatrix_max) * 255))
polygon = np.zeros((6, 2), float)
polygon[:, 0] = 1.1 * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:, 1] = 1.1 * np.array([-np.sqrt(3) / 6, np.sqrt(3) / 6,
np.sqrt(3) / 2 + np.sqrt(3) / 6,
np.sqrt(3) / 6, -np.sqrt(3) / 6,
-np.sqrt(3) / 2 - np.sqrt(3) / 6])
polygons = np.expand_dims(polygon, 0) +
|
np.expand_dims(offsets, 1)
|
numpy.expand_dims
|
# --------------
import numpy as np
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
census=np.concatenate((data, new_record),axis = 0)
print(census.shape)
# --------------
#Code starts here
import numpy as np
age=census[:,0]
print(age)
max_age = np.max(age)
print(max_age)
min_age = np.min(age)
print(min_age)
age_mean =
|
np.mean(age)
|
numpy.mean
|
import numpy as np
import sys
import TrainNetwork.TN_BaseFunctions as bf
from sklearn.neighbors import NearestNeighbors
import skimage.measure as measure
import cv2
if sys.platform == 'darwin':
from mcdc import mcdc
else:
from DeepConcolic.src.mcdc import mcdc
class DetectionRefinement:
def __init__(self, input_image, compensatedImages, BackgroundSubtractionDetections, BackgroundSubtractionProperties, model_binary, aveImg_binary, model_regression, aveImg_regression, attack):
self.num_of_template = len(compensatedImages)
self.img_t = input_image
self.img_tminus1 = compensatedImages[self.num_of_template-1]
self.img_tminus2 = compensatedImages[self.num_of_template-2]
self.img_tminus3 = compensatedImages[self.num_of_template-3]
self.original_detections = BackgroundSubtractionDetections
self.detections = BackgroundSubtractionDetections
self.bgProperties = BackgroundSubtractionProperties
self.model_binary = model_binary
self.aveImg_binary = aveImg_binary
self.model_regression = model_regression
self.aveImg_regression = aveImg_regression
self.refinedDetectionsID = []
self.regressedDetectionID = []
self.refinementID=None
self.attack = attack
def doMovingVehicleRefinement(self):
img_shape = self.img_t.shape
width = img_shape[1]
height = img_shape[0]
num_points = len(self.original_detections)
X = np.ndarray((num_points, 4, 21, 21), dtype=np.float32)
        mask1 = np.zeros(num_points, dtype=bool)
for i, thisdetection in enumerate(self.original_detections):
minx = np.int32(
|
np.round(thisdetection[0] - 10)
|
numpy.round
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.config import cfg
from model.test import im_detect
from model.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
import random
import torch
import xml.etree.ElementTree as ET
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def softmax(ary):
ary = ary.flatten()
expa = np.exp(ary)
dom = np.sum(expa)
return expa/dom
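# Illustrative note (editor's addition, not part of the original script): the
# softmax above maps raw scores to a probability vector; the hypothetical check
# below is only defined, never called. Note that the implementation does not
# subtract the maximum score before exponentiating, so very large inputs can
# overflow; np.exp(ary - np.max(ary)) would be the numerically safer form.
def _softmax_sanity_check():
    probs = softmax(np.array([1.0, 2.0, 3.0]))
    # Probabilities are positive and sum to 1 (up to floating-point error).
    assert np.all(probs > 0) and np.isclose(np.sum(probs), 1.0)
    return probs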
def choose_model(dir):
'''
    get the latest model in dir'''
lists = os.listdir(dir)
lists.sort(key= lambda fn:os.path.getmtime(os.path.join(dir,fn)))
return lists[-1]
def load_model(net_file ,path):
'''
return caffe.Net'''
import caffe
net = caffe.Net(net_file, path, caffe.TEST)
return net
def judge_y(score):
'''return :
y:np.array len(score)
'''
y=[]
for s in score:
if s==1 or
|
np.log(s)
|
numpy.log
|
'''
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
#from scipy.optimize import minimize
## \max_u \mathrm{Cov}(u'X,Y1)^2 - \lambda\mathrm{Cov}(u'X,Y2)^2, s.t. u'u =1
def run(X,y1,y2,dmax=None):
D,N = X.shape
y1_unique,J1 = np.unique(y1, return_inverse=True)
ny1 = y1_unique.size
y2_unique,J2 = np.unique(y2, return_inverse=True)
ny2 = y2_unique.size
Y1 = np.zeros((ny1,N))
Y2 = np.zeros((ny2,N))
Y1[J1,range(N)] = 1.
Y2[J2,range(N)] = 1.
XY2 = np.dot(X,Y2.T) # D x ny2
XY2Y2X = np.dot(XY2,XY2.T) # D x D
XX = np.dot(X,X.T) # D x D
P = np.zeros((D,0))
Sj =
|
np.dot(X,Y1.T)
|
numpy.dot
|
"""
Credits:
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import unittest
import numpy as np
from datetime import date, timedelta
from eolearn.core import EOPatch, FeatureType
from eolearn.features import AddMaxMinNDVISlopeIndicesTask, AddMaxMinTemporalIndicesTask,\
AddSpatioTemporalFeaturesTask
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
class TestTemporalFeaturesTasks(unittest.TestCase):
def test_temporal_indices(self):
""" Test case for computation of argmax/argmin of NDVI and another band
Cases with and without data masking are tested
"""
# EOPatch
eopatch = EOPatch()
t, h, w, c = 5, 3, 3, 2
# NDVI
ndvi_shape = (t, h, w, 1)
        # Valid data mask
valid_data = np.ones(ndvi_shape, bool)
valid_data[0] = 0
valid_data[-1] = 0
# Fill in eopatch
eopatch.add_feature(FeatureType.DATA, 'NDVI', np.arange(
|
np.prod(ndvi_shape)
|
numpy.prod
|
# Copyright (c) 2008,2015,2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `kinematics` module."""
from collections import namedtuple
import numpy as np
import pytest
import xarray as xr
from metpy.calc import (absolute_vorticity, advection, ageostrophic_wind, coriolis_parameter,
divergence, frontogenesis, geostrophic_wind, inertial_advective_wind,
lat_lon_grid_deltas, montgomery_streamfunction,
potential_temperature, potential_vorticity_baroclinic,
potential_vorticity_barotropic, q_vector, shearing_deformation,
static_stability, storm_relative_helicity, stretching_deformation,
total_deformation, vorticity, wind_components)
from metpy.constants import g, omega, Re
from metpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_array_equal,
get_test_data)
from metpy.units import concatenate, units
def test_default_order():
"""Test using the default array ordering."""
u = np.ones((3, 3)) * units('m/s')
v = vorticity(u, u, 1 * units.meter, 1 * units.meter)
true_v = np.zeros_like(u) / units.sec
assert_array_equal(v, true_v)
def test_zero_vorticity():
"""Test vorticity calculation when zeros should be returned."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
v = vorticity(u, u.T, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_v = np.zeros_like(u) / units.sec
assert_array_equal(v, true_v)
def test_vorticity():
"""Test vorticity for simple case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
v = vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_v = np.ones_like(u) / units.sec
assert_array_equal(v, true_v)
def test_vorticity_asym():
"""Test vorticity calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
vort = vorticity(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_vort = np.array([[-2.5, 3.5, 13.], [8.5, -1.5, -11.], [-5.5, -1.5, 0.]]) / units.sec
assert_array_equal(vort, true_vort)
# Now try for xy ordered
vort = vorticity(u.T, v.T, 1 * units.meters, 2 * units.meters, dim_order='xy')
assert_array_equal(vort, true_vort.T)
def test_zero_divergence():
"""Test divergence calculation when zeros should be returned."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
c = divergence(u, u.T, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = 2. * np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
def test_divergence():
"""Test divergence for simple case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
c = divergence(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
def test_horizontal_divergence():
"""Test taking the horizontal divergence of a 3D field."""
u = np.array([[[1., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]]]) * units('m/s')
c = divergence(u, u, 1 * units.meter, 1 * units.meter)
true_c = np.array([[[0., -2., 0.],
[-2., 0., 2.],
[0., 2., 0.]],
[[0., 2., 0.],
[2., 0., -2.],
[0., -2., 0.]]]) * units('s^-1')
assert_array_equal(c, true_c)
def test_divergence_asym():
"""Test divergence calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
c = divergence(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_c = np.array([[-2, 5.5, -2.5], [2., 0.5, -1.5], [3., -1.5, 8.5]]) / units.sec
assert_array_equal(c, true_c)
# Now try for xy ordered
c = divergence(u.T, v.T, 1 * units.meters, 2 * units.meters, dim_order='xy')
assert_array_equal(c, true_c.T)
def test_shearing_deformation_asym():
"""Test shearing deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
sh = shearing_deformation(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_sh = np.array([[-7.5, -1.5, 1.], [9.5, -0.5, -11.], [1.5, 5.5, 12.]]) / units.sec
assert_array_equal(sh, true_sh)
    # Now try for xy ordered
sh = shearing_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(sh, true_sh.T)
def test_stretching_deformation_asym():
"""Test stretching deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
st = stretching_deformation(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_st = np.array([[4., 0.5, 12.5], [4., 1.5, -0.5], [1., 5.5, -4.5]]) / units.sec
assert_array_equal(st, true_st)
    # Now try for xy ordered
st = stretching_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(st, true_st.T)
def test_total_deformation_asym():
"""Test total deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
tdef = total_deformation(u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_tdef = np.array([[8.5, 1.58113883, 12.5399362], [10.30776406, 1.58113883, 11.0113578],
[1.80277562, 7.7781746, 12.8160056]]) / units.sec
assert_almost_equal(tdef, true_tdef)
# Now try for xy ordered
tdef = total_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_almost_equal(tdef, true_tdef.T)
def test_frontogenesis_asym():
"""Test frontogensis calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
theta = np.array([[303, 295, 305], [308, 310, 312], [299, 293, 289]]) * units('K')
fronto = frontogenesis(theta, u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_fronto = np.array([[-52.4746386, -37.3658646, -50.3996939],
[3.5777088, -2.1221867, -16.9941166],
[-23.1417334, 26.0499143, -158.4839684]]
) * units.K / units.meter / units.sec
assert_almost_equal(fronto, true_fronto)
# Now try for xy ordered
fronto = frontogenesis(theta.T, u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_almost_equal(fronto, true_fronto.T)
def test_advection_uniform():
"""Test advection calculation for a uniform 1D field."""
u = np.ones((3,)) * units('m/s')
s = np.ones_like(u) * units.kelvin
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = np.zeros_like(u) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_1d_uniform_wind():
"""Test advection for simple 1D case with uniform wind."""
u = np.ones((3,)) * units('m/s')
s = np.array([1, 2, 3]) * units('kg')
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = -np.ones_like(u) * units('kg/sec')
assert_array_equal(a, truth)
def test_advection_1d():
"""Test advection calculation with varying wind and field."""
u = np.array([1, 2, 3]) * units('m/s')
s = np.array([1, 2, 3]) * units('Pa')
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = np.array([-1, -2, -3]) * units('Pa/sec')
assert_array_equal(a, truth)
def test_advection_2d_uniform():
"""Test advection for uniform 2D field."""
u = np.ones((3, 3)) * units('m/s')
s = np.ones_like(u) * units.kelvin
a = advection(s, [u, u], (1 * units.meter, 1 * units.meter), dim_order='xy')
truth = np.zeros_like(u) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_2d():
"""Test advection in varying 2D field."""
u = np.ones((3, 3)) * units('m/s')
v = 2 * np.ones((3, 3)) * units('m/s')
s = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) * units.kelvin
a = advection(s, [u, v], (1 * units.meter, 1 * units.meter), dim_order='xy')
truth = np.array([[-6, -4, 2], [-8, 0, 8], [-2, 4, 6]]) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_2d_asym():
"""Test advection in asymmetric varying 2D field."""
u = np.arange(9).reshape(3, 3) * units('m/s')
v = 2 * u
s = np.array([[1, 2, 4], [4, 8, 4], [8, 6, 4]]) * units.kelvin
a = advection(s, [u, v], (2 * units.meter, 1 * units.meter), dim_order='yx')
truth = np.array([[0, -20.75, -2.5], [-33., -16., 20.], [-48, 91., 8]]) * units('K/sec')
assert_array_equal(a, truth)
# Now try xy ordered
a = advection(s.T, [u.T, v.T], (2 * units.meter, 1 * units.meter), dim_order='xy')
assert_array_equal(a, truth.T)
def test_geostrophic_wind():
"""Test geostrophic wind calculation with basic conditions."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100. * units.meter
# Using g as the value for f allows it to cancel out
ug, vg = geostrophic_wind(z, g.magnitude / units.sec,
100. * units.meter, 100. * units.meter, dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_wind_asym():
"""Test geostrophic wind calculation with a complicated field."""
z = np.array([[1, 2, 4], [4, 8, 4], [8, 6, 4]]) * 200. * units.meter
# Using g as the value for f allows it to cancel out
ug, vg = geostrophic_wind(z, g.magnitude / units.sec,
200. * units.meter, 100. * units.meter, dim_order='yx')
true_u = -np.array([[5, 20, 0], [7, 4, 0], [9, -12, 0]]) * units('m/s')
true_v = np.array([[0.5, 1.5, 2.5], [8, 0, -8], [-2, -2, -2]]) * units('m/s')
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
# Now try for xy ordered
ug, vg = geostrophic_wind(z.T, g.magnitude / units.sec,
200. * units.meter, 100. * units.meter, dim_order='xy')
assert_array_equal(ug, true_u.T)
assert_array_equal(vg, true_v.T)
def test_geostrophic_geopotential():
"""Test geostrophic wind calculation with geopotential."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100. * units('m^2/s^2')
ug, vg = geostrophic_wind(z, 1 / units.sec, 100. * units.meter, 100. * units.meter,
dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_3d():
"""Test geostrophic wind calculation with 3D array."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100.
# Using g as the value for f allows it to cancel out
z3d = np.dstack((z, z)) * units.meter
ug, vg = geostrophic_wind(z3d, g.magnitude / units.sec,
100. * units.meter, 100. * units.meter, dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
true_u = concatenate((true_u[..., None], true_u[..., None]), axis=2)
true_v = concatenate((true_v[..., None], true_v[..., None]), axis=2)
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_gempak():
"""Test of geostrophic wind calculation against gempak values."""
z = np.array([[5586387.00, 5584467.50, 5583147.50],
[5594407.00, 5592487.50, 5591307.50],
[5604707.50, 5603247.50, 5602527.50]]).T \
* (9.80616 * units('m/s^2')) * 1e-3
dx = np.deg2rad(0.25) * Re * np.cos(np.deg2rad(44))
# Inverting dy since latitudes in array increase as you go up
dy = -np.deg2rad(0.25) * Re
f = (2 * omega * np.sin(np.deg2rad(44))).to('1/s')
ug, vg = geostrophic_wind(z * units.m, f, dx, dy, dim_order='xy')
true_u = np.array([[21.97512, 21.97512, 22.08005],
[31.89402, 32.69477, 33.73863],
[38.43922, 40.18805, 42.14609]])
true_v = np.array([[-10.93621, -7.83859, -4.54839],
[-10.74533, -7.50152, -3.24262],
[-8.66612, -5.27816, -1.45282]])
assert_almost_equal(ug[1, 1], true_u[1, 1] * units('m/s'), 2)
assert_almost_equal(vg[1, 1], true_v[1, 1] * units('m/s'), 2)
def test_no_ageostrophic_geopotential():
"""Test ageostrophic wind calculation with geopotential and no ageostrophic wind."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100. * units('m^2/s^2')
u = np.array([[-2, 0, 2]] * 3) * units('m/s')
v = -u.T
uag, vag = ageostrophic_wind(z, 1 / units.sec, 100. * units.meter, 100. * units.meter,
u, v, dim_order='xy')
true =
|
np.array([[0, 0, 0]] * 3)
|
numpy.array
|
import numpy as np
class PriorFactor(object):
def __init__(self, var, A, b, sigma):
"""
        Initializes a prior Gaussian factor on a single variable, modeled as follows:
        exp(|| A * x - b ||^2)
:param var: Variable corresponding to x
:param A: Linear transformation of Variable x
:param b: Prior
:param sigma: Noise
"""
assert (A.shape[0] == b.size), "Measurement not in transformation codomain"
assert (A.shape[1] == var.dim), "Variable not in transformation domain"
assert (sigma.size == b.size), "Measurement and sigma must be the same dimension"
self.var = var
self.A = A
self.b = b
self.sigma = sigma
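def _example_prior_factor():
    # Illustrative sketch only (not part of the original module): shows how a
    # PriorFactor could be constructed. `_StubVariable` is a hypothetical
    # stand-in; the real Variable type is only assumed to expose a `dim`
    # attribute, as the assertions in __init__ require.
    class _StubVariable(object):
        dim = 2
    A = np.eye(2)               # identity transformation of x
    b = np.array([1.0, -0.5])   # prior measurement
    sigma = 0.1 * np.ones(2)    # per-dimension noise
    return PriorFactor(_StubVariable(), A, b, sigma)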
class LinearFactor(object):
def __init__(self, head, tail, A1, A2, b, sigma):
"""
        Initializes a linear Gaussian factor between two variables, modeled as follows:
        exp(|| A1 * x1 - A2 * x2 - b ||^2)
:param head: Head Variable corresponding to x1
:param tail: Tail Variable corresponding to x2
:param A1: Linear transformation of Variable x1
:param A2: Linear transformation of Variable x2
:param b: Measurement vector
:param sigma: Measurement noise
"""
assert (A1.shape[0] == b.size), "Measurement not in head transformation codomain"
assert (A2.shape[0] == b.size), "Measurement not in tail transformation codomain"
assert (A1.shape[1] == head.dim), "Head Variable not in transformation domain"
assert (A2.shape[1] == tail.dim), "Tail Variable not in transformation domain"
assert (sigma.size == b.size), "Measurement and sigma must be the same dimension"
self.head = head
self.tail = tail
self.A1 = A1
self.A2 = A2
self.b = b
self.sigma = sigma
class OdometryFactor(LinearFactor):
def __init__(self, start, end, R, t, sigma):
"""
Odometry factors are linear Gaussian factors between pairs of position variables modeled as follows
        exp(|| p2 - p1 - R*t ||^2)
Note that the rotation R transforms t from the robot frame to a shared frame of reference.
This can be supplied using the Compass module.
:param start: Starting PointVariable
:param end: Ending PointVariable
:param R: Coordinate frame to express the displacement in
:param t: Displacement/translation vector
        :param sigma: Odometry noise
"""
t_ = np.dot(R, t)
if np.isscalar(t_):
t_ =
|
np.array([t_])
|
numpy.array
|
from abc import ABC, abstractmethod
import numpy as np
import scipy.signal
class smoother(ABC):
@abstractmethod
def smooth(self, y_true):
"""
Smooths a 1-D array of values.
Parameters:
-----------
        y_true : array
Actual values.
Returns:
--------
y_smoothed : array
Smoothed values.
"""
pass
def smooth_df(self, df, col, inplace=False, **kwargs):
"""
Similar to `smooth` method but accepts a DataFrame with a DatetimeIndex and handles NaNs.
Parameters
----------
df : DataFrame
A DataFrame with a DatetimeIndex.
col : str
            The column containing the values to be smoothed.
inplace : bool
            If True, updates `df` in place by storing results in column `{col}_smoothed`.
Returns
-------
        A DataFrame with a `{col}_smoothed` column if inplace=False, None otherwise.
"""
# don't modify input dataframe unless specified
if inplace:
df.sort_index(ascending=True, inplace=True)
        else:
df = df.sort_index(ascending=True)
# find when data is available
idx = df.index[~df[col].isnull()]
# perform smoothing
df.loc[idx, col + '_smoothed'] = self.smooth(df.loc[idx, col].values, **kwargs)
if inplace:
return None
else:
return df
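def _example_smooth_df():
    # Illustrative sketch only (not part of the original module): demonstrates
    # smooth_df with a trivial concrete smoother. `_Identity` is a hypothetical
    # subclass used purely for this example, and pandas is assumed to be
    # available since smooth_df expects a DataFrame with a DatetimeIndex.
    import pandas as pd
    class _Identity(smoother):
        def smooth(self, y_true):
            return y_true
    idx = pd.date_range("2020-01-01", periods=5, freq="D")
    df = pd.DataFrame({"cases": [1.0, np.nan, 3.0, 4.0, np.nan]}, index=idx)
    # NaN rows are skipped; the returned copy gains a 'cases_smoothed' column.
    return _Identity().smooth_df(df, "cases")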
class geometric(smoother):
"""
A smoothing function that computes the geometric average of the current day with neighboring days,
possibly with unequal, but symmetrical weights.
Parameters
----------
weights : array > 0
The length of the array is the number of days in the future and past that will be used in smoothing.
The weights are normalised so they sum to 1. If unspecified defaults to [.5, .3, .2].
"""
def __init__(self, weights=None):
# set default values
if weights is None:
weights = [.5, .3, .2]
# check validity
weights = np.asarray(weights)
if weights.ndim != 1 or
|
np.any(weights < 0)
|
numpy.any
|
#!/usr/bin/env python
"""
.. module:: rrlmod
:platform: Unix
:synopsis: RRL model tools.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#__docformat__ = 'reStructuredText'
from __future__ import division
import os
import glob
import re
import pickle
import numpy as np
from scipy.constants import physical_constants as pc
import astropy.units as u
import astropy.constants as ac
from astropy.constants import h, k_B, c, m_e, Ryd, e
from astropy.modeling.blackbody import blackbody_nu
from crrlpy import frec_calc as fc
from crrlpy.crrls import natural_sort, f2n, n2f, load_ref
from crrlpy.utils import best_match_indx
LOCALDIR = os.path.dirname(os.path.realpath(__file__))
def beta(n, bn, te, line='RRL_CIalpha'):
"""
Computes the correction factor for stimulated emission.
:param n: principal quantum number.
:param bn: level population departure coefficient.
:param te: electron temperature.
"""
qns, freq = load_ref(line) # qns lists the final quantum numbers, nu->nl=qns.
nmin_idx = np.argmin(abs(qns - n.min()))
nmax_idx = np.argmin(abs(qns - n.max()))
freq = freq[nmin_idx:nmax_idx]*1e6 # Hz
h_ = pc['Planck constant'][0]*1e7
kboltzmann_ = pc['Boltzmann constant'][0]*1e7
hnukt = h_*freq/kboltzmann_/te
beta = (1. - bn[1:]/bn[:-1]*np.exp(-hnukt))/(1. - np.exp(-hnukt))
return beta
def bnbeta_approx(n, Te, ne, Tr):
"""
Approximates :math:`b_{n}\\beta_{n^{\\prime}n}`
for a particular set of conditions.
Uses Equation (B1) of Salas et al. (2016).
:param n: Principal quantum number
:type n: int
:param Te: Electron temperature in K.
:type Te: float
:param ne: Electron density per cubic centimeters.
:type ne: float
:param Tr: Temperature of the radiation field in K at 100 MHz.
:type Tr: float
:returns: The value of :math:`b_{n}\\beta_{n^{\\prime}n}` given an approximate expression.
:rtype: float
"""
bnbeta0 = load_betabn('5d1', 0.05,
other='case_diffuse_2d3')
bnbeta0 = bnbeta0[np.where(bnbeta0[:,0] == n), -1]
bnbeta = bnbeta0*(Te/50.)*np.power(ne/0.05, 0.5)*np.power(Tr/2000., -0.1)
return bnbeta
def bnbeta_approx_full(Te, ne, Tr, coefs):
"""
Approximates :math:`b_{n}\\beta_{n^{\\prime}n}` given a set of coefficients.
Uses Equations (5) and (B1)-(B5) of Salas et al. (2016).
"""
a0 = coefs[0] + coefs[1]*Tr + coefs[2]*np.power(Tr, 2.)
a1 = coefs[3] + coefs[4]*Tr
b0 = coefs[5] + coefs[6]*Tr + coefs[7]*np.power(Tr, 2.)
b1 = coefs[8] + coefs[9]*Tr + coefs[10]*np.power(Tr, 2.)
c0 = coefs[11] + coefs[12]*Tr + coefs[13]*np.power(Tr, 2.)
c1 = coefs[14] + coefs[15]*Tr + coefs[16]*np.power(Tr, 2.)
bnbeta = (a0 + a1*Te)/np.power((b0 + b1*Te)/ne + 1., c0 + c1*Te)
return bnbeta
def broken_plaw(nu, nu0, T0, alpha1, alpha2):
"""
Defines a broken power law.
.. math::
T(\\nu) = T_{0}\\left(\\dfrac{\\nu}{\\nu_{0}}\\right)^{\\alpha_1}\\mbox{ if }\\nu<\\nu_{0}
T(\\nu) = T_{0}\\left(\\dfrac{\\nu}{\\nu_{0}}\\right)^{\\alpha_2}\\mbox{ if }\\nu\\geq\\nu_{0}
:param nu: Frequency.
:param nu0: Frequency at which the power law breaks.
:param T0: Value of the power law at nu0.
:param alpha1: Index of the power law for nu<nu0.
:param alpha2: Index of the power law for nu>=nu0.
    :returns: Broken power law evaluated at nu.
"""
low = plaw(nu, nu0, T0, alpha1) * (nu < nu0)
hgh = plaw(nu, nu0, T0, alpha2) * (nu >= nu0)
return low + hgh
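def _example_broken_plaw():
    # Illustrative sketch only (not part of the original module): evaluates the
    # broken power law on both sides of the break. It assumes plaw follows
    # T0*(nu/nu0)**alpha, consistent with the commented-out expression in
    # I_external below.
    nu = np.array([50e6, 100e6, 200e6])  # Hz
    T = broken_plaw(nu, 100e6, 800., -1., -2.6)
    # Below the break T scales as (nu/nu0)**-1, above it as (nu/nu0)**-2.6,
    # i.e. roughly [1600., 800., 132.].
    return T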
def eta(freq, Te, ne, nion, Z, Tr, trans, n_max=1500):
"""
Returns the correction factor for the Planck function.
"""
    kl = kappa_line_lte(freq, Te, ne, nion, Z, Tr, trans, n_max=n_max)
kc = kappa_cont(freq, Te, ne, nion, Z)
return (kc + kl*bni)/(kc + kl*bnf*bnfni)
def fnnp_app(n, dn):
"""
Eq. (1) Menzel (1969)
"""
return n*mdn(dn)*(1. + 1.5*dn/n)
def I_Bnu(specie, Z, n, Inu_funct, *args):
"""
Calculates the product :math:`B_{n+\\Delta n,n}I_{\\nu}` to \
compute the line broadening due to a radiation field :math:`I_{\\nu}`.
:param specie: Atomic specie to calculate for.
:type specie: string
:param n: Principal quantum number at which to evaluate :math:`\\dfrac{2}{\\pi}\\sum\\limits_{\\Delta n}B_{n+\\Delta n,n}I_{n+\\Delta n,n}(\\nu)`.
:type n: int or list
    :param Inu_funct: Function to call and evaluate :math:`I_{n+\\Delta n,n}(\\nu)`. Its first argument must be the frequency.
:type Inu_funct: function
:param args: Arguments to `Inu_funct`. The frequency must be left out. \
The frequency will be passed internally in units of MHz. Use the same \
unit when required. `Inu_funct` must take the frequency as first parameter.
:returns: (Hz)
:rtype: array
:Example:
>>> I_Bnu('CI', 1., 500, I_broken_plaw, 800, 26*u.MHz.to('Hz'), -1., -2.6)
array([6.6554062])
"""
cte = 2.*np.pi**2.*e.gauss**2./(h.cgs.value*m_e.cgs.value*c.cgs.value**2.*Ryd.to('1/cm')*Z**2.)
try:
Inu = np.empty((len(n), 5))
BninfInu = np.empty((len(n), 5))
nu = np.empty((len(n), 5))
except TypeError:
Inu = np.empty((1, 5))
BninfInu = np.empty((1, 5))
nu = np.empty((1, 5))
for dn in range(1,6):
nu[:,dn-1] = n2f(n, specie+fc.set_trans(dn))
Inu[:,dn-1] = Inu_funct(nu[:,dn-1]*1e6, *args).cgs.value
BninfInu[:,dn-1] = cte.cgs.value*n**6./(dn*(n + dn)**2.)*mdn(dn)*Inu[:,dn-1]
return 2./np.pi*BninfInu.sum(axis=1)
def I_broken_plaw(nu, Tr, nu0, alpha1, alpha2):
"""
Returns the blackbody function evaluated at nu.
As temperature a broken power law is used.
The power law shape has parameters: Tr, nu0, alpha1 and alpha2.
:param nu: Frequency. (Hz) or astropy.units.Quantity_
:type nu: (Hz) or astropy.units.Quantity_
:param Tr: Temperature at nu0. (K) or astropy.units.Quantity_
:param nu0: Frequency at which the spectral index changes. (Hz) or astropy.units.Quantity_
:param alpha1: spectral index for :math:`\\nu<\\nu_0`
:param alpha2: spectral index for :math:`\\nu\\geq\\nu_0`
:returns: Specific intensity in :math:`\\rm{erg}\\,\\rm{cm}^{-2}\\,\\rm{Hz}^{-1}\\,\\rm{s}^{-1}\\,\\rm{sr}^{-1}`. See `astropy.analytic_functions.blackbody.blackbody_nu`__
:rtype: astropy.units.Quantity_
.. _astropy.units.Quantity: http://docs.astropy.org/en/stable/api/astropy.units.Quantity.html#astropy.units.Quantity
__ blackbody_
.. _blackbody: http://docs.astropy.org/en/stable/api/astropy.analytic_functions.blackbody.blackbody_nu.html#astropy.analytic_functions.blackbody.blackbody_nu
"""
Tbpl = broken_plaw(nu, nu0, Tr, alpha1, alpha2)
bnu_bpl = blackbody_nu(nu, Tbpl)
return bnu_bpl
def I_cont(nu, Te, tau, I0, unitless=False):
"""
Computes the specific intensity due to a blackbody at temperature :math:`T_{e}` and optical depth :math:`\\tau`. It considers that there is
background radiation with :math:`I_{0}`.
:param nu: Frequency.
:type nu: (Hz) or astropy.units.Quantity_
:param Te: Temperature of the source function. (K) or astropy.units.Quantity_
:param tau: Optical depth of the medium.
:param I0: Specific intensity of the background radiation. Must have units of erg / (cm2 Hz s sr) or see `unitless`.
    :param unitless: If True, the source function is evaluated as a plain cgs value (no astropy units); I0 is then expected to be unitless as well.
:returns: The specific intensity of a ray of light after traveling in an LTE \
medium with source function :math:`B_{\\nu}(T_{e})` after crossing an optical \
depth :math:`\\tau_{\\nu}`. The units are erg / (cm2 Hz s sr). See `astropy.analytic_functions.blackbody.blackbody_nu`__
__ blackbody_
.. _blackbody: http://docs.astropy.org/en/stable/api/astropy.analytic_functions.blackbody.blackbody_nu.html#astropy.analytic_functions.blackbody.blackbody_nu
"""
bnu = blackbody_nu(nu, Te)
if unitless:
bnu = bnu.cgs.value
return bnu*(1. - np.exp(-tau)) + I0*np.exp(-tau)
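def _example_I_cont():
    # Illustrative sketch only (not part of the original module): the specific
    # intensity emerging from an optically thin LTE slab with no background
    # radiation, using astropy units as elsewhere in this module.
    nu = 1.4e9 * u.Hz
    Te = 1e4 * u.K
    tau = 0.1
    I0 = 0. * u.erg / (u.cm**2 * u.Hz * u.s * u.sr)
    return I_cont(nu, Te, tau, I0)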
def I_external(nu, Tbkg, Tff, tau_ff, Tr, nu0=100e6*u.MHz, alpha=-2.6):
"""
This method is equivalent to the IDL routine
:param nu: Frequency. (Hz) or astropy.units.Quantity_
.. _astropy.units.Quantity: http://docs.astropy.org/en/stable/api/astropy.units.Quantity.html#astropy.units.Quantity
"""
if Tbkg.value != 0:
bnu_bkg = blackbody_nu(nu, Tbkg)
else:
bnu_bkg = 0
if Tff.value != 0:
bnu_ff = blackbody_nu(nu, Tff)
exp_ff = (1. - np.exp(-tau_ff))
else:
bnu_ff = 0
exp_ff = 0
if Tr.value != 0:
Tpl = plaw(nu, nu0, Tr, alpha)#Tr*np.power(nu/nu0, alpha)
bnu_pl = blackbody_nu(nu, Tpl)
else:
bnu_pl = 0
return bnu_bkg + bnu_ff*exp_ff + bnu_pl
def I_total(nu, Te, tau, I0, eta):
"""
"""
bnu = blackbody_nu(nu, Te)
exp = np.exp(-tau)
return bnu*eta*(1. - exp) + I0*exp
def itau(temp, dens, line, n_min=5, n_max=1000, other='', verbose=False, value='itau', location=LOCALDIR):
"""
Gives the integrated optical depth for a given temperature and density.
It assumes that the background radiation field dominates the continuum emission.
The emission measure is unity. The output units are Hz.
:param temp: Electron temperature. Must be a string of the form '8d1'.
:type temp: string
:param dens: Electron density.
:type dens: float
:param line: Line to load models for.
:type line: string
    :param n_min: Minimum n value to include in the output. Default 5.
:type n_min: int
    :param n_max: Maximum n value to include in the output. Default 1000. Maximum allowed value 9900.
:type n_max: int
:param other: String to search for different radiation fields and others.
:type other: string
:param verbose: Verbose output?
:type verbose: bool
:param value: ['itau'|'bbnMdn'|'None'] Value to output. itau will output the integrated optical depth. \
    bbnMdn will output the :math:`\\beta_{n,n^{\\prime}}b_{n}` times the oscillator strength :math:`M(\\Delta n)`. \
None will output the :math:`\\beta_{n,n^{\\prime}}b_{n}` values.
:type value: string
    :returns: The principal quantum number and its associated value.
"""
t = str2val(temp)
d = float(dens)
dn = fc.set_dn(line)
mdn_ = mdn(dn)
bbn = load_betabn(temp, dens, other, line, verbose, location=location)
nimin = best_match_indx(n_min, bbn[:,0])
nimax = best_match_indx(n_max, bbn[:,0])
n = bbn[nimin:nimax,0]
b = bbn[nimin:nimax,1]
if value == 'itau':
i = itau_norad(n, t, b, dn, mdn_)
elif value == 'bbnMdn':
i = b*dn*mdn_
else:
i = b
return n, i
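def _example_itau():
    # Illustrative sketch only (not part of the original module): the call
    # below follows the conventions described in the docstring (temperature as
    # a string such as '8d1'). It assumes the corresponding model tables are
    # installed under `location`, so it will not run without the model grid.
    n, tau = itau('8d1', 0.05, 'RRL_CIalpha', n_min=200, n_max=800)
    return n, tau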
def itau_h(temp, dens, trans, n_max=1000, other='', verbose=False, value='itau'):
"""
Gives the integrated optical depth for a given temperature and density.
The emission measure is unity. The output units are Hz.
"""
t = str2val(temp)
d = dens
dn = fc.set_dn(trans)
mdn_ = mdn(dn)
bbn = load_betabn_h(temp, dens, other, trans, verbose)
n = bbn[:,0]
b = bbn[:,1]
b = b[:n_max]
n = n[:n_max]
if value == 'itau':
#i = -1.069e7*dn*mdn*b*np.exp(1.58e5/(np.power(n, 2)*t))/np.power(t, 5./2.)
i = itau_norad(n, t, b, dn, mdn_)
elif value == 'bbnMdn':
i = b*dn*mdn_
else:
i = b
return n, i
def itau_norad(n, Te, b, dn, mdn_):
"""
Returns the optical depth using the approximate solution to the
radiative transfer problem.
"""
return -1.069e7*dn*mdn_*b*np.exp(1.58e5/(np.power(n, 2.)*Te))/np.power(Te, 5./2.)
def itau_lte(n, Te, dn, mdn_, em):
"""
Returns the CRRL optical depth integrated in velocity in units of Hz.
"""
return 1.069e7*dn*mdn_*np.exp(1.58e5/(np.power(n, 2.)*Te))/np.power(Te, 5./2.)*em
def j_line_lte(n, ne, nion, Te, Z, trans):
"""
"""
trans = fc.set_name(trans)
Aninf = np.loadtxt('{0}/rates/einstein_Anm_{1}.txt'.format(LOCALDIR, trans))
cte = h/4./np.pi
Nni = level_pop_lte(n, ne, nion, Te, Z)
lc = line_center(nu)
return cte*Aninf[:,2]*Nni*lc
def kappa_cont(freq, Te, ne, nion, Z):
"""
Computes the absorption coefficient for the free-free process.
"""
nu = freq.to('GHz').value
v = 0.65290+2./3.*np.log10(nu) - np.log10(Te.to('K').value)
kc = np.zeros(len(nu))
#mask_1 = v <= -2.6
mask_2 = (v > -5) & (v <= -0.25)
mask_3 = v > -0.25
#kc[mask_1] = 6.9391e-8*np.power(Z, 2)*ne*nion*np.power(nu[mask_1], -2.)* \
#np.power(1e4/Te.to('K').value, 1.5)* \
#(4.6950 - np.log10(Z) + 1.5*np.log(Te.to('K').value/1e4) - np.log10(nu[mask_1]))
v_2 = v[mask_2]
log10I_m0 = -1.232644*v_2 + 0.098747
kc[mask_2] = kappa_cont_base(nu[mask_2], Te.to('K').value, ne.cgs.value,
nion.cgs.value, Z)*np.power(10., log10I_m0)
v_3 = v[mask_3]
log10I_m0 = -1.084191*v_3 + 0.135860
kc[mask_3] = kappa_cont_base(nu[mask_3], Te.to('K').value, ne.cgs.value,
nion.cgs.value, Z)*np.power(10., log10I_m0)
#if v < -2.:
#kc = 6.9391e-8*np.power(Z, 2)*ne*nion*np.power(nu, -2.)*np.power(1e4/Te, 1.5)* \
#(4.6950 - np.log10(Z) + 1.5*np.log(Te/1e4) - np.log10(nu))
#elif v >= -2. and v <= -0.25:
#log10I_m0 = -1.232644*v + 0.098747
#elif v > -0.25:
#log10I_m0 = -1.084191*v + 0.135860
#if v >= -2.:
#kc = 4.6460/np.power(nu, 7./3.)/np.power(Te, 1.5)* \
#(np.exp(4.7993e-2*nu/Te) - 1.)* \
#np.exp(aliv/np.log10(np.exp(1)))#*np.exp(-h*freq/k_B/Te)
return kc*u.pc**-1*np.exp(-h.cgs.value*nu/k_B.cgs.value/Te.cgs.value)
def kappa_cont_base(nu, Te, ne, nion, Z):
"""
"""
return 4.6460/np.power(nu, 7./3.)/np.power(Te, 1.5)* \
(np.exp(4.7993e-2*nu/Te) - 1.)*np.power(Z, 8./3.)*ne*nion
def kappa_line(Te, ne, nion, Z, Tr, trans, n_max=1500):
"""
Computes the line absorption coefficient for CRRLs between levels :math:`n_{i}` and :math:`n_{f}`, :math:`n_{i}>n_{f}`.
This can only go up to :math:`n_{\\rm{max}}` 1500 because of the tables used for the Einstein Anm coefficients.
:param Te: Electron temperature of the gas. (K)
:type Te: float
:param ne: Electron density. (:math:`\\mbox{cm}^{-3}`)
:type ne: float
:param nion: Ion density. (:math:`\\mbox{cm}^{-3}`)
:type nion: float
:param Z: Electric charge of the atoms being considered.
:type Z: int
:param Tr: Temperature of the radiation field felt by the gas. This specifies the temperature of the field at 100 MHz. (K)
:type Tr: float
:param trans: Transition for which to compute the absorption coefficient.
:type trans: string
:param n_max: Maximum principal quantum number to include in the output.
:type n_max: int<1500
:returns:
:rtype: array
"""
cte = np.power(c, 2.)/(16.*np.pi)*np.power(np.power(h, 2)/(2.*np.pi*m_e*k_B), 3./2.)
bn = load_bn(val2str(Te), ne, other='case_diffuse_{0}'.format(val2str(Tr)))
bn = bn[:np.where(bn[:,0] == n_max)[0]]
Anfni = np.loadtxt('{0}/rates/einstein_Anm_{1}.txt'.format(LOCALDIR, trans))
# Cut the Einstein Amn coefficients table to match the bn values
i_bn_i = best_match_indx(bn[0,0], Anfni[:,1])
i_bn_f = best_match_indx(bn[-1,0], Anfni[:,0])
Anfni = Anfni[i_bn_i:i_bn_f+1]
ni = Anfni[:,0]
nf = Anfni[:,1]
omega_ni = 2*np.power(ni, 2)
omega_i = 1.
xi_ni = xi(ni, Te, Z)
xi_nf = xi(nf, Te, Z)
exp_ni = np.exp(xi_ni.value)
exp_nf = np.exp(xi_nf.value)
#print len(Anfni), len(bn[1:,-1]), len(bn[:-1,-1]), len(omega_ni[:]), len(ni), len(exp_ni), len(exp_nf)
kl = cte.value/np.power(Te, 3./2.)*ne*nion*Anfni[:,2]*omega_ni[:]/omega_i*(bn[1:,-1]*exp_ni - bn[:-1,-1]*exp_nf)
return kl
def kappa_line_lte(nu, Te, ne, nion, Z, Tr, line, n_min=1, n_max=1500):
"""
Returns the line absorption coefficient under LTE conditions.
:param nu: Frequency. (Hz)
:type nu: array
:param Te: Electron temperature of the gas. (K)
:type Te: float
:param ne: Electron density. (:math:`\\mbox{cm}^{-3}`)
:type ne: float
:param nion: Ion density. (:math:`\\mbox{cm}^{-3}`)
:type nion: float
:param Z: Electric charge of the atoms being considered.
:type Z: int
:param Tr: Temperature of the radiation field felt by the gas. This specifies the temperature of the field at 100 MHz. (K)
:type Tr: float
:param trans: Transition for which to compute the absorption coefficient.
:type trans: string
:param n_max: Maximum principal quantum number to include in the output.
:type n_max: int<1500
:returns:
:rtype: array
"""
ni = f2n(nu.to('MHz').value, line, n_max) + 1.
trans = fc.set_name(line)
cte = (np.power(c, 2.)/(8.*np.pi))
Aninf = np.loadtxt('{0}/rates/einstein_Anm_{1}.txt'.format(LOCALDIR, trans))
Aninf = Aninf[np.where(Aninf[:,1] == n_min)[0]:np.where(Aninf[:,1] == n_max)[0]]
exp = np.exp(-h*nu/k_B/Te)
Nni = level_pop_lte(ni, ne, nion, Te, Z)
return cte/np.power(nu, 2.)*Nni*Aninf[:,2]*(1. - exp)#*np.power(Aninf[:,0]/Aninf[:,1], 2.)
def level_pop_lte(n, ne, nion, Te, Z):
"""
Returns the level population of level n.
The return has units of :math:`\\mbox{cm}^{-3}`.
"""
omega_ni = 2.*np.power(n, 2.)
omega_i = 1.
xi_n = xi(n, Te, Z)
exp_xi_n = np.exp(xi_n.value)
Nn = ne*nion*np.power(np.power(h, 2.)/(2.*np.pi*m_e*k_B*Te), 1.5)*omega_ni/omega_i/2.*exp_xi_n
return Nn
def load_bn(te, ne, tr='', ncrit='1.5d3', n_min=5, n_max=1000, verbose=False, location=LOCALDIR):
"""
Loads the bn values from the CRRL models.
:param te: Electron temperature of the model.
:type te: string
:param ne: Electron density of the model.
:type ne: string
:param other: Radiation field of the model or any other string with model characteristics.
:type other: string
:param verbose: Verbose output?
:type verbose: bool
:returns: The :math:`b_{n}` value for the given model conditions.
:rtype: array
"""
#LOCALDIR = os.path.dirname(os.path.realpath(__file__))
if tr == '-' or tr == '' or tr == 0:
model_file = 'Carbon_opt_T_{0}_ne_{1}_ncrit_{2}_vriens_delta_500_vrinc_nmax_9900_dat'.format(te, ne, ncrit)
if verbose:
print("Loading {0}".format(model_file))
else:
model_file = 'Carbon_opt_T_{0}_ne_{1}_ncrit_{2}_{3}_vriens_delta_500_vrinc_nmax_9900_dat'.format(te, ne, ncrit, tr)
if verbose:
print("Loading {0}".format(model_file))
model_path = glob.glob('{0}/{1}'.format(location, model_file))[0]
if verbose:
print("Loaded {0}".format(model_path))
bn = np.loadtxt(model_path)
nimin = best_match_indx(n_min, bn[:,0])
nimax = best_match_indx(n_max, bn[:,0])
bn = bn[nimin:nimax+1]
return bn
def load_bn_h(te, ne, other='', n_min=5, n_max=1000, verbose=False):
"""
Loads the bn values from the HRRL models.
:param te: Electron temperature of the model.
:type te: string
:param ne: Electron density of the model.
:type ne: string
:param other: Radiation field of the model or any other string with model characteristics.
:type other: string
:param verbose: Verbose output?
:type verbose: bool
:returns: The :math:`b_{n}` value for the given model conditions.
:rtype: array
"""
#LOCALDIR = os.path.dirname(os.path.realpath(__file__))
if other == '-' or other == '':
mod_file = 'H_bn2/Hydrogen_opt_T_{1}_ne_{2}_ncrit_8d2_vriens_delta_500_vrinc_nmax_9900_dat'.format(LOCALDIR, te, ne)
if verbose:
print("Loading {0}".format(mod_file))
mod_file = glob.glob('{0}/H_bn2/Hydrogen_opt_T_{1}_ne_{2}*_ncrit_8d2_vriens_delta_500_vrinc_nmax_9900_dat'.format(LOCALDIR, te, ne))[0]
else:
mod_file = 'H_bn2/Hydrogen_opt_T_{1}_ne_{2}_ncrit_8d2_{3}_vriens_delta_500_vrinc_nmax_9900_dat'.format(LOCALDIR, te, ne, other)
if verbose:
print("Loading {0}".format(mod_file))
mod_file = glob.glob('{0}/H_bn2/Hydrogen_opt_T_{1}_ne_{2}*_ncrit_8d2_{3}_vriens_delta_500_vrinc_nmax_9900_dat'.format(LOCALDIR, te, ne, other))[0]
if verbose:
print("Loaded {0}".format(mod_file))
bn = np.loadtxt(mod_file)
nimin = best_match_indx(n_min, bn[:,0])
nimax = best_match_indx(n_max, bn[:,0])
bn = bn[nimin:nimax+1]
return bn
def load_bn_all(n_min=5, n_max=1000, verbose=False, location=LOCALDIR):
"""
"""
models = glob.glob('{0}/bn2/*_dat'.format(location))
natural_sort(models)
models =
|
np.asarray(models)
|
numpy.asarray
|
import numpy as np
import cv2
import glob
import itertools
import os
from tqdm import tqdm
from .frames_data import get_frame_and_ground_truth_crop, get_video_wise_list
from .standalone_IoU_model_libs import get_bounding_boxes
from ..data_utils.bounding_box_based_network_utils import get_im_patch
from ..models.config import IMAGE_ORDERING
from .augmentation import augment_seg, two_stream_augment_seg
from . import standalone_IoU_model_libs
import random
random.seed(0)
class_colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(5000) ]
def get_pairs_from_paths( images_path , segs_path ):
images = glob.glob( os.path.join(images_path,"*.jpg") ) + glob.glob( os.path.join(images_path,"*.png") ) + glob.glob( os.path.join(images_path,"*.jpeg") )
segmentations = glob.glob( os.path.join(segs_path,"*.png") )
segmentations_d = dict( zip(segmentations,segmentations ))
ret = []
for im in images:
seg_bnme = os.path.basename(im).replace(".jpg" , ".png").replace(".jpeg" , ".png")
seg = os.path.join( segs_path , seg_bnme )
        assert ( seg in segmentations_d ), (im + " is present in "+images_path +" but "+seg_bnme+" is not found in "+segs_path + " . Make sure annotation images are in .png" )
ret.append((im , seg) )
return ret
def get_pairs_from_paths_i3d(features_path, segs_path):
i3d_feature_paths = glob.glob(os.path.join(features_path, '*.npy'))
ret = []
for i3d_feature_path in i3d_feature_paths:
filename = os.path.basename(i3d_feature_path)
filename_wo_ext, _ = os.path.splitext(filename)
segmentation_path = os.path.join(segs_path, f'{filename_wo_ext}.png')
ret.append((i3d_feature_path, segmentation_path))
return ret
def two_stream_get_pairs_from_paths(images_path, flows_path, segs_path):
images = glob.glob(os.path.join(images_path, "*.jpg")) + glob.glob(os.path.join(images_path, "*.png")) + glob.glob(
os.path.join(images_path, "*.jpeg"))
segmentations = glob.glob(os.path.join(segs_path, "*.png"))
flows = glob.glob(os.path.join(flows_path, "*.png"))
segmentations_d = dict(zip(segmentations, segmentations))
flows_d = dict(zip(flows, flows))
ret = []
for im in images:
seg_bnme = os.path.basename(im).replace(".jpg", ".png").replace(".jpeg", ".png")
seg = os.path.join(segs_path, seg_bnme)
flw = os.path.join(flows_path, seg_bnme)
assert (seg in segmentations_d), (
im + " is present in " + images_path + " but " + seg_bnme + " is not found in " + segs_path + " . Make sure annotation image are in .png")
assert (flw in flows_d), (
im + " is present in " + images_path + " but " + seg_bnme + " is not found in " + flows_path + " . Make sure flow image are in .png")
ret.append((im, flw, seg))
return ret
def get_image_arr( path , width , height , imgNorm="sub_mean" , odering='channels_first' ):
if type( path ) is np.ndarray:
img = path
else:
img = cv2.imread(path, 1)
if imgNorm == "sub_and_divide":
img = np.float32(cv2.resize(img, ( width , height ))) / 127.5 - 1
elif imgNorm == "sub_mean":
img = cv2.resize(img, ( width , height ))
img = img.astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
img = img[ : , : , ::-1 ]
elif imgNorm == "divide":
img = cv2.resize(img, ( width , height ))
img = img.astype(np.float32)
img = img/255.0
if odering == 'channels_first':
img = np.rollaxis(img, 2, 0)
return img
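def _example_get_image_arr():
    # Illustrative sketch only (not part of the original repository): since
    # get_image_arr also accepts an in-memory array, a synthetic image is
    # enough to show the normalisation modes and the channel ordering.
    img = np.random.randint(0, 255, (100, 120, 3)).astype(np.uint8)
    a = get_image_arr(img, 224, 224, imgNorm="divide", odering="channels_last")
    b = get_image_arr(img, 224, 224, imgNorm="sub_mean", odering=IMAGE_ORDERING)
    return a.shape, b.shape  # (224, 224, 3) and, for channels_first, (3, 224, 224)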
def get_segmentation_arr( path , nClasses , width , height , no_reshape=False ):
seg_labels = np.zeros(( height , width , nClasses ))
if type( path ) is np.ndarray:
img = path
else:
img = cv2.imread(path, 1)
img = cv2.resize(img, ( width , height ) , interpolation=cv2.INTER_NEAREST )
img = img[:, : , 0]
for c in range(nClasses):
seg_labels[: , : , c ] = (img == c ).astype(int)
if no_reshape:
return seg_labels
seg_labels = np.reshape(seg_labels, ( width*height , nClasses ))
return seg_labels
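def _example_get_segmentation_arr():
    # Illustrative sketch only (not part of the original repository): a tiny
    # label image is expanded into per-class one-hot planes and flattened to
    # (width*height, nClasses), the shape the generators below expect.
    lbl = np.zeros((4, 4, 3), dtype=np.uint8)
    lbl[2:, 2:, 0] = 1  # class 1 in the lower-right corner
    seg = get_segmentation_arr(lbl, 2, 4, 4)
    return seg.shape  # (16, 2)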
def verify_segmentation_dataset( images_path , segs_path , n_classes ):
img_seg_pairs = get_pairs_from_paths( images_path , segs_path )
assert len(img_seg_pairs)>0 , "Dataset looks empty or path is wrong "
for im_fn , seg_fn in tqdm(img_seg_pairs) :
img = cv2.imread( im_fn )
seg = cv2.imread( seg_fn )
assert ( img.shape[0]==seg.shape[0] and img.shape[1]==seg.shape[1] ) , "The size of image and the annotation does not match or they are corrupt "+ im_fn + " " + seg_fn
assert ( np.max(seg[:,:,0]) < n_classes) , "The pixel values of seg image should be from 0 to "+str(n_classes-1) + " . Found pixel value "+str(np.max(seg[:,:,0]))
print("Dataset verified! ")
def two_stream_verify_segmentation_dataset(images_path, flows_path, segs_path, n_classes):
img_seg_pairs = two_stream_get_pairs_from_paths(images_path, flows_path, segs_path)
assert len(img_seg_pairs) > 0, "Dataset looks empty or path is wrong "
for im_fn, flow_fn, seg_fn in tqdm(img_seg_pairs):
img = cv2.imread(im_fn)
flw = cv2.imread(flow_fn)
seg = cv2.imread(seg_fn)
assert (img.shape[0] == seg.shape[0] and img.shape[1] == seg.shape[
1]), "The size of image and the annotation does not match or they are corrupt " + im_fn + " " + seg_fn
assert (img.shape[0] == flw.shape[0] and img.shape[1] == flw.shape[
1]), "The size of image and the flow does not match or they are corrupt " + im_fn + " " + flow_fn
assert (np.max(seg[:, :, 0]) < n_classes), "The pixel values of seg image should be from 0 to " + str(
n_classes - 1) + " . Found pixel value " + str(np.max(seg[:, :, 0]))
print("Dataset verified! ")
def image_segmentation_generator( images_path , segs_path , batch_size, n_classes , input_height , input_width , output_height , output_width , do_augment=False ):
img_seg_pairs = get_pairs_from_paths( images_path , segs_path )
random.shuffle( img_seg_pairs )
zipped = itertools.cycle( img_seg_pairs )
while True:
X = []
Y = []
for _ in range(batch_size):
im, seg = next(zipped)
im = cv2.imread(im, 1)
seg = cv2.imread(seg, 1)
if do_augment:
                im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
X.append(get_image_arr(im, input_width, input_height, odering=IMAGE_ORDERING))
Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
yield np.array(X), np.array(Y)
def image_segmentation_generator_i3d_inception(features_folder, segmentation_folder, batch_size, n_classes,
input_height, input_width, output_height, output_width):
feature_seg_pairs = get_pairs_from_paths_i3d(features_folder, segmentation_folder)
random.shuffle(feature_seg_pairs)
zipped = itertools.cycle(feature_seg_pairs)
while True:
X = []
Y = []
for _ in range(batch_size):
feature_volume, seg = next(zipped)
feature_volume = np.load(feature_volume)
seg = cv2.imread(seg, 1)
for frame_number in range(feature_volume.shape[0]):
feature_volume[frame_number] = get_image_arr(feature_volume[frame_number], input_width, input_height, odering=IMAGE_ORDERING)
X.append(feature_volume)
Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
yield np.array(X), np.array(Y)
def image_segmentation_generator_with_weighted_output(images_path, segs_path, batch_size, n_classes, input_height,
input_width, output_height, output_width, do_augment=False):
img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
random.shuffle(img_seg_pairs)
zipped = itertools.cycle(img_seg_pairs)
while True:
X = []
Y = []
for _ in range(batch_size):
im, seg = next(zipped)
im = cv2.imread(im, 1)
seg = cv2.imread(seg, 1)
if do_augment:
                im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
X.append(get_image_arr(im, input_width, input_height, odering=IMAGE_ORDERING))
Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
yield np.array(X), {"main_output_activation": np.array(Y), "second_output_activation": np.array(Y)}
def image_segmentation_temporal_generator_with_weighted_output(images_path, segs_path, batch_size, n_classes,
input_height,
input_width, output_height, output_width,
do_augment=False):
frames_in_cuboid = 3
middle_number = 1
image_paths = glob.glob(os.path.join(images_path, '*.png')) + glob.glob(os.path.join(images_path, '*.jpg'))
video_wise = get_video_wise_list(image_paths)
video_wise = [video_wise[k] for k in video_wise.keys()]
random.shuffle(video_wise)
video_wise_paths = itertools.cycle(video_wise)
X = []
Y = []
while True:
video_frames = next(video_wise_paths)
for frame_number in range(len(video_frames) - frames_in_cuboid):
__frame_paths = video_frames[frame_number:frame_number + frames_in_cuboid]
__frames = [cv2.imread(__frame_path) for __frame_path in __frame_paths]
__fname = os.path.splitext(os.path.basename(__frame_paths[middle_number]))[0]
gt = cv2.imread(os.path.join(segs_path, __fname + '.png'))
frame_patches = [[] for i in range(9)]
for __frame in __frames:
patches = get_frame_and_ground_truth_crop(__frame)
for i in range(len(patches)):
frame_patches[i].append(
get_image_arr(patches[i], input_width, input_height, odering=IMAGE_ORDERING))
frame_patches = [np.array(frame_patch) for frame_patch in frame_patches]
gt_patches = get_frame_and_ground_truth_crop(gt)
for frame_patch, gt_patch in zip(frame_patches, gt_patches):
X.append(frame_patch)
Y.append(get_segmentation_arr(gt_patch, n_classes, output_width, output_height))
if len(X) == batch_size:
yield np.array(X), {"main_output_activation": np.array(Y), "second_output_activation": np.array(Y)}
X = []
Y = []
def image_segmentation_generator_bounding_box_based_network(images_path, segs_path, batch_size, n_classes, input_height,
input_width, output_height, output_width, do_augment=False):
img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
random.shuffle(img_seg_pairs)
zipped = itertools.cycle(img_seg_pairs)
X = []
Y = []
while True:
im, seg = next(zipped)
im = cv2.imread(im, 1)
seg = cv2.imread(seg, 1)
# get height and width of the image
height, width, _ = seg.shape
# get bounding boxes
bounding_boxes = get_bounding_boxes(seg)
for bounding_box in bounding_boxes:
# get coords
x1 = bounding_box['x1']
x2 = bounding_box['x2']
y1 = bounding_box['y1']
y2 = bounding_box['y2']
w = x2 - x1
h = y2 - y1
bbox_coords = (x1, y1, w, h)
# get patch from image
im_patch, _ = get_im_patch(im, bounding_box)
X.append(get_image_arr(im_patch, input_width, input_height, odering=IMAGE_ORDERING))
Y.append(bbox_coords)
# check if batch is filled
if len(X) == batch_size or len(Y) == batch_size:
yield np.array(X), np.array(Y)
X = []
Y = []
def image_segmentation_generator_bounding_box_iou_based_network(images_path, segs_path, batch_size, n_classes, input_height,
input_width, output_height, output_width, do_augment=False):
img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
random.shuffle(img_seg_pairs)
zipped = itertools.cycle(img_seg_pairs)
X = []
Y = []
while True:
im, seg = next(zipped)
im = cv2.imread(im, 1)
seg = cv2.imread(seg, 1)
# get height and width of the image
height, width, _ = seg.shape
# get bounding boxes
bounding_boxes = get_bounding_boxes(seg)
for bounding_box in bounding_boxes:
# get coords
x1 = bounding_box['x1']
x2 = bounding_box['x2']
y1 = bounding_box['y1']
y2 = bounding_box['y2']
w = x2 - x1
h = y2 - y1
bbox_coords = (x1, y1, w, h)
# get patch from image
im_patch, _ = get_im_patch(im, bounding_box)
X.append(get_image_arr(im_patch, input_width, input_height, odering=IMAGE_ORDERING))
Y.append(bbox_coords)
# check if batch is filled
if len(X) == batch_size or len(Y) == batch_size:
yield np.array(X), np.array(Y).astype(np.float64)
X = []
Y = []
def IoU_network_image_segmentation_generator(images_path, segs_path, batch_size, n_classes, input_height, input_width,
output_height, output_width, do_augment=False):
img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
random.shuffle(img_seg_pairs)
zipped = itertools.cycle(img_seg_pairs)
X = []
Y = []
while True:
im, seg = next(zipped)
im = cv2.imread(im, 1)
seg = cv2.imread(seg, 1)
bounding_boxes = standalone_IoU_model_libs.get_bounding_boxes(seg)
        # images with more than one bounding box: generate IoU-labelled crops per box;
        # otherwise the full image is used below with an IoU label of 0
        if len(bounding_boxes) > 1:
for bounding_box in bounding_boxes:
for new_mask, iou_score, bbox_coords in standalone_IoU_model_libs.generate_augmentations(seg,
bounding_box,
100, 0.1):
if do_augment:
im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
im_patch = standalone_IoU_model_libs.get_patch_of_image_with_bounding_box_coords(im, bbox_coords)
X.append(get_image_arr(im_patch, input_width, input_height, odering=IMAGE_ORDERING))
# Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
Y.append(iou_score)
if len(X) == batch_size:
yield np.array(X), np.array(Y)
X = []
Y = []
else:
if do_augment:
im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
X.append(get_image_arr(im, input_width, input_height, odering=IMAGE_ORDERING))
# Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
Y.append(0)
if len(X) == batch_size:
yield np.array(X), np.array(Y)
X = []
Y = []
def image_segmentation_generator_i3d(images_path, segs_path, batch_size, n_classes, input_height, input_width,
output_height, output_width, do_augment=False):
img_seg_pairs = get_pairs_from_paths_i3d(images_path, segs_path)
random.shuffle(img_seg_pairs)
zipped = itertools.cycle(img_seg_pairs)
while True:
X = []
Y = []
for _ in range(batch_size):
im, seg = next(zipped)
im = np.load(im)
seg = cv2.imread(seg, 1)
# if do_augment:
# img, seg[:, :, 0] = augment_seg(img, seg[:, :, 0])
X.append(im)
Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
yield np.array(X), {"main_output_activation":
|
np.array(Y)
|
numpy.array
|
import numpy as np
import os
from data_cluster import load_cluster_labels, attach_cluster_id_arr_manual
from data_rowreduce import prune_rows
from data_settings import DATADIR, PIPELINES_VALID, CELLSTOCLUSTERS_2018SCMCA, NPZ_2018SCMCA_MEMS, NPZ_2018SCMCA_ORIG, \
NPZ_2018SCMCA_ORIG_WITHCLUSTER, NPZ_2014MEHTA_ORIG, PIPELINES_DIRS
from data_standardize import save_npz_of_arr_genes_cells, load_npz_of_arr_genes_cells
"""
Purpose: process standardized expression data (i.e. converted to npz of arr, genes, cells)
- cluster data, or load in clustered results and attach it to first row of gene, expression in the npz
- save clustered raw data in standard npz format (npz of arr, genes, cells)
- convert raw data into "cluster dict": dictionary that maps cluster data to submatrix of genes x cells
- binarize data within each cluster dict
- create binarized cluster dict
- from binarized cluster dict: create "memory" / "cell type" matrix (get representative column from each cluster)
- save memory matrix in standard npz format (npz of mems, genes, types)
- reduce row number with various pruning techniques
- save total row reduction in file "removed_rows.txt"
- save reduced memory matrix in standard npz format (npz of mems, genes, types)
- use "removed_rows.txt" to delete rows of original raw data
- save reduced clustered raw data in standard npz format (npz of arr, genes, cells)
- save reduced unclustered raw data in standard npz format (npz of arr, genes, cells)
Main output:
- reduced memory matrix is used as input to singlecell module
"""
# TODO pass metadata to all functions?
# TODO test and optimize build_basin_states
# TODO build remaining functions + unit tests
# TODO have report script which stores all processing flags/choices/order
# TODO maybe have rundir for results of each proc run
# TODO how to save cluster dict? as npz?
def binarize_data(xi):
return 1.0 * np.where(xi > 0, 1, -1) # mult by 1.0 to cast as float
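def _example_binarize_data():
    # Illustrative sketch only (not part of the original module): strictly
    # positive counts map to +1.0, zeros (and negatives) map to -1.0.
    x = np.array([0, 3, 1, 0])
    return binarize_data(x)  # array([-1.,  1.,  1., -1.])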
def binarize_cluster_dict(cluster_dict, metadata, binarize_method="by_gene", savedir=None):
"""
Args:
- cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
- binarize_method: options for different binarization methods: by_cluster or by_gene (default)
- savedir: dir to save cluster_dict
Returns:
- binarized_cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
"""
assert binarize_method in ['by_cluster', 'by_gene']
num_clusters = metadata['num_clusters']
print(num_clusters, np.max(list(cluster_dict.keys())), cluster_dict[0].shape)
binarize_cluster_dict = {}
if binarize_method == 'by_gene':
for k in range(num_clusters):
cluster_data = cluster_dict[k]
            min_gene_vals = np.amin(cluster_data, axis=1)  # min value each gene has over all cells in the cluster
            max_gene_vals = np.amax(cluster_data, axis=1)
            mids = 0.5 * (min_gene_vals + max_gene_vals)  # per-gene midpoint used as the binarization threshold
# TODO vectorize this
binarized_cluster = np.zeros(cluster_data.shape, dtype=np.int8)
for idx in range(cluster_data.shape[0]):
binarized_cluster[idx,:] = np.where(cluster_data[idx,:] > mids[idx], 1.0, -1.0) # mult by 1.0 to cast as float
binarize_cluster_dict[k] = binarized_cluster
else:
print("WARNING: binarize_method by_cluster is not stable (data too sparse)")
for k in range(num_clusters):
cluster_data = cluster_dict[k]
min_val = np.min(cluster_data)
max_val = np.max(cluster_data)
mid = 0.5 * (max_val - min_val)
binarized_cluster = 1.0 * np.where(cluster_data > mid, 1, -1) # mult by 1.0 to cast as float
binarized_cluster.astype(np.int8)
binarize_cluster_dict[k] = binarized_cluster
# save cluster_dict
if savedir is not None:
cdnpz = savedir + os.sep + 'clusterdict_boolean_compressed.npz'
save_cluster_dict(cdnpz, binarize_cluster_dict)
return binarize_cluster_dict
def binary_cluster_dict_to_memories(binarized_cluster_dict, gene_labels, memory_method="default", savedir=None):
"""
Args:
- binarized_cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
- gene_labels: N x 1 array of 'gene_labels' for each row
- memory_method: options for different memory processing algos
- savedir: where to save the memory file (None -> don't save)
Returns:
- memory_array: i.e. xi matrix, will be N x K (one memory from each cluster)
"""
if gene_labels[0] == 'cluster_id':
print("Warning: gene_labels[0] == 'cluster_id', removing first element")
gene_labels = gene_labels[1:]
num_genes = len(gene_labels)
num_clusters = len(list(binarized_cluster_dict.keys()))
print("num_genes", num_genes)
    eps = 1e-4  # used to bias the np.sign() call to be either 1 or -1 (breaks ties towards the on state)
memory_array = np.zeros((num_genes, num_clusters))
for k in range(num_clusters):
cluster_arr = binarized_cluster_dict[k]
cluster_arr_rowsum = np.sum(cluster_arr, axis=1)
memory_vec = np.sign(cluster_arr_rowsum + eps)
memory_array[:,k] = memory_vec
if savedir is not None:
npzpath = savedir + os.sep + 'mems_genes_types_compressed.npz'
store_memories_genes_clusters(npzpath, memory_array, np.array(gene_labels))
return memory_array
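def _example_binary_cluster_dict_to_memories():
    # Illustrative sketch only (not part of the original module): with two
    # genes and a single cluster of three cells, each memory entry is the sign
    # of the row sum over the cluster (eps breaks ties towards +1).
    binarized = {0: np.array([[1, 1, -1],
                              [-1, -1, 1]])}
    genes = ['geneA', 'geneB']  # hypothetical gene labels
    return binary_cluster_dict_to_memories(binarized, genes)  # [[ 1.], [-1.]]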
def store_memories_genes_clusters(npzpath, mem_arr, genes):
# TODO move cluster labels to metadata with gene labels, pass to this function
cluster_id = load_cluster_labels(DATADIR + os.sep + '2018_scMCA' + os.sep + 'SI_cluster_labels.csv')
clusters = np.array([cluster_id[idx] for idx in range(len(list(cluster_id.keys())))])
save_npz_of_arr_genes_cells(npzpath, mem_arr, genes, clusters)
return
def load_memories_genes_clusters(npzpath):
mem_arr, genes, clusters = load_npz_of_arr_genes_cells(npzpath, verbose=False)
return mem_arr, genes, clusters
def prune_memories_genes(npzpath):
rows_to_delete, mem_arr, genes, clusters = prune_rows(npzpath, save_pruned=True, save_rows=True)
return rows_to_delete, mem_arr, genes, clusters
def prune_cluster_dict(cluster_dict, rows_to_delete, savedir=None):
"""
Args:
- cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
- rows_to_delete: rows to delete from each array (val) in cluster_dict
- savedir: where to save the memory file (None -> don't save)
"""
pruned_cluster_dict = {k: 0 for k in list(cluster_dict.keys())}
for k in range(len(list(cluster_dict.keys()))):
cluster_data = cluster_dict[k]
pruned_cluster_dict[k] = np.delete(cluster_data, rows_to_delete, axis=0)
# save pruned_cluster_dict
if savedir is not None:
cdnpz = savedir + os.sep + 'clusterdict_boolean_compressed_pruned.npz'
save_cluster_dict(cdnpz, pruned_cluster_dict)
return pruned_cluster_dict
def save_cluster_dict(npzpath, cluster_dict):
# convert int keys to str (and deconvert on loading)
print("saving cluster dict at %s..." % npzpath)
cluster_dict = {str(k):v for k,v in cluster_dict.items()}
np.savez_compressed(npzpath, **cluster_dict)
print("done saving cluster dict")
return
def load_cluster_dict(npzpath):
print("loading cluster dict at %s..." % npzpath)
cluster_dict =
|
np.load(npzpath)
|
numpy.load
|
from abc import abstractmethod
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import csv
# ********* Geometric *********
class Shape:
"""
An abstract class for geometric shapes defining some key methods required
"""
@abstractmethod
def get_perimeter(self, start, end, num_points):
"""
Create a list of points between the user defined start and end positions on the perimeter of the shape
:param start: Position at which the list of points should begin
:type start: float
:param end: Position at which the list of points should end
:type end: float
:param num_points: Number of points
:type num_points: int
:return: A list of points (x,y) evenly spaced on the perimeter of the shape between the start and end positions
:rtype: numpy.ndarray
"""
pass
@abstractmethod
def get_grid(self, spacing):
"""
Create a grid of points spaced uniformly across the shape
:param spacing: Spacing between points in the grid
:type spacing: float
:return: A list of points (x,y) uniformly space across the shape
:rtype: numpy.ndarray
"""
pass
@abstractmethod
def is_point_inside(self, point):
"""
Check whether or not a point is inside the shape
:param point: list/tuple of the coordinates (x, y) of a point
:type point: list
:return: A bool stating whether or not the point is within the shape
:rtype: bool
"""
pass
class Circle(Shape):
"""
A geometric class for a circle
Attributes
----------
centre : list
A list of coordinates (x, y) describing the centre of the circle
radius : float
The radius of the circle
"""
def __init__(self, centre, radius):
"""
Creates a circle
:param centre: The coordinates (x,y) of centre of the circle
:type centre: list
:param radius: The radius of the circle
:type radius: float
:rtype: Circle
"""
self._centre = centre
self._radius = radius
@property
def centre(self):
"""
A list of coordinates (x, y) describing the centre of the circle
:return: (x, y) of the centre of the circle
:rtype: list
"""
return self._centre
@property
def radius(self):
"""
The radius of the circle
:return: The radius
:rtype: float
"""
return self._radius
def get_circular_points(self, start_angle, end_angle, num_points, radius, decimal_places=None):
"""
Create a list of points between the user defined start and end angles (in degrees) on the perimeter of a new circle sharing
the centre point of this circle with a different radius
:param start_angle: Position at which the list of points should begin
:type start_angle: float
:param end_angle: Position at which the list of points should end
:type end_angle: float
:param num_points: Number of points
:type num_points: int
:param radius: Radius of the circle on which the points are placed
:type radius: float
:param decimal_places: Number of decimal places the coordinates are returned with - None: there is no rounding
:type decimal_places: int
:return: An array of points (x,y) evenly spaced on the perimeter of the new circle between the start and end angles
:rtype: numpy.ndarray
"""
points = np.zeros((num_points, 2), float)
full_angle = 180 - abs(abs(end_angle - start_angle) - 180)
if full_angle == 0:
full_angle = 360
delta_angle = full_angle / num_points
for i in range(num_points):
points[i][0] = self._centre[0] + np.cos(np.radians(90 + start_angle + delta_angle * i)) * radius
points[i][1] = self._centre[1] + np.sin(np.radians(90 + start_angle + delta_angle * i)) * radius
if decimal_places is not None:
return np.array(np.around(points, decimal_places))
else:
return np.array(points)
def get_perimeter(self, start_angle, end_angle, num_points, decimal_places=None):
"""
Create a list of points between the user defined start and end angles on the perimeter of the circle
:param start_angle: Position at which the list of points should begin
:type start_angle: float
:param end_angle: Position at which the list of points should end
:type end_angle: float
:param num_points: Number of points
:type num_points: int
:param decimal_places: Number of decimal places the coordinates are returned with - None: there is no rounding
:type decimal_places: int
:return: A list of points (x,y) evenly spaced on the perimeter of the shape between the start and end angles
:rtype: numpy.ndarray
"""
return np.array(self.get_circular_points(start_angle, end_angle, num_points, self._radius, decimal_places))
def get_grid(self, spacing, alpha=2):
"""
Create a grid of points spaced uniformly across the circle using the sunflower seed arrangement algorithm
:param spacing: Approximate spacing between points in the grid
:type spacing: float
:param alpha: Determines the evenness of the boundary - 0 is jagged, 2 is smooth. Above 2 is not recommended
:type alpha: float
:return: A list of points (x,y) uniformly spaced across the circle
:rtype: numpy.ndarray
"""
# Algorithm is found at the stack overflow thread linked below:
# https://stackoverflow.com/questions/28567166/uniformly-distribute-x-points-inside-a-circle
# Calculates the number of points (n) from the spacing
area = np.pi * self._radius**2
n = int(area / spacing**2)
points = np.zeros((n, 2), float)
b = int(alpha * np.sqrt(n)) # number of boundary points
golden_ratio = (np.sqrt(5) + 1) / 2
for point in range(1, n + 1):
if point > n - b:
r = 1
else:
r = np.sqrt(point - 1 / 2) / np.sqrt(n - (b + 1) / 2)
theta = 2 * np.pi * point / golden_ratio**2
points[point - 1][0] = self._centre[0] + r*np.cos(theta) * self._radius
points[point - 1][1] = self._centre[1] + r*np.sin(theta) * self._radius
return np.array(points)
def is_point_inside(self, point):
"""
Check whether or not a point is inside the circle
:param point: List/tuple of the coordinates (x, y) of a point
:type point: list
:return: A bool stating whether or not the point is within the circle
:rtype: bool
"""
# checks if the distance from the centre of the circle to the point, d, is less than or equal to the radius
d = np.sqrt((point[0] - self.centre[0])**2 + (point[1] - self.centre[1])**2)
return d <= self.radius
class Rectangle(Shape):
"""
A geometric class for a rectangle
Attributes
----------
coordinate : list
A list of coordinates (x, y) describing the centre or bottom left of the rectangle
width : float
The width of the rectangle
height : float
The height of the rectangle
coordinate_pos : str
Describes the position of the coordinate parameter - either "centre" or "bottom left"
"""
def __init__(self, coordinate, width, height, coordinate_pos="bottom left"):
"""
Creates a rectangle
:param coordinate: A list of coordinates (x, y) describing the centre or bottom left of the rectangle
:type coordinate: list
:param width: The width of the rectangle
:type width: float
:param height: The height of the rectangle
:type height: float
:param coordinate_pos: Description of the position of the coordinate - "centre" or "bottom left" of the rectangle
:type coordinate_pos: str
:rtype: Rectangle
"""
if coordinate_pos == 'centre':
self._xy = [coordinate[0] - width / 2, coordinate[1] - height / 2]
elif coordinate_pos == 'bottom left':
self._xy = coordinate
else:
print("coordinate_pos must be in \"centre\" or \"bottom left\"")
quit(1)
self._width = width
self._height = height
@property
def xy(self):
"""
A list of coordinates (x, y) describing the bottom left of the rectangle
:return: (x, y) of the bottom left of the rectangle
:rtype: list
"""
return self._xy
@property
def width(self):
"""
The width of the rectangle
:return: The width
:rtype: float
"""
return self._width
@property
def height(self):
"""
The height of the rectangle
:return: The height
:rtype: float
"""
return self._height
def get_perimeter(self, start_point, end_point, num_points):
pass
def get_grid(self, spacing):
"""
Create a grid of points spaced uniformly across the rectangle
:param spacing: Approximate spacing between points in the grid
:type spacing: float
:return: A list of points (x,y) uniformly spaced across the rectangle
:rtype: numpy.ndarray
"""
num_x = int(np.floor(self._width / spacing)) + 1
num_y = int(np.floor(self._height / spacing)) + 1
num_points = int(num_x * num_y)
points = np.zeros((num_points, 2), float)
for x in range(num_x):
for y in range(num_y):
points[y * num_x + x][0] = self._xy[0] + x * spacing
points[y * num_x + x][1] = self._xy[1] + y * spacing
return np.array(points)
def is_point_inside(self, point):
"""
Check whether or not a point is inside the rectangle
:param point: list/tuple of the coordinates (x, y) of a point
:type point: tuple
:return: A bool stating whether or not the point is within the rectangle
:rtype: bool
"""
# a point lies inside iff the sum of its distances to the left and right edges equals the width
# and the sum of its distances to the bottom and top edges equals the height (the sums exceed them outside)
dx = abs(point[0] - self._xy[0]) + abs(self._xy[0] + self._width - point[0])
dy = abs(point[1] - self._xy[1]) + abs(self._xy[1] + self._height - point[1])
return dy <= self._height and dx <= self._width
# ********* PyZones specific setup classes *********
class Zone(Circle):
"""
A sound zone to be used in setup of the soundfield's geometry
Attributes
----------
centre : list
A list of coordinates (x, y) describing the centre of the circle
radius : float
The radius of the circle
colour : list
A list of float values (r, g, b)
"""
def __init__(self, centre, radius, colour=None):
"""
Creates a sound zone
:param centre: A list of coordinates (x, y) describing the centre of the circle
:type centre: list
:param radius: The radius of the circle
:type radius: float
:param colour: A list of float values (r, g, b) - None results in black (0, 0, 0)
:type colour: list
:rtype: Zone
"""
if colour is None:
self._colour = [0, 0, 0]
else:
self._colour = colour
Circle.__init__(self, centre, radius)
@property
def colour(self):
"""
A list of float values (r, g, b)
:return: A list of float values (r, g, b)
:rtype: list
"""
return self._colour
class Soundfield(Rectangle):
"""
The soundfield being used in the simulation. It can be thought of as the room, although no room reflections are modelled
Attributes
----------
_zones : list
A list of the zones used in the simulation.
_fig : matplotlib.figure.Figure
The matplotlib figure used for plotting the soundfield
_axes : matplotlib.axes.Axes
The matplotlib axes used for plotting the soundfield
"""
def __init__(self, coordinate, width, height, coordinate_pos="bottom left"):
"""
Creates a soundfield to be used for simulations. This class is exclusively for the graphics and visualisations
:param coordinate: A list of coordinates (x, y) describing the centre or bottom left of the rectangle
:type coordinate: list
:param width: The width of the rectangle
:type width: float
:param height: The height of the rectangle
:type height: float
:param coordinate_pos: The position of the coordinate - "centre" or "bottom left" of the rectangle
:type coordinate_pos: str
:rtype: Soundfield
"""
Rectangle.__init__(self, coordinate, width, height, coordinate_pos=coordinate_pos)
self._zones = []
self._fig = plt.figure(figsize=(6, 6), dpi=300)
self._axes = self._fig.add_subplot(111)
self._axes.set_xlim([self.xy[0], self.xy[0] + width])
self._axes.set_ylim([self.xy[1], self.xy[1] + height])
self._cax = self._fig.add_axes([0.125, 0.94, 0.775, 0.04])
def add_zones(self, zones):
"""
Add the sound zone(s) to the soundfield such that they can be seen in the visualisations of the soundfield
:param zones: The zone(s) to be added to the soundfield
:type zones: list[Zone]
"""
if type(zones) is not list:
zones = [zones]
for zone in zones:
circle = plt.Circle(zone.centre, zone.radius, fill=False)
circle.set_edgecolor(zone.colour)
self._axes.add_patch(circle)
self._zones.append(zone)
def add_sound_objects(self, *args):
"""
Add the sound objects to the soundfield such that they can be seen in the visualisations of the soundfield
:param args: a single Microphone/Loudspeaker or a MicrophoneArray/LoudspeakerArray
"""
def add_ls(ls):
centre = ls.position
x = centre[0] - (ls.width / 2)
y = centre[1] - (ls.height / 2)
angle = 0
# change the orientation of the loudspeaker such that it's looking at a point (purely aesthetic)
if ls.look_at is not None:
x_dif = ls.look_at[0] - centre[0]
y_dif = ls.look_at[1] - centre[1]
if x_dif == 0:
angle = 0
elif y_dif == 0:
angle = np.pi / 2
elif x_dif > 0:
angle = np.arctan(y_dif / x_dif) - np.pi / 2
else:
angle = np.arctan(y_dif / x_dif) + np.pi / 2
new_x = (x - centre[0]) * np.cos(angle) - (y - centre[1]) * np.sin(angle) + centre[0]
new_y = (x - centre[0]) * np.sin(angle) + (y - centre[1]) *
|
np.cos(angle)
|
numpy.cos
|
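Below is a minimal, self-contained sketch of the sunflower-seed layout that Circle.get_grid above implements, written for a unit circle at the origin. The helper name sunflower_points and the choice to take a point count n directly (rather than a spacing) are illustrative assumptions, not part of the classes above.
import numpy as np

def sunflower_points(n, alpha=2.0):
    """Spread n points over the unit disc; roughly alpha*sqrt(n) of them sit on the boundary."""
    b = int(alpha * np.sqrt(n))                       # number of boundary points
    golden_ratio = (np.sqrt(5) + 1) / 2
    k = np.arange(1, n + 1)
    # interior points follow r ~ sqrt(k); boundary points are pinned to r = 1
    r = np.where(k > n - b, 1.0, np.sqrt(k - 0.5) / np.sqrt(n - (b + 1) / 2))
    theta = 2 * np.pi * k / golden_ratio**2           # successive points rotate by the golden angle
    return np.column_stack((r * np.cos(theta), r * np.sin(theta)))

pts = sunflower_points(500)
print(pts.shape)  # (500, 2)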
from speaksee.data import TextField, ImageDetectionsField
from data import COCOControlSetField, FlickrDetectionField, FlickrControlSetField
from data.dataset import COCOEntities, FlickrEntities
from models import ControllableCaptioningModel
from models import ControllableCaptioningModel_NoVisualSentinel, ControllableCaptioningModel_SingleSentinel
from speaksee.data import DataLoader, DictionaryDataset, RawField
from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice
from speaksee.evaluation import PTBTokenizer
from utils import NounIoU
from utils import SinkhornNet
from config import *
import torch
import random
import numpy as np
import itertools
import argparse
import os
import munkres
from tqdm import tqdm
def selfBLEU(caption_set, text_field):
caption_set = np.concatenate(caption_set, axis=0)
gen = {}
for i, cap in enumerate(caption_set):
pred_cap = text_field.decode(cap, join_words=False)
pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)])
gen[i] = [pred_cap]
set1 = {}
set2 = {}
count = 0
for i in range(0, len(gen)):
for j in range(i+1, len(gen)):
set1[count] = gen[i]
set2[count] = gen[j]
count += 1
set1_t = PTBTokenizer.tokenize(set1)
set2_t = PTBTokenizer.tokenize(set2)
val_bleu, _ = Bleu(n=4).compute_score(set1_t, set2_t)
method = ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']
scores = []
for metric, score in zip(method, val_bleu):
scores.append(score)
return scores
random.seed(1234)
torch.manual_seed(1234)
device = torch.device('cuda')
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', default='ours', type=str,
help='model name: ours | ours_without_visual_sentinel | ours_with_single_sentinel')
parser.add_argument('--dataset', default='coco', type=str, help='dataset: coco | flickr')
parser.add_argument('--sample_rl', action='store_true', help='test the model with cider optimization')
parser.add_argument('--sample_rl_nw', action='store_true', help='test the model with cider + nw optimization')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--nb_workers', default=0, type=int, help='number of workers')
opt_test = parser.parse_args()
print(opt_test)
assert(opt_test.exp_name in ['ours', 'ours_without_visual_sentinel', 'ours_with_single_sentinel'])
if not opt_test.sample_rl and not opt_test.sample_rl_nw:
exp_name ='%s_%s' % (opt_test.exp_name, opt_test.dataset)
print('Loading \"%s\" model trained with cross-entropy loss.' % opt_test.exp_name)
if opt_test.sample_rl:
exp_name = '%s_%s_%s' % (opt_test.exp_name, opt_test.dataset, 'rl')
print('Loading \"%s\" model trained with CIDEr optimization.' % opt_test.exp_name)
if opt_test.sample_rl_nw:
exp_name = '%s_%s_%s' % (opt_test.exp_name, opt_test.dataset, 'rl_nw')
print('Loading \"%s\" model trained with CIDEr + NW optimization.' % opt_test.exp_name)
saved_data = torch.load('saved_models/%s/%s.pth' % (opt_test.exp_name, exp_name))
opt = saved_data['opt']
saved_data_sinkhorn = torch.load('saved_models/sinkhorn_network/sinkhorn_network_%s.pth' % opt_test.dataset)
opt_sinkhorn = saved_data_sinkhorn['opt']
if opt_test.dataset == 'coco':
image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False)
det_field = COCOControlSetField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),
classes_path=os.path.join(coco_root, 'object_class_list.txt'),
img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'),
precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'),
fix_length=opt_sinkhorn.max_len, max_detections=20)
text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20)
dataset = COCOEntities(image_field, det_field, text_field,
img_root='',
ann_root=os.path.join(coco_root, 'annotations'),
entities_file=os.path.join(coco_root, 'coco_entities.json'),
id_root=os.path.join(coco_root, 'annotations'))
test_dataset = COCOEntities(image_field, det_field, RawField(),
img_root='',
ann_root=os.path.join(coco_root, 'annotations'),
entities_file=os.path.join(coco_root, 'coco_entities.json'),
id_root=os.path.join(coco_root, 'annotations'),
filtering=True)
noun_iou = NounIoU(pre_comp_file=os.path.join(coco_root, '%s_noun_glove.pkl' % opt_test.dataset))
elif opt_test.dataset == 'flickr':
image_field = FlickrDetectionField(detections_path=os.path.join(flickr_root, 'flickr30k_detections.hdf5'))
det_field = FlickrControlSetField(detections_path=os.path.join(flickr_root, 'flickr30k_detections.hdf5'),
classes_path=os.path.join(flickr_root, 'object_class_list.txt'),
img_shapes_path=os.path.join(flickr_root, 'flickr_img_shapes.json'),
precomp_glove_path=os.path.join(flickr_root, 'object_class_glove.pkl'),
fix_length=opt_sinkhorn.max_len)
text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20)
dataset = FlickrEntities(image_field, text_field, det_field,
img_root='',
ann_file=os.path.join(flickr_root, 'flickr30k_annotations.json'),
entities_root=flickr_entities_root)
test_dataset = FlickrEntities(image_field, RawField(), det_field,
img_root='',
ann_file=os.path.join(flickr_root, 'flickr30k_annotations.json'),
entities_root=flickr_entities_root)
noun_iou = NounIoU(pre_comp_file=os.path.join(flickr_root, '%s_noun_glove.pkl' % opt_test.dataset))
else:
raise NotImplementedError
train_dataset, val_dataset, _ = dataset.splits
text_field.build_vocab(train_dataset, val_dataset, min_freq=5)
sinkhorn_net = SinkhornNet(opt_sinkhorn.max_len, opt_sinkhorn.n_iters, opt_sinkhorn.tau).to(device)
if opt_test.exp_name == 'ours':
model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'],
h2_first_lstm=opt.h2_first_lstm, img_second_lstm=opt.img_second_lstm).to(device)
elif opt_test.exp_name == 'ours_without_visual_sentinel':
model = ControllableCaptioningModel_NoVisualSentinel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'],
h2_first_lstm=opt.h2_first_lstm,
img_second_lstm=opt.img_second_lstm).to(device)
elif opt_test.exp_name == 'ours_with_single_sentinel':
model = ControllableCaptioningModel_SingleSentinel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'],
h2_first_lstm=opt.h2_first_lstm,
img_second_lstm=opt.img_second_lstm).to(device)
else:
raise NotImplementedError
_, _, test_dataset = test_dataset.splits
test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image')
dataloader_test = DataLoader(test_dataset, batch_size=opt_test.batch_size, num_workers=opt_test.nb_workers)
model.eval()
model.load_state_dict(saved_data['state_dict'])
sinkhorn_net.eval()
sinkhorn_net.load_state_dict(saved_data_sinkhorn['state_dict'])
predictions = []
gt_captions = []
max_len = 20
diversity_scores = []
with tqdm(desc='Test', unit='it', total=len(iter(dataloader_test))) as pbar:
for it, (keys, values) in enumerate(iter(dataloader_test)):
detections = keys
det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, captions = values
for i in range(detections.size(0)):
det_seqs_all_i = det_seqs_all[i].numpy()
if opt_test.dataset == 'coco':
det_seqs_all_sum = np.sum(np.abs(det_seqs_all_i), axis=-1)
elif opt_test.dataset == 'flickr':
det_seqs_all_sum = np.sum(np.abs(det_seqs_vis[i].numpy()), axis=-1)
else:
raise NotImplementedError
_, unique_indices, unique_inverse = np.unique(det_seqs_all_sum, axis=0, return_index=True, return_inverse=True)
det_seqs_vis_unique = det_seqs_vis[i][unique_indices]
det_seqs_txt_unique = det_seqs_txt[i][unique_indices]
det_seqs_pos_unique = det_seqs_pos[i][unique_indices]
det_seqs_all_unique = det_seqs_all_i[unique_indices]
this_captions = [[captions[i][ii] for ii in range(len(unique_inverse)) if unique_inverse[ii] == jj] for jj in range(det_seqs_all_unique.shape[0])]
det_seqs_perm = torch.cat((det_seqs_txt_unique, det_seqs_vis_unique, det_seqs_pos_unique), dim=-1).to(device)
matrices = sinkhorn_net(det_seqs_perm)
matrices = torch.transpose(matrices, 1, 2)
if isinstance(matrices, torch.Tensor):
matrices = matrices.detach().cpu().numpy()
m = munkres.Munkres()
# To generate a diverse set of 5 captions
current_div_set = []
for _ in range(0,5):
det_seqs_recons = np.zeros(det_seqs_all_unique.shape)
for j, matrix in enumerate(matrices):
seqs = []
ass = m.compute(munkres.make_cost_matrix(matrix))
perm_matrix = np.zeros_like(matrix)
for a in ass:
if np.random.random() > 0.8:
perm_matrix[a] = 0.2
else:
perm_matrix[a] = 0.8
perm = np.reshape(det_seqs_all_unique[j], (det_seqs_all_unique.shape[1], -1))
recons = np.dot(perm_matrix, perm)
recons = np.reshape(recons, det_seqs_all_unique.shape[1:])
recons = recons[np.sum(recons, (1, 2)) != 0]
last = recons.shape[0] - 1
det_seqs_recons[j, :recons.shape[0]] = recons
det_seqs_recons[:, last + 1:] = recons[last:last+1]
detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device)
detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1))
out, _ = model.beam_search((detections_i, det_seqs_recons), eos_idxs=[text_field.vocab.stoi['<eos>'], -1],
beam_size=5, out_size=1)
out = out[0].data.cpu().numpy()
for o, caps in zip(out, this_captions):
predictions.append(np.expand_dims(o, axis=0))
current_div_set.append(
|
np.expand_dims(o, axis=0)
|
numpy.expand_dims
|
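As a side note on the Munkres step above: the script turns each soft Sinkhorn assignment matrix into a hard permutation via the Hungarian algorithm. A small stand-alone sketch of that conversion, with a made-up 3x3 soft matrix, looks like this.
import numpy as np
import munkres

soft = np.array([[0.7, 0.2, 0.1],
                 [0.1, 0.1, 0.8],
                 [0.2, 0.7, 0.1]])        # made-up soft assignment scores

m = munkres.Munkres()
# make_cost_matrix turns the "profit" matrix into costs, so compute() maximises the original scores
assignment = m.compute(munkres.make_cost_matrix(soft))

perm = np.zeros_like(soft)
for row, col in assignment:
    perm[row, col] = 1.0
print(perm)  # hard permutation: row 0 -> col 0, row 1 -> col 2, row 2 -> col 1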
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import imageio
import os
import argparse
import time
import numpy.matlib
from scipy.signal import medfilt2d
from scipy.signal import medfilt
from scipy.sparse import coo_matrix
from scipy.sparse import bsr_matrix
import math
from utils import fill_invalid, weighted_median_filter
###
# Fast Cost-Volume Filtering for Visual Correspondence and Beyond,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, CVPR 2011
# Code reworked for easier understanding
###
save_path = './fast_images'
os.makedirs(save_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--left_image', '-l', default = './source_images/tsukuba-l.png', type=str, help='left image path')
parser.add_argument('--right_image', '-r', default = './source_images/tsukuba-r.png', type=str, help='right image path')
parser.add_argument('--disparity_image', '-o', default = './fast_images/fast_tsukuba_my.png', type=str, help='disparity image path')
parser.add_argument('--bilateral', '-b', action='store_true', help='use bilateral filter for cost aggregation, default=guided filter')
args = parser.parse_args()
# guided filter constants
r = 9
eps = 0.01 # 0.0001 # smaller epsilon preserves edges; larger epsilon makes the result closer to a mean filter
# cost matching constants
thresColor = 7./255.
thresGrad = 2./255.
threshBorder = 3./255.
gamma = 0.11
# weight median filter constants
sigma_c = 0.1
sigma_s = 9
r_median = 19 # window size
def computeDisp(left_image_path, right_image_path, max_disp):
# Image read as grayscale
left_img_origin = Image.open(left_image_path)
img_L = left_img_origin.convert('L') # grayscale
left_img_origin = np.asarray(left_img_origin).astype(np.float32) / 255.
img_L = np.asarray(img_L).astype(np.float32) / 255.
right_img_origin = Image.open(right_image_path)
img_R = right_img_origin.convert('L') # grayscale
right_img_origin =
|
np.asarray(right_img_origin)
|
numpy.asarray
|
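For context on the matching constants above (thresColor, thresGrad): cost-volume methods start from a per-pixel, per-disparity matching cost before any filtering. The sketch below builds only the truncated colour-difference term over a disparity range; it is a simplified illustration under assumed inputs, not the file's computeDisp, which also uses a gradient term and guided-filter aggregation.
import numpy as np

def color_cost_volume(img_l, img_r, max_disp, thres_color=7. / 255.):
    """Truncated absolute colour difference between img_l and img_r shifted by each disparity."""
    h, w = img_l.shape[:2]
    cost = np.full((max_disp, h, w), thres_color, dtype=np.float32)
    for d in range(max_disp):
        shifted = np.empty_like(img_r)
        shifted[:, d:] = img_r[:, :w - d]
        shifted[:, :d] = img_r[:, :1]            # pad the left border with the first column
        diff = np.abs(img_l - shifted)
        if diff.ndim == 3:                       # average over colour channels if present
            diff = diff.mean(axis=2)
        cost[d] = np.minimum(diff, thres_color)  # truncate to limit the influence of outliers
    return cost

# usage (hypothetical inputs): cost = color_cost_volume(left / 255., right / 255., max_disp=16)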
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import scipy as sc
from scipy import ndimage
import random as rand
from sklearn import preprocessing, linear_model
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import MaxNLocator
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from tabulate import tabulate
import dill
import copy
from core.controllers import PDController
from core.dynamics import ConfigurationDynamics
from koopman_core.controllers import OpenLoopController, MPCController, PerturbedController, NonlinearMPCControllerNb, BilinearMPCControllerNb
from koopman_core.dynamics import LinearLiftedDynamics, BilinearLiftedDynamics
from koopman_core.learning import Edmd, BilinearEdmd
from koopman_core.basis_functions import PlanarQuadBasis
from koopman_core.learning.utils import differentiate_vec
from koopman_core.systems import PlanarQuadrotorForceInput
class QuadrotorPdOutput(ConfigurationDynamics):
def __init__(self, dynamics, xd, t_d, n, m):
ConfigurationDynamics.__init__(self, dynamics, 1)
self.xd = xd
self.t_d = t_d
self.xd_dot = differentiate_vec(self.xd, self.t_d)
self.n = n
self.m = m
def proportional(self, x, t):
q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
return self.y(q) - self.y_d(t)
def derivative(self, x, t):
q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
return self.dydq(q)@q_dot - self.y_d_dot(t)
def y(self, q):
return q
def dydq(self, q):
return np.eye(int(self.n/2))
def d2ydq2(self, q):
return np.zeros((int(self.n/2), int(self.n/2), int(self.n/2)))
def y_d(self, t):
return self.desired_state_(t)[:int(self.n/2)]
def y_d_dot(self, t):
return self.desired_state_(t)[int(self.n/2):]
def y_d_ddot(self, t):
return self.desired_state_dot_(t)[int(self.n/2):]
def desired_state_(self, t):
return [np.interp(t, self.t_d.flatten(),self.xd[:,ii].flatten()) for ii in range(self.xd.shape[1])]
def desired_state_dot_(self, t):
return [np.interp(t, self.t_d.flatten(),self.xd_dot[:,ii].flatten()) for ii in range(self.xd_dot.shape[1])]
class PlanarQuadrotorForceInputDiscrete(PlanarQuadrotorForceInput):
def __init__(self, mass, inertia, prop_arm, g=9.81, dt=1e-2):
PlanarQuadrotorForceInput.__init__(self, mass, inertia, prop_arm, g=g)
self.dt=dt
def eval_dot(self, x, u, t):
return x + self.dt*self.drift(x, t) + self.dt*np.dot(self.act(x, t),u)
def get_linearization(self, x0, x1, u0, t):
m, J, b, g = self.params
A_lin = np.eye(self.n) + self.dt*np.array([[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, -(1/m)*np.cos(x0[2])*u0[0] -(1/m)*np.cos(x0[2])*u0[1], 0, 0, 0],
[0, 0, -(1/m)*np.sin(x0[2])*u0[0] -(1/m)*np.sin(x0[2])*u0[1], 0, 0, 0],
[0, 0, 0, 0, 0, 0],])
B_lin = self.dt*np.array([[0, 0],
[0, 0],
[0, 0],
[-(1/m)*np.sin(x0[2]), -(1/m)*np.sin(x0[2])],
[(1/m)*np.cos(x0[2]), (1/m)*
|
np.cos(x0[2])
|
numpy.cos
|
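The eval_dot method above is a forward-Euler discretisation of a control-affine system, x_{k+1} = x_k + dt*(drift(x_k) + act(x_k) @ u_k). A tiny stand-alone sketch of the same pattern on a made-up double integrator:
import numpy as np

def euler_step(x, u, dt, drift, act):
    """One forward-Euler step of x' = drift(x) + act(x) @ u."""
    return x + dt * drift(x) + dt * act(x) @ u

drift = lambda x: np.array([x[1], 0.0])   # toy double integrator: state = [position, velocity]
act = lambda x: np.array([[0.0], [1.0]])  # the input is an acceleration

x = np.zeros(2)
for _ in range(100):
    x = euler_step(x, np.array([1.0]), 1e-2, drift, act)
print(x)  # roughly [0.5, 1.0] after 1 s of unit acceleration (0.495 exactly with forward Euler)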
# This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "huber_0"
prob = None
opt_val = None
# Variable declarations
import scipy.sparse as sps
np.random.seed(0)
m = 5000
n = 200
x0 =
|
np.random.randn(n)
|
numpy.random.randn
|
# -*- coding: utf-8 -*-
import math
import operator
from dataclasses import dataclass
from functools import reduce
from typing import *
import numpy as np
from .typing_ import *
__all__ = [
'MetricStats',
'MetricCollector', 'GeneralMetricCollector',
'ScalarMetricCollector', 'ScalarMetricsLogger',
]
@dataclass
class MetricStats(object):
"""Mean and std(dev) of a metric."""
__slots__ = ('mean', 'var', 'std')
mean: MetricValue
var: Optional[MetricValue]
std: Optional[MetricValue]
def to_json(self, mean_only: bool = False
) -> Union[MetricValue, Dict[str, MetricValue]]:
"""
Get the JSON representation of this metric stats object.
>>> MetricStats(mean=1.0, var=None, std=None).to_json()
1.0
>>> MetricStats(mean=1.0, var=4.0, std=2.0).to_json()
{'mean': 1.0, 'std': 2.0}
>>> MetricStats(mean=1.0, var=4.0, std=2.0).to_json(mean_only=True)
1.0
Args:
mean_only: Whether or not to include only the mean of the metric.
Returns:
The JSON metric value.
"""
if mean_only or self.std is None:
return self.mean
return {'mean': self.mean, 'std': self.std}
class MetricCollector(object):
"""Base class of a metric statistics collector."""
def reset(self):
"""Reset the collector to initial state."""
raise NotImplementedError()
@property
def has_stats(self) -> bool:
"""Whether or not any value has been collected?"""
raise NotImplementedError()
@property
def stats(self) -> Optional[MetricStats]:
"""
Get the metric object.
Returns:
The statistics, or :obj:`None` if no value has been collected.
"""
raise NotImplementedError()
def update(self, values: MetricValue, weight: MetricValue = 1.):
"""
Update the metric statistics from values.
This method uses the following equation to update `mean` and `square`:
.. math::
\\frac{\\sum_{i=1}^n w_i f(x_i)}{\\sum_{j=1}^n w_j} =
\\frac{\\sum_{i=1}^m w_i f(x_i)}{\\sum_{j=1}^m w_j} +
\\frac{\\sum_{i=m+1}^n w_i}{\\sum_{j=1}^n w_j} \\Bigg(
\\frac{\\sum_{i=m+1}^n w_i f(x_i)}{\\sum_{j=m+1}^n w_j} -
\\frac{\\sum_{i=1}^m w_i f(x_i)}{\\sum_{j=1}^m w_j}
\\Bigg)
Args:
values: Values to be collected in batch, numpy array or scalar
whose shape ends with ``self.shape``. The leading shape in
front of ``self.shape`` is regarded as the batch shape.
weight: Weights of the `values`, should be broadcastable against
the batch shape. (default is 1)
Raises:
ValueError: If the shape of `values` does not end with `self.shape`.
"""
raise NotImplementedError()
class GeneralMetricCollector(MetricCollector):
"""
Class to collect statistics of metric values.
To collect statistics of a scalar:
>>> collector = GeneralMetricCollector()
>>> collector.stats is None
True
>>> collector.update(1.)
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=1.0, var=None, std=None)
>>> for value in [2., 3., 4.]:
... collector.update(value)
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=2.5, var=1.25, std=1.11803...)
>>> collector.update(np.array([5., 6., 7., 8.]))
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=4.5, var=5.25, std=2.29128...)
weighted statistics:
>>> collector = GeneralMetricCollector()
>>> for value in [1., 2., 3., 4.]:
... collector.update(value, weight=value)
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=3.0, var=1.0, std=1.0)
>>> collector.update(np.array([5., 6., 7., 8.]),
... weight=np.array([5., 6., 7., 8.]))
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=5.66666..., var=3.88888..., std=1.97202...)
To collect element-wise statistics of a vector:
>>> collector = GeneralMetricCollector(shape=[3])
>>> x = np.arange(12).reshape([4, 3])
>>> for value in x:
... collector.update(value)
>>> collector.stats # doctest: +ELLIPSIS
MetricStats(mean=array([4.5, 5.5, 6.5]), var=array([11.25, 11.25, 11.25]), std=array([3.35410..., 3.35410..., 3.35410...]))
"""
__slots__ = ('shape', 'dtype', 'mean', 'second_order_moment',
'counter', 'weight_sum', '_array_to_value_type',
'_value')
shape: ArrayShape
dtype: np.dtype
mean: np.ndarray
second_order_moment: np.ndarray
counter: int # count the number of times where `add` is called
weight_sum: float # sum of all weights of added values
_array_to_value_type: Callable[[Any], MetricValue]
_value: Optional[MetricStats]
def __init__(self,
shape: Sequence[int] = (),
dtype: np.dtype = np.float64):
self.shape = tuple(map(int, shape))
self.dtype = np.dtype(dtype)
self.reset()
if self.shape == ():
self._array_to_value_type = float
else:
self._array_to_value_type = lambda v: v
def reset(self):
self.mean = np.zeros(shape=self.shape, dtype=self.dtype) # E[X]
self.second_order_moment = np.zeros(shape=self.shape, dtype=self.dtype) # E[X^2]
self.counter = 0
self.weight_sum = 0.
self._value = None
@property
def has_stats(self) -> bool:
return self.counter > 0
def _make_value(self):
if self.counter > 0:
mean = self._array_to_value_type(self.mean)
if self.counter > 1:
var = self._array_to_value_type(
np.maximum(self.second_order_moment - self.mean ** 2, 0.))
std = self._array_to_value_type(np.sqrt(var))
else:
var = std = None
self._value = MetricStats(mean=mean, var=var, std=std)
@property
def stats(self) -> Optional[MetricStats]:
if self._value is None:
self._make_value()
return self._value
def update(self, values: MetricValue, weight: MetricValue = 1.):
values = np.asarray(values)
if not values.size:
return
weight =
|
np.asarray(weight)
|
numpy.asarray
|
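The docstring of update above describes a streaming, weighted update of the running mean (and, analogously, of the second moment). A small numerical sketch of that update rule, written as a stand-alone helper rather than the class method, confirms it matches the direct batch computation.
import numpy as np

def streaming_update(mean, second_moment, weight_sum, values, weights):
    batch_w = weights.sum()
    new_weight_sum = weight_sum + batch_w
    r = batch_w / new_weight_sum          # fraction of the total weight contributed by this batch
    mean = mean + r * (np.average(values, weights=weights) - mean)
    second_moment = second_moment + r * (np.average(values**2, weights=weights) - second_moment)
    return mean, second_moment, new_weight_sum

mean = second_moment = weight_sum = 0.0
for chunk in np.array_split(np.arange(1., 9.), 3):      # feed the data in three batches
    mean, second_moment, weight_sum = streaming_update(mean, second_moment, weight_sum,
                                                       chunk, np.ones_like(chunk))

x = np.arange(1., 9.)
print(np.isclose(mean, x.mean()), np.isclose(second_moment - mean**2, x.var()))  # True True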
import numpy as np
import matplotlib.pyplot as plt
def make_meshgrid(x, y, num_pts=300, lims=None):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
num_pts: number of points along each axis of the meshgrid, optional
lims: (x_min, x_max, y_min, y_max) limits of the meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
if lims is None:
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
else:
x_min, x_max, y_min, y_max = lims
xx, yy = np.meshgrid(np.linspace(x_min, x_max, num_pts),
np.linspace(y_min, y_max, num_pts))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, transformation=None, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
X = np.c_[xx.ravel(), yy.ravel()]
if transformation is not None:
X = transformation(X)
if proba == "raw":
Z = clf.decision_function(X)
Z = Z.reshape(xx.shape)
# out = ax.contourf(xx, yy, Z, **params)
out = ax.imshow(Z,extent=(np.min(xx), np.max(xx), np.min(yy), np.max(yy)), origin='lower', **params)
ax.contour(xx, yy, Z, levels=[0])
elif proba:
Z = clf.predict_proba(X)[:,-1]
Z = Z.reshape(xx.shape)
out = ax.imshow(Z,extent=(np.min(xx),
|
np.max(xx)
|
numpy.max
|
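A hypothetical end-to-end use of the same decision-boundary pattern as plot_contours: build a mesh over the data, evaluate the classifier's probabilities on the flattened mesh, reshape back to the grid and draw it. The classifier and data below are stand-ins, and the meshgrid is inlined rather than importing the helpers above.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = np.r_[rng.randn(50, 2) - 1, rng.randn(50, 2) + 1]   # two toy blobs
y = np.r_[np.zeros(50), np.ones(50)]
clf = LogisticRegression().fit(X, y)

xx, yy = np.meshgrid(np.linspace(X[:, 0].min() - 1, X[:, 0].max() + 1, 300),
                     np.linspace(X[:, 1].min() - 1, X[:, 1].max() + 1, 300))
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, -1].reshape(xx.shape)

fig, ax = plt.subplots()
ax.imshow(Z, extent=(xx.min(), xx.max(), yy.min(), yy.max()), origin='lower')
ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')
# plt.show()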
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import openmdao.api as om
import wisdem.pyframe3dd.pyframe3dd as frame3dd
from wisdem.commonse import gravity
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse.utilities import find_nearest, nodal2sectional
from wisdem.commonse.cross_sections import Tube, IBeam
from wisdem.commonse.utilization_constraints import vonMisesStressUtilization
RIGID = 1
FREE = 0
def tube_prop(s, Di, ti):
L = s.max() - s.min()
def equal_pts(xi):
if len(xi) < len(s) and len(xi) == 2:
x = np.interp((s - s.min()) / L, [0, 1], xi)
elif len(xi) == len(s):
x = xi
else:
raise ValueError("Unknown grid of input", str(xi))
return x
D = equal_pts(Di)
t = equal_pts(ti)
return Tube(nodal2sectional(D)[0], nodal2sectional(t)[0])
class Hub_Rotor_LSS_Frame(om.ExplicitComponent):
"""
Run structural analysis of hub system with the generator rotor and main (LSS) shaft.
Parameters
----------
tilt : float, [deg]
Shaft tilt
s_lss : numpy array[5], [m]
Discretized s-coordinates along drivetrain, measured from bedplate (direct) or tower center (geared)
lss_diameter : numpy array[2], [m]
LSS outer diameter from hub to bearing 2
lss_wall_thickness : numpy array[2], [m]
LSS wall thickness
hub_system_mass : float, [kg]
Hub system mass
hub_system_cm : float, [m]
Hub system center of mass distance from hub flange
hub_system_I : numpy array[6], [kg*m**2]
Hub system moment of inertia
F_hub : numpy array[3, n_dlcs], [N]
Force vector applied to the hub (WITH WEIGHT???)
M_hub : numpy array[3, n_dlcs], [N*m]
Moment vector applied to the hub
s_mb1 : float, [m]
Bearing 1 s-coordinate along drivetrain, measured from bedplate (direct) or tower center (geared)
s_mb2 : float, [m]
Bearing 2 s-coordinate along drivetrain, measured from bedplate (direct) or tower center (geared)
s_rotor : float, [m]
Generator rotor attachment to lss s-coordinate measured from bedplate (direct) or tower center (geared)
generator_rotor_mass : float, [kg]
Generator rotor mass
generator_rotor_I : numpy array[3], [kg*m**2]
Generator rotor moment of inertia (measured about its cm)
gearbox_mass : float, [kg]
Gearbox rotor mass
gearbox_I : numpy array[3], [kg*m**2]
Gearbox moment of inertia (measured about its cm)
lss_E : float, [Pa]
modulus of elasticity
lss_G : float, [Pa]
shear modulus
lss_rho : float, [kg/m**3]
material density
lss_Xy : float, [Pa]
yield stress
Returns
-------
torq_deflection : float, [m]
Maximum deflection distance at rotor (direct) or gearbox (geared) attachment
torq_rotation : float, [rad]
Maximum rotation angle at rotor (direct) or gearbox (geared) attachment
torq_axial_stress : numpy array[5, n_dlcs], [Pa]
Axial stress in Curved_beam structure
torq_shear_stress : numpy array[5, n_dlcs], [Pa]
Shear stress in Curved_beam structure
torq_bending_stress : numpy array[5, n_dlcs], [Pa]
Hoop stress in Curved_beam structure calculated with Roarks formulae
constr_lss_vonmises : numpy array[5, n_dlcs]
Sigma_y/Von_Mises
F_mb1 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 1 in hub c.s.
F_mb2 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 2 in hub c.s.
F_torq : numpy array[3, n_dlcs], [N]
Force vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
M_mb1 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 1 in hub c.s.
M_mb2 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 2 in hub c.s.
M_torq : numpy array[3, n_dlcs], [N*m]
Moment vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
"""
def initialize(self):
self.options.declare("n_dlcs")
self.options.declare("direct_drive", default=True)
self.options.declare("modeling_options")
def setup(self):
n_dlcs = self.options["n_dlcs"]
self.add_discrete_input("upwind", True)
self.add_input("tilt", 0.0, units="deg")
self.add_input("s_lss", val=np.zeros(5), units="m")
self.add_input("lss_diameter", val=np.zeros(2), units="m")
self.add_input("lss_wall_thickness", val=np.zeros(2), units="m")
self.add_input("hub_system_mass", 0.0, units="kg")
self.add_input("hub_system_cm", 0.0, units="m")
self.add_input("hub_system_I", np.zeros(6), units="kg*m**2")
self.add_input("F_hub", val=np.zeros((3, n_dlcs)), units="N")
self.add_input("M_hub", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_input("s_mb1", val=0.0, units="m")
self.add_input("s_mb2", val=0.0, units="m")
self.add_input("s_rotor", val=0.0, units="m")
self.add_input("generator_rotor_mass", val=0.0, units="kg")
self.add_input("generator_rotor_I", val=np.zeros(3), units="kg*m**2")
self.add_input("gearbox_mass", val=0.0, units="kg")
self.add_input("gearbox_I", val=np.zeros(3), units="kg*m**2")
self.add_input("brake_mass", val=0.0, units="kg")
self.add_input("brake_I", val=np.zeros(3), units="kg*m**2")
self.add_input("carrier_mass", val=0.0, units="kg")
self.add_input("carrier_I", val=np.zeros(3), units="kg*m**2")
self.add_input("lss_E", val=0.0, units="Pa")
self.add_input("lss_G", val=0.0, units="Pa")
self.add_input("lss_rho", val=0.0, units="kg/m**3")
self.add_input("lss_Xy", val=0.0, units="Pa")
self.add_output("torq_deflection", val=0.0, units="m")
self.add_output("torq_rotation", val=0.0, units="rad")
self.add_output("lss_axial_stress", np.zeros((4, n_dlcs)), units="Pa")
self.add_output("lss_shear_stress", np.zeros((4, n_dlcs)), units="Pa")
self.add_output("constr_lss_vonmises", np.zeros((4, n_dlcs)))
self.add_output("F_mb1", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("F_mb2", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("F_torq", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("M_mb1", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_output("M_mb2", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_output("M_torq", val=np.zeros((3, n_dlcs)), units="N*m")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
upwind = discrete_inputs["upwind"]
Cup = -1.0 if upwind else 1.0
tilt = float(np.deg2rad(inputs["tilt"]))
s_lss = inputs["s_lss"]
D_lss = inputs["lss_diameter"]
t_lss = inputs["lss_wall_thickness"]
s_mb1 = float(inputs["s_mb1"])
s_mb2 = float(inputs["s_mb2"])
if self.options["direct_drive"]:
s_rotor = float(inputs["s_rotor"])
m_rotor = float(inputs["generator_rotor_mass"])
I_rotor = inputs["generator_rotor_I"]
m_brake = float(inputs["brake_mass"])
I_brake = inputs["brake_I"]
else:
m_gearbox = float(inputs["gearbox_mass"])
I_gearbox = inputs["gearbox_I"]
m_carrier = float(inputs["carrier_mass"])
I_carrier = inputs["carrier_I"]
rho = float(inputs["lss_rho"])
E = float(inputs["lss_E"])
G = float(inputs["lss_G"])
sigma_y = float(inputs["lss_Xy"])
gamma_f = float(self.options["modeling_options"]["gamma_f"])
gamma_m = float(self.options["modeling_options"]["gamma_m"])
gamma_n = float(self.options["modeling_options"]["gamma_n"])
m_hub = float(inputs["hub_system_mass"])
cm_hub = float(inputs["hub_system_cm"])
I_hub = inputs["hub_system_I"]
F_hub = inputs["F_hub"]
M_hub = inputs["M_hub"]
# ------- node data ----------------
n = len(s_lss)
inode = np.arange(1, n + 1)
ynode = znode = rnode = np.zeros(n)
xnode = Cup * s_lss.copy()
nodes = frame3dd.NodeData(inode, xnode, ynode, znode, rnode)
# Grab indices for later
i1 = inode[find_nearest(xnode, Cup * s_mb1)]
i2 = inode[find_nearest(xnode, Cup * s_mb2)]
iadd = inode[1]
# Differences between direct and geared
if self.options["direct_drive"]:
itorq = inode[find_nearest(xnode, Cup * s_rotor)]
m_torq = m_rotor
I_torq = I_rotor
m_add = m_brake
I_add = I_brake
else:
itorq = inode[0]
m_torq = m_gearbox - m_carrier
I_torq = I_gearbox - I_carrier
m_add = m_carrier
I_add = I_carrier
# ------------------------------------
# ------ reaction data ------------
# Reactions at main bearings
rnode = np.r_[i1, i2, itorq]
Rx = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Ry = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Rz = np.array([RIGID, FREE, FREE]) # Upwind bearing restricts translational
Rxx = np.array([FREE, FREE, RIGID]) # Torque is absorbed by stator, so this is the best way to capture that
Ryy = np.array([FREE, RIGID, FREE]) # downwind bearing carry moments
Rzz = np.array([FREE, RIGID, FREE]) # downwind bearing carry moments
reactions = frame3dd.ReactionData(rnode, Rx, Ry, Rz, Rxx, Ryy, Rzz, rigid=RIGID)
# -----------------------------------
# ------ frame element data ------------
lsscyl = tube_prop(s_lss, D_lss, t_lss)
ielement = np.arange(1, n)
N1 = np.arange(1, n)
N2 = np.arange(2, n + 1)
roll = np.zeros(n - 1)
myones = np.ones(n - 1)
Ax = lsscyl.Area
As = lsscyl.Asx
S = lsscyl.S
C = lsscyl.C
J0 = lsscyl.J0
Jx = lsscyl.Jxx
elements = frame3dd.ElementData(
ielement, N1, N2, Ax, As, As, J0, Jx, Jx, E * myones, G * myones, roll, rho * myones
)
# -----------------------------------
# ------ options ------------
shear = geom = True
dx = -1
options = frame3dd.Options(shear, geom, dx)
# -----------------------------------
# initialize frameDD3 object
myframe = frame3dd.Frame(nodes, reactions, elements, options)
# ------ add hub and generator rotor (direct) or gearbox (geared) extra mass ------------
three0 = np.zeros(3).tolist()
myframe.changeExtraNodeMass(
np.r_[inode[-1], itorq, iadd],
[m_hub, m_torq, m_add],
[I_hub[0], I_torq[0], I_add[0]],
[I_hub[1], I_torq[1], I_add[1]],
[I_hub[2], I_torq[2], I_add[2]],
three0,
three0,
three0,
[cm_hub, 0.0, 0.0],
three0,
three0,
True,
)
# ------------------------------------
# ------- NO dynamic analysis ----------
# myframe.enableDynamics(NFREQ, discrete_inputs['Mmethod'], discrete_inputs['lump'], float(inputs['tol']), float(inputs['shift']))
# ----------------------------
# ------ static load cases ------------
n_dlcs = self.options["n_dlcs"]
gy = 0.0
gx = -gravity * np.sin(tilt)
gz = -gravity * np.cos(tilt)
for k in range(n_dlcs):
# gravity in the X, Y, Z, directions (global)
load = frame3dd.StaticLoadCase(gx, gy, gz)
# point loads
# TODO: Are input loads aligned with the lss? If so they need to be rotated.
load.changePointLoads(
[inode[-1]], [F_hub[0, k]], [F_hub[1, k]], [F_hub[2, k]], [M_hub[0, k]], [M_hub[1, k]], [M_hub[2, k]]
)
# -----------------------------------
# Put all together and run
myframe.addLoadCase(load)
# myframe.write('myframe1.3dd') # Debugging
displacements, forces, reactions, internalForces, mass3dd, modal = myframe.run()
# Loop over DLCs and append to outputs
rotor_gearbox_deflection = np.zeros(n_dlcs)
rotor_gearbox_rotation = np.zeros(n_dlcs)
outputs["F_mb1"] = np.zeros((3, n_dlcs))
outputs["F_mb2"] = np.zeros((3, n_dlcs))
outputs["F_torq"] = np.zeros((3, n_dlcs))
outputs["M_mb1"] = np.zeros((3, n_dlcs))
outputs["M_mb2"] = np.zeros((3, n_dlcs))
outputs["M_torq"] = np.zeros((3, n_dlcs))
outputs["lss_axial_stress"] = np.zeros((n - 1, n_dlcs))
outputs["lss_shear_stress"] = np.zeros((n - 1, n_dlcs))
outputs["constr_lss_vonmises"] = np.zeros((n - 1, n_dlcs))
for k in range(n_dlcs):
# Deflections and rotations at torq attachment
rotor_gearbox_deflection[k] = np.sqrt(
displacements.dx[k, itorq - 1] ** 2
+ displacements.dy[k, itorq - 1] ** 2
+ displacements.dz[k, itorq - 1] ** 2
)
rotor_gearbox_rotation[k] = (
displacements.dxrot[k, itorq - 1]
+ displacements.dyrot[k, itorq - 1]
+ displacements.dzrot[k, itorq - 1]
)
# shear and bending, one per element (convert from local to global c.s.)
Fx = forces.Nx[k, 1::2]
Vy = forces.Vy[k, 1::2]
Vz = -forces.Vz[k, 1::2]
F = np.sqrt(Vz ** 2 + Vy ** 2)
Mxx = forces.Txx[k, 1::2]
Myy = forces.Myy[k, 1::2]
Mzz = -forces.Mzz[k, 1::2]
M = np.sqrt(Myy ** 2 + Mzz ** 2)
# Record total forces and moments
outputs["F_mb1"][:, k] = -1.0 * np.array([reactions.Fx[k, 0], reactions.Fy[k, 0], reactions.Fz[k, 0]])
outputs["F_mb2"][:, k] = -1.0 * np.array([reactions.Fx[k, 1], reactions.Fy[k, 1], reactions.Fz[k, 1]])
outputs["F_torq"][:, k] = -1.0 * np.array([reactions.Fx[k, 2], reactions.Fy[k, 2], reactions.Fz[k, 2]])
outputs["M_mb1"][:, k] = -1.0 * np.array([reactions.Mxx[k, 0], reactions.Myy[k, 0], reactions.Mzz[k, 0]])
outputs["M_mb2"][:, k] = -1.0 * np.array([reactions.Mxx[k, 1], reactions.Myy[k, 1], reactions.Mzz[k, 1]])
outputs["M_torq"][:, k] = -1.0 * np.array([reactions.Mxx[k, 2], reactions.Myy[k, 2], reactions.Mzz[k, 2]])
outputs["lss_axial_stress"][:, k] = np.abs(Fx) / Ax + M / S
outputs["lss_shear_stress"][:, k] = 2.0 * F / As + np.abs(Mxx) / C
hoop = np.zeros(F.shape)
outputs["constr_lss_vonmises"][:, k] = vonMisesStressUtilization(
outputs["lss_axial_stress"][:, k],
hoop,
outputs["lss_shear_stress"][:, k],
gamma_f * gamma_m * gamma_n,
sigma_y,
)
outputs["torq_deflection"] = rotor_gearbox_deflection.max()
outputs["torq_rotation"] = rotor_gearbox_rotation.max()
class HSS_Frame(om.ExplicitComponent):
"""
Run structural analysis of high speed shaft (HSS) between gearbox and generator (only for geared configurations).
Parameters
----------
tilt : float, [deg]
Shaft tilt
s_hss : numpy array[3], [m]
Discretized s-coordinates along drivetrain, measured from bedplate (direct) or tower center (geared)
hss_diameter : numpy array[2], [m]
Lss discretized diameter values at coordinates
hss_wall_thickness : numpy array[2], [m]
Lss discretized thickness values at coordinates
M_hub : numpy array[3, n_dlcs], [N*m]
Moment vector applied to the hub
m_generator : float, [kg]
Gearbox rotor mass
cm_generator : float, [kg]
Gearbox center of mass (measured from tower center)
I_generator : numpy array[3], [kg*m**2]
Gearbox moment of inertia (measured about its cm)
hss_E : float, [Pa]
modulus of elasticity
hss_G : float, [Pa]
shear modulus
hss_rho : float, [kg/m**3]
material density
hss_Xy : float, [Pa]
yield stress
Returns
-------
hss_axial_stress : numpy array[5, n_dlcs], [Pa]
Axial stress in Curved_beam structure
hss_shear_stress : numpy array[5, n_dlcs], [Pa]
Shear stress in Curved_beam structure
hss_bending_stress : numpy array[5, n_dlcs], [Pa]
Hoop stress in Curved_beam structure calculated with Roarks formulae
constr_hss_vonmises : numpy array[5, n_dlcs]
Sigma_y/Von_Mises
F_generator : numpy array[3, n_dlcs], [N]
Force vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
M_generator : numpy array[3, n_dlcs], [N*m]
Moment vector applied to generator rotor (direct) or gearbox (geared) in hub c.s.
"""
def initialize(self):
self.options.declare("n_dlcs")
self.options.declare("modeling_options")
def setup(self):
n_dlcs = self.options["n_dlcs"]
self.add_input("tilt", 0.0, units="deg")
self.add_input("s_hss", val=np.zeros(3), units="m")
self.add_input("hss_diameter", val=np.zeros(2), units="m")
self.add_input("hss_wall_thickness", val=np.zeros(2), units="m")
self.add_input("M_hub", val=np.zeros((3, n_dlcs)), units="N*m")
self.add_input("gear_ratio", val=1.0)
self.add_input("s_generator", val=0.0, units="m")
self.add_input("generator_mass", val=0.0, units="kg")
self.add_input("generator_I", val=np.zeros(3), units="kg*m**2")
self.add_input("brake_mass", val=0.0, units="kg")
self.add_input("brake_I", val=np.zeros(3), units="kg*m**2")
self.add_input("hss_E", val=0.0, units="Pa")
self.add_input("hss_G", val=0.0, units="Pa")
self.add_input("hss_rho", val=0.0, units="kg/m**3")
self.add_input("hss_Xy", val=0.0, units="Pa")
self.add_output("hss_axial_stress", np.zeros((2, n_dlcs)), units="Pa")
self.add_output("hss_shear_stress", np.zeros((2, n_dlcs)), units="Pa")
self.add_output("hss_bending_stress", np.zeros((2, n_dlcs)), units="Pa")
self.add_output("constr_hss_vonmises", np.zeros((2, n_dlcs)))
self.add_output("F_generator", val=np.zeros((3, n_dlcs)), units="N")
self.add_output("M_generator", val=np.zeros((3, n_dlcs)), units="N*m")
def compute(self, inputs, outputs):
# Unpack inputs
tilt = float(np.deg2rad(inputs["tilt"]))
s_hss = inputs["s_hss"]
D_hss = inputs["hss_diameter"]
t_hss = inputs["hss_wall_thickness"]
s_generator = float(inputs["s_generator"])
m_generator = float(inputs["generator_mass"])
I_generator = inputs["generator_I"]
m_brake = float(inputs["brake_mass"])
I_brake = inputs["brake_I"]
rho = float(inputs["hss_rho"])
E = float(inputs["hss_E"])
G = float(inputs["hss_G"])
sigma_y = float(inputs["hss_Xy"])
gamma_f = float(self.options["modeling_options"]["gamma_f"])
gamma_m = float(self.options["modeling_options"]["gamma_m"])
gamma_n = float(self.options["modeling_options"]["gamma_n"])
M_hub = inputs["M_hub"]
gear_ratio = float(inputs["gear_ratio"])
# ------- node data ----------------
n = len(s_hss)
inode = np.arange(1, n + 1)
ynode = znode = rnode = np.zeros(n)
xnode = s_hss.copy()
nodes = frame3dd.NodeData(inode, xnode, ynode, znode, rnode)
# ------------------------------------
# ------ reaction data ------------
# Reaction at generator attachment
rnode = [inode[0]]
Rx = Ry = Rz = Rxx = Ryy = Rzz = np.array([RIGID])
reactions = frame3dd.ReactionData(rnode, Rx, Ry, Rz, Rxx, Ryy, Rzz, rigid=RIGID)
# -----------------------------------
# ------ frame element data ------------
hsscyl = tube_prop(s_hss, D_hss, t_hss)
ielement = np.arange(1, n)
N1 = np.arange(1, n)
N2 = np.arange(2, n + 1)
roll = np.zeros(n - 1)
myones = np.ones(n - 1)
Ax = hsscyl.Area
As = hsscyl.Asx
S = hsscyl.S
C = hsscyl.C
J0 = hsscyl.J0
Jx = hsscyl.Jxx
elements = frame3dd.ElementData(
ielement, N1, N2, Ax, As, As, J0, Jx, Jx, E * myones, G * myones, roll, rho * myones
)
# -----------------------------------
# ------ options ------------
shear = geom = True
dx = -1
options = frame3dd.Options(shear, geom, dx)
# -----------------------------------
# initialize frameDD3 object
myframe = frame3dd.Frame(nodes, reactions, elements, options)
# ------ add brake hub and generator rotor (direct) or generator (geared) extra mass ------------
myframe.changeExtraNodeMass(
np.r_[inode[1], inode[0]],
[m_brake, m_generator],
[I_brake[0], I_generator[0]],
[I_brake[1], I_generator[1]],
[I_brake[2], I_generator[2]],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, s_generator - s_hss[-1]],
[0.0, 0.0],
[0.0, 0.0],
True,
)
# ------------------------------------
# ------ static load cases ------------
n_dlcs = self.options["n_dlcs"]
gy = 0.0
gx = -gravity * np.sin(tilt)
gz = -gravity * np.cos(tilt)
for k in range(n_dlcs):
# gravity in the X, Y, Z, directions (global)
load = frame3dd.StaticLoadCase(gx, gy, gz)
# point loads
Fx = Fy = Fz = My = Mz = np.zeros(1)
Mx = M_hub[0] / gear_ratio
load.changePointLoads([inode[-1]], Fx, Fy, Fz, Mx, My, Mz)
# -----------------------------------
# Put all together and run
myframe.addLoadCase(load)
# myframe.write('myframe2.3dd') # Debugging
displacements, forces, reactions, internalForces, mass3dd, modal = myframe.run()
# Loop over DLCs and append to outputs
outputs["F_generator"] = np.zeros((3, n_dlcs))
outputs["M_generator"] = np.zeros((3, n_dlcs))
outputs["hss_axial_stress"] = np.zeros((n - 1, n_dlcs))
outputs["hss_shear_stress"] = np.zeros((n - 1, n_dlcs))
outputs["hss_bending_stress"] = np.zeros((n - 1, n_dlcs))
outputs["constr_hss_vonmises"] = np.zeros((n - 1, n_dlcs))
for k in range(n_dlcs):
# shear and bending, one per element (convert from local to global c.s.)
Fx = forces.Nx[k, 1::2]
Vy = forces.Vy[k, 1::2]
Vz = -forces.Vz[k, 1::2]
F = np.sqrt(Vz ** 2 + Vy ** 2)
Mxx = forces.Txx[k, 1::2]
Myy = forces.Myy[k, 1::2]
Mzz = -forces.Mzz[k, 1::2]
M = np.sqrt(Myy ** 2 + Mzz ** 2)
# Record total forces and moments
outputs["F_generator"][:, k] = -1.0 * np.array([reactions.Fx[k, 0], reactions.Fy[k, 0], reactions.Fz[k, 0]])
outputs["M_generator"][:, k] = -1.0 * np.array(
[reactions.Mxx[k, 0], reactions.Myy[k, 0], reactions.Mzz[k, 0]]
)
outputs["hss_axial_stress"][:, k] = np.abs(Fx) / Ax + M / S
outputs["hss_shear_stress"][:, k] = 2.0 * F / As + np.abs(Mxx) / C
hoop = np.zeros(F.shape)
outputs["constr_hss_vonmises"][:, k] = vonMisesStressUtilization(
outputs["hss_axial_stress"][:, k],
hoop,
outputs["hss_shear_stress"][:, k],
gamma_f * gamma_m * gamma_n,
sigma_y,
)
class Nose_Stator_Bedplate_Frame(om.ExplicitComponent):
"""
Run structural analysis of nose/turret with the generator stator and bedplate
Parameters
----------
upwind : boolean
Flag whether the design is upwind or downwind
tilt : float, [deg]
Lss tilt
s_nose : numpy array[5], [m]
Discretized s-coordinates along drivetrain, measured from bedplate
nose_diameter : numpy array[2], [m]
Nose outer diameter from bearing 1 to bedplate
nose_wall_thickness : numpy array[2], [m]
Nose wall thickness
x_bedplate : numpy array[12], [m]
Bedplate centerline x-coordinates
z_bedplate : numpy array[12], [m]
Bedplate centerline z-coordinates
x_bedplate_inner : numpy array[12], [m]
Bedplate lower curve x-coordinates
z_bedplate_inner : numpy array[12], [m]
Bedplate lower curve z-coordinates
x_bedplate_outer : numpy array[12], [m]
Bedplate outer curve x-coordinates
z_bedplate_outer : numpy array[12], [m]
Bedplate outer curve z-coordinates
D_bedplate : numpy array[12], [m]
Bedplate diameters
t_bedplate : numpy array[12], [m]
Bedplate wall thickness (mirrors input)
s_mb1 : float, [m]
Bearing 1 s-coordinate along drivetrain, measured from bedplate
s_mb2 : float, [m]
Bearing 2 s-coordinate along drivetrain, measured from bedplate
mb1_mass : float, [kg]
component mass
mb1_I : numpy array[3], [kg*m**2]
component I
mb1_max_defl_ang : float, [rad]
Maximum allowable deflection angle
mb2_mass : float, [kg]
component mass
mb2_I : numpy array[3], [kg*m**2]
component I
mb2_max_defl_ang : float, [rad]
Maximum allowable deflection angle
s_stator : float, [m]
Generator stator attachment to lss s-coordinate measured from bedplate
generator_stator_mass : float, [kg]
Generator stator mass
generator_stator_I : numpy array[3], [kg*m**2]
Generator stator moment of inertia (measured about cm)
F_mb1 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 1 in hub c.s.
F_mb2 : numpy array[3, n_dlcs], [N]
Force vector applied to bearing 2 in hub c.s.
M_mb1 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 1 in hub c.s.
M_mb2 : numpy array[3, n_dlcs], [N*m]
Moment vector applied to bearing 2 in hub c.s.
other_mass : float, [kg]
Mass of other nacelle components that rest on mainplate
bedplate_E : float, [Pa]
modulus of elasticity
bedplate_G : float, [Pa]
shear modulus
bedplate_rho : float, [kg/m**3]
material density
bedplate_Xy : float, [Pa]
yield stress
Returns
-------
mb1_deflection : numpy array[n_dlcs], [m]
Total deflection distance of bearing 1
mb2_deflection : numpy array[n_dlcs], [m]
Total deflection distance of bearing 2
stator_deflection : float, [m]
Maximum deflection distance at stator attachment
mb1_rotation : numpy array[n_dlcs], [rad]
Total rotation angle of bearing 1
mb2_rotation : numpy array[n_dlcs], [rad]
Total rotation angle of bearing 2
stator_rotation : float, [rad]
Maximum rotation angle at stator attachment
base_F : numpy array[3, n_dlcs], [N]
Total reaction force at bedplate base in tower top coordinate system
base_M : numpy array[3, n_dlcs], [N*m]
Total reaction moment at bedplate base in tower top coordinate system
bedplate_nose_axial_stress : numpy array[12+3, n_dlcs], [Pa]
Axial stress in Curved_beam structure
bedplate_nose_shear_stress : numpy array[12+3, n_dlcs], [Pa]
Shear stress in Curved_beam structure
bedplate_nose_bending_stress : numpy array[12+3, n_dlcs], [Pa]
Hoop stress in Curved_beam structure calculated with Roarks formulae
constr_bedplate_vonmises : numpy array[12+3, n_dlcs]
Sigma_y/Von_Mises
constr_mb1_defl : numpy array[n_dlcs]
Angular deflection relative to limit of bearing 1 (should be <1)
constr_mb2_defl : numpy array[n_dlcs]
Angular deflection relative to limit of bearing 2 (should be <1)
"""
def initialize(self):
self.options.declare("n_dlcs")
self.options.declare("modeling_options")
def setup(self):
n_dlcs = self.options["n_dlcs"]
self.add_discrete_input("upwind", True)
self.add_input("tilt", 0.0, units="deg")
self.add_input("s_nose", val=np.zeros(5), units="m")
self.add_input("nose_diameter", np.zeros(2), units="m")
self.add_input("nose_wall_thickness", np.zeros(2), units="m")
self.add_input("x_bedplate", val=np.zeros(12), units="m")
self.add_input("z_bedplate", val=np.zeros(12), units="m")
self.add_input("x_bedplate_inner", val=np.zeros(12), units="m")
self.add_input("z_bedplate_inner", val=np.zeros(12), units="m")
self.add_input("x_bedplate_outer", val=np.zeros(12), units="m")
self.add_input("z_bedplate_outer", val=np.zeros(12), units="m")
self.add_input("D_bedplate", val=np.zeros(12), units="m")
self.add_input("t_bedplate", val=np.zeros(12), units="m")
self.add_input("s_mb1", val=0.0, units="m")
self.add_input("s_mb2", val=0.0, units="m")
self.add_input("mb1_mass", 0.0, units="kg")
self.add_input("mb1_I", np.zeros(3), units="kg*m**2")
self.add_input("mb1_max_defl_ang", 0.0, units="rad")
self.add_input("mb2_mass", 0.0, units="kg")
self.add_input("mb2_I", np.zeros(3), units="kg*m**2")
self.add_input("mb2_max_defl_ang", 0.0, units="rad")
self.add_input("s_stator", val=0.0, units="m")
self.add_input("generator_stator_mass", val=0.0, units="kg")
self.add_input("generator_stator_I", val=np.zeros(3), units="kg*m**2")
self.add_input("F_mb1", val=np.zeros((3, n_dlcs)), units="N")
self.add_input("F_mb2", val=np.zeros((3, n_dlcs)), units="N")
self.add_input("M_mb1", val=
|
np.zeros((3, n_dlcs))
|
numpy.zeros
|
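For reference on the utilisation outputs above: the constraint combines the axial, hoop and shear stresses into an equivalent von Mises stress and compares it, with the partial safety factors applied, to the yield stress. The sketch below uses the standard plane-stress von Mises expression; the exact WISDEM helper (vonMisesStressUtilization) may differ in detail, and the numbers are made up.
import numpy as np

def von_mises_utilization(axial, hoop, shear, gamma, sigma_y):
    sigma_vm = np.sqrt(axial**2 + hoop**2 - axial * hoop + 3.0 * shear**2)
    return gamma * sigma_vm / sigma_y        # values below 1 indicate a feasible design

axial = np.array([80e6, 120e6])              # Pa, made-up section stresses
shear = np.array([20e6, 35e6])
hoop = np.zeros_like(axial)                  # hoop stress is taken as zero above
print(von_mises_utilization(axial, hoop, shear, gamma=1.35 * 1.3 * 1.0, sigma_y=345e6))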
"""
This library collects a bunch of Optimizers inspired by the paper
The older optimizers are stored in Optimizer.py. Those classes are equipped with a `step_simple` function taking in
scores and codes to generate the next batch of codes.
"""
# from matplotlib import use as use_backend
# use_backend("Agg")
import matplotlib.pylab as plt
# plt.ioff()
#
import os
import time
import sys
# import utils
import numpy as np
from numpy.linalg import norm
from numpy.random import randn
from numpy import sqrt, zeros, abs, floor, log, log2, eye, exp
from geometry_utils import ExpMap, VecTransport, radial_proj, orthogonalize, renormalize
orig_stdout = sys.stdout
#%% Classic Optimizers as Reference
class CholeskyCMAES:
""" Note this is a variant of CMAES Cholesky suitable for high dimensional optimization"""
def __init__(self, space_dimen, population_size=None, init_sigma=3.0, init_code=None, Aupdate_freq=10,
maximize=True, random_seed=None, optim_params={}):
N = space_dimen
self.space_dimen = space_dimen
# Overall control parameter
self.maximize = maximize # if the program is to maximize or to minimize
# Strategy parameter setting: Selection
if population_size is None:
self.lambda_ = int(4 + floor(3 * log2(N))) # population size, offspring number
# the relation between dimension and population size.
else:
self.lambda_ = population_size # use custom specified population size
mu = self.lambda_ / 2 # number of parents/points for recombination
# Select half the population size as parents
weights = log(mu + 1 / 2) - (log(np.arange(1, 1 + floor(mu)))) # muXone array for weighted recombination
self.mu = int(floor(mu))
self.weights = weights / sum(weights) # normalize recombination weights array
mueff = self.weights.sum() ** 2 / sum(self.weights ** 2) # variance-effectiveness of sum w_i x_i
self.weights.shape = (1, -1) # Add the 1st dim 1 to the weights mat
self.mueff = mueff # add to class variable
self.sigma = init_sigma # initial global step size (3.0 by default)
print("Space dimension: %d, Population size: %d, Select size:%d, Optimization Parameters:\nInitial sigma: %.3f"
% (self.space_dimen, self.lambda_, self.mu, self.sigma))
# Strategy parameter setting: Adaptation
self.cc = 4 / (N + 4) # e.g. 0.0009756 for N = 4096
self.cs = sqrt(mueff) / (sqrt(mueff) + sqrt(N)) # 0.0499
self.c1 = 2 / (N + sqrt(2)) ** 2 # 1.1912701410022985e-07
if "cc" in optim_params.keys(): # if there is outside value for these parameter, overwrite them
self.cc = optim_params["cc"]
if "cs" in optim_params.keys():
self.cs = optim_params["cs"]
if "c1" in optim_params.keys():
self.c1 = optim_params["c1"]
self.damps = 1 + self.cs + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) # damping for sigma usually close to 1
print("cc=%.3f, cs=%.3f, c1=%.3f damps=%.3f" % (self.cc, self.cs, self.c1, self.damps))
if init_code is not None:
self.init_x = np.asarray(init_code)
self.init_x.shape = (1, N)
else:
self.init_x = None # FIXED Nov. 1st
self.xmean = zeros((1, N))
self.xold = zeros((1, N))
# Initialize dynamic (internal) strategy parameters and constants
self.pc = zeros((1, N))
self.ps = zeros((1, N)) # evolution paths for C and sigma
self.A = eye(N, N) # the covariance matrix is represented by its factor: A @ A.T = C
self.Ainv = eye(N, N)
self.eigeneval = 0 # track update of B and D
self.counteval = 0
if Aupdate_freq is None:
self.update_crit = self.lambda_ / self.c1 / N / 10
else:
self.update_crit = Aupdate_freq * self.lambda_
self.chiN = sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
# expectation of ||N(0,I)|| == norm(randn(N,1)) in 1/N expansion formula
self._istep = 0
def step_simple(self, scores, codes):
""" Taking scores and codes to return new codes, without generating images
Used in cases when the images are better handled in outer objects like Experiment object
"""
# Note it's important to decide which variable is to be saved in the `Optimizer` object
# Note to confirm with other code, this part is transposed.
# set short name for everything to simplify equations
N = self.space_dimen
lambda_, mu, mueff, chiN = self.lambda_, self.mu, self.mueff, self.chiN
cc, cs, c1, damps = self.cc, self.cs, self.c1, self.damps
sigma, A, Ainv, ps, pc, = self.sigma, self.A, self.Ainv, self.ps, self.pc,
# Sort by fitness and compute weighted mean into xmean
if self.maximize is False:
code_sort_index = np.argsort(scores) # ascending order for minimization; the maximizing branch below negates the scores
else:
code_sort_index = np.argsort(-scores)
# scores = scores[code_sort_index] # Ascending order. minimization
if self._istep == 0:
# Population Initialization: if without initialization, the first xmean is evaluated from weighted average all the natural images
if self.init_x is None:
select_n = len(code_sort_index[0:mu])
temp_weight = self.weights[:, :select_n] / np.sum(self.weights[:, :select_n]) # in case there are not enough codes
self.xmean = temp_weight @ codes[code_sort_index[0:mu], :]
else:
self.xmean = self.init_x
else:
self.xold = self.xmean
self.xmean = self.weights @ codes[code_sort_index[0:mu], :] # Weighted recombination, new mean value
# Cumulation statistics through steps: Update evolution paths
randzw = self.weights @ self.randz[code_sort_index[0:mu], :]
ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * randzw
pc = (1 - cc) * pc +
|
sqrt(cc * (2 - cc) * mueff)
|
numpy.sqrt
|
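The constructor above sets up the usual CMA-ES selection machinery: a population size that grows with log2 of the dimension, log-linear recombination weights over the best half of the population, and the variance-effective selection mass mueff. A stand-alone sketch of just that part, so the numbers are easy to inspect (the helper name is illustrative):
import numpy as np

def cmaes_selection(space_dimen, population_size=None):
    N = space_dimen
    lambda_ = population_size or int(4 + np.floor(3 * np.log2(N)))   # offspring number
    mu = int(np.floor(lambda_ / 2))                                  # number of parents
    weights = np.log(lambda_ / 2 + 0.5) - np.log(np.arange(1, mu + 1))
    weights = weights / weights.sum()                                # normalised recombination weights
    mueff = weights.sum() ** 2 / np.sum(weights ** 2)                # variance-effective selection mass
    return lambda_, mu, weights, mueff

lambda_, mu, w, mueff = cmaes_selection(4096)
print(lambda_, mu, round(mueff, 1))   # 40 20 11.3 for N = 4096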
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_allclose, assert_,
assert_raises)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
np.complex128]
# test complex256 as well if it is available
try:
dtypes_in += [np.complex256, ]
dtypes_out += [np.complex128, ]
except AttributeError:
pass
def test_dwt_idwt_basic():
x = [3, 7, 1, 1, -2, 5, 4, 6]
cA, cD = pywt.dwt(x, 'db2')
cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
# mismatched dtypes OK
x_roundtrip2 = pywt.idwt(cA.astype(np.float64), cD.astype(np.float32),
'db2')
assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
assert_(x_roundtrip.dtype == np.float64)
def test_dwt_idwt_dtypes():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
x = np.ones(4, dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
cA, cD = pywt.dwt(x, wavelet)
assert_(cA.dtype == cD.dtype == dt_out, "dwt: " + errmsg)
x_roundtrip = pywt.idwt(cA, cD, wavelet)
assert_(x_roundtrip.dtype == dt_out, "idwt: " + errmsg)
def test_dwt_idwt_basic_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'db2')
cA_expect = np.asarray([5.65685425, 7.39923721, 0.22414387, 3.33677403,
7.77817459])
cA_expect = cA_expect + 0.5j*cA_expect
cD_expect = np.asarray([-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487])
cD_expect = cD_expect + 0.5j*cD_expect
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_idwt_partial_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'haar')
cA_rec_expect = np.array([5.0+2.5j, 5.0+2.5j, 1.0+0.5j, 1.0+0.5j,
1.5+0.75j, 1.5+0.75j, 5.0+2.5j, 5.0+2.5j])
cA_rec = pywt.idwt(cA, None, 'haar')
assert_allclose(cA_rec, cA_rec_expect)
cD_rec_expect = np.array([-2.0-1.0j, 2.0+1.0j, 0.0+0.0j, 0.0+0.0j,
-3.5-1.75j, 3.5+1.75j, -1.0-0.5j, 1.0+0.5j])
cD_rec = pywt.idwt(None, cD, 'haar')
assert_allclose(cD_rec, cD_rec_expect)
assert_allclose(cA_rec + cD_rec, x)
def test_dwt_wavelet_kwd():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
cA, cD = pywt.dwt(x, wavelet=w, mode='constant')
cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
7.81994027]
cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
-0.09722957, -0.07045258]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode) for mode in
pywt.Modes.modes]
expected_result = [6, ] * len(pywt.Modes.modes)
expected_result[pywt.Modes.modes.index('periodization')] = 4
assert_allclose(ln_modes, expected_result)
ln_modes = [pywt.dwt_coeff_len(len(x), w, mode) for mode in
pywt.Modes.modes]
assert_allclose(ln_modes, expected_result)
def test_idwt_none_input():
# None input equals arrays of zeros of the right length
res1 = pywt.idwt([1, 2, 0, 1], None, 'db2', 'symmetric')
res2 = pywt.idwt([1, 2, 0, 1], [0, 0, 0, 0], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
res1 = pywt.idwt(None, [1, 2, 0, 1], 'db2', 'symmetric')
res2 = pywt.idwt([0, 0, 0, 0], [1, 2, 0, 1], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
# Only one argument at a time can be None
assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'symmetric')
def test_idwt_invalid_input():
# Too short, min length is 4 for 'db4':
assert_raises(ValueError, pywt.idwt, [1, 2, 4], [4, 1, 3], 'db4', 'symmetric')
def test_dwt_single_axis():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA, cD = pywt.dwt(x, 'db2', axis=-1)
cA0, cD0 = pywt.dwt(x[0], 'db2')
cA1, cD1 = pywt.dwt(x[1], 'db2')
assert_allclose(cA[0], cA0)
assert_allclose(cA[1], cA1)
assert_allclose(cD[0], cD0)
|
assert_allclose(cD[1], cD1)
|
numpy.testing.assert_allclose
|
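# A minimal sketch of numpy.testing.assert_allclose as used in the tests above:
# it returns silently when the arrays agree within tolerance and raises
# AssertionError otherwise. The arrays here are illustrative, not from pywt.
import numpy as np
from numpy.testing import assert_allclose

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-12
assert_allclose(a, b, rtol=1e-10)   # passes: difference is within tolerance
try:
    assert_allclose(a, b + 1.0)     # fails: every element differs by 1.0
except AssertionError:
    print("mismatch detected")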
#!/usr/bin/env python
# vim: set fileencoding=utf-8
'''
Copyright (c) <NAME> 2016
Implements the Instrument objects that are used by the Curve objects to hold
attributes of market data and return discount factors.
Note that cash and forward instruments calculate discount factors analytically,
discount factors for swaps are calculated using a root-finding algorithm.
'''
# python libraries
from __future__ import division
import dateutil.relativedelta
import datetime
import numpy as np
import scipy.interpolate
import scipy.optimize
import sys
import time
# qlib libraries
from qbootstrapper.swapscheduler import Schedule
if sys.version_info > (3,):
long = int
class Instrument(object):
'''Base Instrument convenience class
Class is primarily used for the date adjustment methods that are used
by the sub-classes.
'''
def __init__(self):
pass
def _date_adjust(self, date, adjustment):
'''Method to return a date that is adjusted according to the
adjustment convention method defined
Arguments:
date (datetime) : Date to be adjusted
adjustment (str) : Adjustment type
available: unadjusted,
following,
preceding,
modified following
'''
if adjustment == 'unadjusted':
return date
elif adjustment == 'following':
if date.weekday() < 5:
return date
else:
return date + self._timedelta(7 - date.weekday(), 'days')
elif adjustment == 'preceding':
if date.weekday() < 5:
return date
else:
return date - self._timedelta(date.weekday() - 4, 'days')  # Sat -> Fri, Sun -> Fri
elif adjustment == 'modified following':
if date.month == self._date_adjust(date, 'following').month:
return self._date_adjust(date, 'following')
else:
return date - self._timedelta(date.weekday() - 4, 'days')  # roll back to the preceding business day
else:
raise Exception('Adjustment period "{adjustment}" '
'not recognized'.format(**locals()))
@staticmethod
def _timedelta(length_num, length_type):
'''Static method to return the date +/- some length with a length type
Arguments:
length_num (int) : Length of the period (e.g., if the period
is 6 months, this is 6)
length_type (str) : Period type (e.g., if the period is 6 months,
this is months)
available: months,
weeks,
days
'''
if length_type == 'months':
return dateutil.relativedelta.relativedelta(months=length_num)
elif length_type == 'weeks':
return dateutil.relativedelta.relativedelta(weeks=length_num)
elif length_type == 'days':
return dateutil.relativedelta.relativedelta(days=length_num)
else:
raise Exception('Period length "{length_type}" '
'not recognized'.format(**locals()))
@staticmethod
def daycount(effective, maturity, basis):
'''Static method to return the accrual length, as a decimal,
between an effective and a maturity subject to a basis convention
Arguments:
effective (datetime) : First day of the accrual period
maturity (datetime) : Last day of the accrual period
basis (str) : Basis convention
available: Act360,
Act365,
30360,
30E360
'''
if type(effective) == np.datetime64:
timestamp = effective.astype('<M8[s]').astype(np.uint64)
effective = datetime.datetime.fromtimestamp(timestamp)
timestamp = maturity.astype('<M8[s]').astype(np.uint64)
maturity = datetime.datetime.fromtimestamp(timestamp)
if basis.lower() == 'act360':
accrual_period = (maturity - effective).days / 360
elif basis.lower() == 'act365':
accrual_period = (maturity - effective).days / 365
elif basis.lower() == '30360':
start, end = min(effective.day, 30), min(maturity.day, 30)
months = (30 * (maturity.month - effective.month) +
360 * (maturity.year - effective.year))
accrual_period = (end - start + months) / 360
elif basis.lower() == '30e360':
start, end = max(0, 30 - effective.day), min(30, maturity.day)
months = 30 * (maturity.month - effective.month - 1)
years = 360 * (maturity.year - effective.year)
accrual_period = (years + months + start + end) / 360
else:
raise Exception('Accrual basis "{basis}" '
'not recognized'.format(**locals()))
return accrual_period
class LIBORInstrument(Instrument):
'''LIBOR cash instrument class for use with the Swap Curve bootstrapper.
This class can be utilized to hold the market data and conventions
for a single cash LIBOR-equivalent contract, which is later utilized when the
.build() method is called on the curve where this instrument has been added.
The discount factor is calculated as:
1 / (1 + (r * accrual_days / days_in_year))
Arguments:
effective (datetime) : Effective date of the LIBOR-equivalent
cash instrument
rate (float) : Interest rate of the instrument
term_length (int) : Length of the instrument period
curve (Curve) : Curve being built, necessary for callbacks
to the curve for discount factors
kwargs
------
basis (str) : Accrual basis for the period
[default: Act360]
length_type : Length of the term_length in units
[default: months]
payment_adjustment (str): Adjustment to the payment date from the
end of the accrual period
[default: unadjusted]
'''
def __init__(self, effective, rate, term_length, curve,
basis='Act360', length_type='months',
payment_adjustment='unadjusted'):
# assignments
self.effective = effective
self.rate = rate
self.term_length = term_length
self.basis = basis
self.length_type = length_type
self.payment_adjustment = payment_adjustment
self.instrument_type = 'Cash'
# calculations
self._date_calculations()
self.accrual_period = super(LIBORInstrument,
self).daycount(self.effective,
self.maturity,
self.basis)
def _date_calculations(self):
'''Method for setting the accrual period and dates for a Cash
instrument
'''
self._term = super(LIBORInstrument, self)._timedelta(self.term_length,
self.length_type)
self.maturity = self.effective + self._term
self.payment_date = super(LIBORInstrument,
self)._date_adjust(self.maturity,
self.payment_adjustment)
def discount_factor(self):
'''Method for returning the discount factor for a Cash rate
'''
return np.log(1 / (1 + (self.rate * self.accrual_period)))
class FRAInstrumentByDates(Instrument):
'''FRA instrument class for use with the Swap Curve bootstrapper.
This class can be utilized to hold the market data and conventions
for a single FRA contract, which is later utilized when the .build() method
is called on the curve where this instrument has been added.
The discount factor is calculated as:
DF[effective] / (1 + (r * accrual_days / days_in_year))
Arguments:
effective (datetime) : First day of the accrual period of the FRA
maturity (datetime) : Last day of the accrual period of the FRA
rate (float) : Fixing rate of the FRA
curve (Curve) : Curve being built, necessary for callbacks
to the curve for discount factors
kwargs
------
basis (str) : Accrual basis for the period
[default: Act360]
TODO: Add FRAInstrumentByTenor
'''
def __init__(self, effective, maturity, rate, curve, basis='Act360'):
# assignments
self.effective = effective
self.maturity = maturity
self.rate = rate
self.basis = basis
self.curve = curve
self.accrual_period = super(FRAInstrumentByDates,
self).daycount(self.effective,
self.maturity,
self.basis)
self.instrument_type = 'FRA'
def discount_factor(self):
'''Method for returning the discount factor for a FRA
'''
numerator = self.curve.discount_factor(self.effective)
denominator = 1 + (self.rate * self.accrual_period)
discount_factor = numerator / denominator
return np.log(discount_factor)
class FuturesInstrumentByDates(Instrument):
'''Futures instrument class for use with Swap Curve bootstrapper.
This class can be utilized to hold the market data and conventions
for a single Futures contract, which is later utilized when the
.build() method is called on the curve where this instrument
has been added.
The discount factor is calculated as:
DF[effective] / (1 + ((100 - price) / 100 * accrual_days / days_in_year))
Arguments:
effective (datetime) : First day of the accrual period of the future
maturity (datetime) : Last day of the accrual period of the future
price (float) : Price of the future (assumes expiry price
of the future is 100)
curve (Curve) : Curve being built, necessary for callbacks
to the curve for discount factors
kwargs
------
basis (str) : Accrual basis for the period
[default: Act360]
TODO: Add FuturesInstrumentByTicker
TODO: Add Futures convexity calculation
'''
def __init__(self, effective, maturity, price, curve,
basis='Act360'):
# assignments
self.effective = effective
self.maturity = maturity
self.price = price
self.rate = (100 - price) / 100
self.basis = basis
self.curve = curve
self.accrual_period = super(FuturesInstrumentByDates,
self).daycount(self.effective,
self.maturity,
self.basis)
self.instrument_type = 'Futures'
def discount_factor(self):
'''Method for returning the discount factor for a future
'''
discount_factor = (self.curve.discount_factor(self.effective) /
(1 + (self.rate * self.accrual_period)))
return np.log(discount_factor)
class SwapInstrument(Instrument):
'''Base class for swap instruments. See OISSwapInstrument and
LIBORSwapInstrument for more detailed specs.
'''
def __init__(self, effective, maturity, rate, curve,
fixed_basis='30360', float_basis='Act360',
fixed_length=6, float_length=6,
fixed_period_length='months', float_period_length='months',
fixed_period_adjustment='unadjusted',
float_period_adjustment='unadjusted',
fixed_payment_adjustment='unadjusted',
float_payment_adjustment='unadjusted',
second=False, penultimate=False, fixing_lag=0, notional=100,
rate_period=1, rate_period_length='days', rate_basis='Act360'):
# assignments
self.effective = effective
self.maturity = maturity
if bool(second): self.second = second
if bool(penultimate): self.penultimate = penultimate
self.rate = rate
self.curve = curve
self.fixing_lag = fixing_lag
self.notional = notional
self.fixed_basis = fixed_basis
self.fixed_length = fixed_length
self.fixed_period_length = fixed_period_length
self.fixed_period_adjustment = fixed_period_adjustment
self.fixed_payment_adjustment = fixed_payment_adjustment
self.float_basis = float_basis
self.float_length = float_length
self.float_period_length = float_period_length
self.float_period_adjustment = float_period_adjustment
self.float_payment_adjustment = float_payment_adjustment
self.rate_period = rate_period
self.rate_period_length = rate_period_length
self.rate_basis = rate_basis
self._set_schedules()
def _set_schedules(self):
'''Sets the fixed and floating schedules of the swap.
'''
if hasattr(self, 'second'):
self.fixed_schedule = Schedule(self.effective, self.maturity,
self.fixed_length,
period_length=self.fixed_period_length,
second=self.second,
penultimate=self.penultimate,
period_adjustment=self.fixed_period_adjustment,
payment_adjustment=self.fixed_payment_adjustment)
self.float_schedule = Schedule(self.effective, self.maturity,
self.float_length,
period_length=self.float_period_length,
second=self.second,
penultimate=self.penultimate,
period_adjustment=self.float_period_adjustment,
payment_adjustment=self.float_payment_adjustment)
else:
self.fixed_schedule = Schedule(self.effective, self.maturity,
self.fixed_length,
period_length=self.fixed_period_length,
period_adjustment=self.fixed_period_adjustment,
payment_adjustment=self.fixed_payment_adjustment)
self.float_schedule = Schedule(self.effective, self.maturity,
self.float_length,
period_length=self.float_period_length,
period_adjustment=self.float_period_adjustment,
payment_adjustment=self.float_payment_adjustment)
class OISSwapInstrument(SwapInstrument):
'''OIS swap instrument class for use with Swap Curve bootstrapper.
This class can be utilized to hold the market data and conventions
for a single swap, which is later utilized when the .build()
method is called on the curve where this instrument has been added.
Arguments:
effective (datetime) : First accrual start date of
the swap
maturity (datetime) : Last accrual end date of
the swap
rate (float) : Fixed rate
curve (Curve object) : Associated curve object.
There are callbacks to the
curve for prior discount
factors, so it must be
assigned
kwargs (optional)
-----------------
fixed_basis (string) : Accrual basis of the fixed
leg. See daycount method of
base Instrument class for
implemented conventions
[default: '30360']
float_basis (string) : Accrual basis of the floating
leg. See daycount method of
base Instrument class for
implemented conventions
[default: 'Act360']
fixed_length (int) : Length of the fixed accrual
period, must be combined with the
'fixed_period_length' argument
[default: 6]
float_length (int) : Length of the floating accrual
period, must be combined with
the 'float_period_length'
argument. Should usually match the
rate_period argument
[default: 6]
fixed_period_length (string) : Length of the fixed accrual
period timescale. See the
_timedelta method of the base
Instrument class for implemented
conventions
[default: 'months']
float_period_length (string) : Length of the floating accrual
period timescale. See the
_timedelta method of the base
Instrument class for implemented
conventions
[default: 'months']
fixed_period_adjustment (string) : Adjustment type for fixed
accrual periods. See the
_date_adjust method of the base
Instrument class for implemented
conventions
[default: 'unadjusted']
float_period_adjustment (string) : Adjustment type for floating
accrual periods. See the
_date_adjust method for the base
Instrument class for implemented
conventions
[default: 'unadjusted']
fixed_payment_adjustment (string) : Adjustment type for fixed
payment periods. See the
_date_adjust method for the base
Instrument class for implemented
conventions
[default: 'unadjusted']
float_payment_adjustment (string) : Adjustment type for floating
payment periods. See the
_date_adjust method for the base
Instrument class for implemented
conventions
[default: 'unadjusted']
second (datetime) : Specify the first regular roll
date for the accrual periods
[default: False]
penultimate (datetime) : Specify the last regular roll
date for the accrual periods
[default: False]
fixing_lag (int) : Days prior to the first accrual
period that the floating rate
is fixed
[default: 0]
notional (int) : Notional amount for use with
calculating the swap value.
Larger numbers will be slower,
but more exact.
[default: 100]
rate_period (int) : Length of the floating rate
accrual period, must be combined
with the 'rate_period_length'
argument. Should usually match
the float_length argument.
[default: 1]
rate_period_length (string) : Length of the rate accrual period
timescale. See the _timedelta
method of the base Instrument
class for implemented conventions
[default: 'days']
rate_basis (string) : Accrual basis of the LIBOR rate.
See the daycount method of the
base Instrument class for
implemented conventions.
[default: 'Act360']
'''
def __init__(self, *args, **kwargs):
super(OISSwapInstrument, self).__init__(*args, **kwargs)
self.instrument_type = 'OIS_swap'
def discount_factor(self):
'''Returns the discount factor for the swap using Newton's method
root finder.
'''
return scipy.optimize.newton(self._swap_value, 0)
def _swap_value(self, guess, args=()):
'''Private method used for root finding discount factor
The main function for use with the root-finder. This function returns
the value of a swap given a discount factor. It appends the discount
factor to the existing array along with the date of the instrument, calculates
each cashflow and PV for each leg, and returns the net value of the
pay-fixed swap.
Arguments:
guess (float) : guess to be appended to a copy of the attached
curve.
'''
if not isinstance(guess, (int, float, long, complex)):
# simultaneous bootstrapping sets the guess[0] as the ois guess
guess = guess[0]
temp_curve = self.curve.curve
temp_curve = np.append(self.curve.curve,
np.array([(np.datetime64(self.maturity.strftime('%Y-%m-%d')),
time.mktime(self.maturity.timetuple()),
guess)],
dtype=self.curve.curve.dtype))
interpolator = scipy.interpolate.PchipInterpolator(temp_curve['timestamp'],
temp_curve['discount_factor'])
for period in self.float_schedule.periods:
forward_rate = self.__forward_rate(interpolator, period)
period['cashflow'] = forward_rate * self.notional
payment_dates = self.float_schedule.periods['payment_date'].astype('<M8[s]')
discount_factors = np.exp(interpolator(payment_dates.astype(np.uint64)))
self.float_schedule.periods['PV'] = self.float_schedule.periods['cashflow'] * discount_factors
float_leg = self.float_schedule.periods['PV'].sum()
for period in self.fixed_schedule.periods:
forward_rate = self.rate
accrual_period = super(OISSwapInstrument,
self).daycount(period['accrual_start'],
period['accrual_end'],
self.fixed_basis)
period['cashflow'] = forward_rate * accrual_period * self.notional
payment_dates = self.fixed_schedule.periods['payment_date'].astype('<M8[s]')
discount_factors = np.exp(interpolator(payment_dates.astype(np.uint64)))
self.fixed_schedule.periods['PV'] = self.fixed_schedule.periods['cashflow'] * discount_factors
fixed_leg = self.fixed_schedule.periods['PV'].sum()
return float_leg - fixed_leg
def __forward_rate(self, interpolator, period):
'''Private method for calculating the compounded forward rate for an OIS
swap.
The compounded forward rate is calculated as:
Π_i [ DF[i] / DF[i+1] ] - 1
Note that this is computed efficiently by calculating each forward
rate (+ 1) for the entire date array and then taking the product of
the array. Additionally, there are 3 entries for every Friday, as
each Friday should compound 3 times (no new rates on weekends).
Arguments:
interpolator (scipy.interpolate): temporary interpolator object
that includes the current swap
maturity guess discount factor.
period (np.recarray) : 1 line of the swapschedule array
must contain the accrual start
and end dates
'''
start_date = period['accrual_start'].astype('<M8[s]')
end_date = period['accrual_end'].astype('<M8[s]')
one_day = np.timedelta64(1, 'D')
start_day = start_date.astype(object).weekday()
rate = 1
first_dates =
|
np.arange(start_date, end_date, one_day)
|
numpy.arange
|
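# A minimal sketch of numpy.arange over datetime64 values, the call completed
# above: it enumerates every calendar day in an accrual period. The dates are
# illustrative and not taken from any curve.
import numpy as np

start_date = np.datetime64('2016-01-04')
end_date = np.datetime64('2016-01-11')
one_day = np.timedelta64(1, 'D')
days = np.arange(start_date, end_date, one_day)
print(days.size)   # 7 days, end point excluded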
import numpy as np
import functools
class Ket:
__slots__ = "data", "_order", "_num", "__dict__"
def __init__(self, data: iter):
self.data = data
self._order = list(range(len(data)))
self._num = int(''.join([str(e) for e in data]), 2)
def __iter__(self):
''' Returns the Iterator object '''
return iter(self.data)
def __len__(self):
return len(self.data)
def __eq__(self, other):
assert False
return list(self.data) == list(other.data) and list(self._order) == list(other._order)
# this breaks putting it inside a numpy array?!
# def __getitem__(self, item):
# return self.data[item]
def __repr__(self) -> str:
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
return f"|{self.num},{self.energy}:" + f"{''.join([['↓', '↑'][int(e)] + str(self._order[i]) for i, e in enumerate(self)])}⟩".translate(SUB)
def __lt__(self, other):
assert isinstance(other, Ket)
return self.energy < other.energy
def __add__(self, other):
return Ket(np.array(list(self) + list(other))) # THIS IS INELEGANT
@functools.cached_property
def energy(self) -> int:
return sum([int(d) for d in self])
@property
def num(self) -> int:
return self._num
def reorder(self, order):
self._order = order
self._num = int(''.join([str(e) for e in self.data[self._order]]), 2)
class Basis(tuple):
@functools.cached_property
def num_qubits(self):
return int(np.log2(len(self)))
def reorder(self, order):
x = np.empty((len(self)), dtype=Ket)
x[:] = self
return Basis(tuple(x[order]))
def __repr__(self):
return "[" + ' '.join([str(b.num) for b in self]) + "]"
def tensor(self, *others):
res = self
for other in others:
res = Basis((i + j for i in res for j in other))
return res
@functools.lru_cache(maxsize=1000, typed=False)
def canonical_basis(n):
return Basis([Ket(np.array(list(f"{i:b}".zfill(n)))) for i in range(2 ** n)])
@functools.lru_cache(maxsize=1000, typed=False)
def energy_basis(n):
basis = canonical_basis(n)
energy = [b.energy for b in basis]
nums = [b.num for b in basis]
idx =
|
np.lexsort((nums, energy))
|
numpy.lexsort
|
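# A minimal sketch of numpy.lexsort as completed above: the LAST key passed is
# the primary sort key, so (nums, energy) orders states by energy first and
# breaks ties by num. The values below are illustrative.
import numpy as np

energy = np.array([1, 0, 2, 1])
nums = np.array([2, 0, 3, 1])
idx = np.lexsort((nums, energy))
print(idx)                  # [1 3 0 2]
print(energy[idx], nums[idx])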
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from math import gcd
from collections import namedtuple
from typing import Callable, Tuple, Union
import numpy as np
from scipy import special
from scipy.stats import distributions
from ... import tensor as mt
from ...core import ExecutableTuple
from ...typing import TileableType
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
Ks_2sampResult = KstestResult
def _compute_prob_inside_method(m, n, g, h): # pragma: no cover
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
<NAME>.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
# the large dynamic range which appears during the calculation.
# Instead we rescale based on the magnitude of the right most term in
# the column and keep track of an exponent separately and apply
# it at the end of the calculation. Similarly when multiplying by
# the binomial coefficient
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
expnt = 0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Rescale if the right most value is over 2**900
val = A[maxj - minj - 1]
_, valexpt = math.frexp(val)
if valexpt > 900:
# Scaling to bring down to about 2**800 appears
# sufficient for sizes under 10000.
valexpt -= 800
A = np.ldexp(A, -valexpt)
expnt += valexpt
val = A[maxj - minj - 1]
# Now divide by the binomial (m+n)!/m!/n!
for i in range(1, n + 1):
val = (val * i) / (m + i)
_, valexpt = math.frexp(val)
if valexpt < -128:
val = np.ldexp(val, -valexpt)
expnt += valexpt
# Finally scale if needed.
return np.ldexp(val, expnt)
def _compute_prob_outside_square(n, h): # pragma: no cover
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... ) / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h): # pragma: no cover
"""
Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
<NAME>.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom() may return a float.
# Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i)) # pylint: disable=redefined-builtin
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative): # pragma: no cover
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1) # pylint: disable=redefined-builtin
if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
def _calc_prob_2samp(d, n1, n2, alternative, mode): # pragma: no cover
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -mt.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int_).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z =
|
np.sqrt(en)
|
numpy.sqrt
|
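# A minimal sketch of the asymptotic branch completed above: the one-sided
# large-sample Smirnov approximation uses z = sqrt(en) * d with en = m*n/(m+n).
# Sample sizes and d are illustrative; the exact tail formula used by the
# library may include higher-order corrections.
import numpy as np

n1, n2, d = 120, 80, 0.15
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
z = np.sqrt(en) * d
print(np.exp(-2 * z ** 2))   # classical one-sided approximation of the p-value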
from cvs import *
print ('import tensorflow.....wait...')
import tensorflow as tf
import numpy as np
import time
imageSize = 257
width = imageSize
height = imageSize
def load_model(PATH_TO_CKPT):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def resizeimg(img, width, height):
img = cvs.resize(img, (width,height))
img = img.astype(float)
img = img * (2.0 / 255.0) - 1.0
return img
def main():
cam=cvs.VideoCapture(0)
detection_graph = load_model("frozen_model.pb")
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image = detection_graph.get_tensor_by_name('image:0')
heatmaps=detection_graph.get_tensor_by_name('heatmap:0')
offsets=detection_graph.get_tensor_by_name('offset_2:0')
displacementFwd=detection_graph.get_tensor_by_name('displacement_fwd_2:0')
displacementBwd=detection_graph.get_tensor_by_name('displacement_bwd_2:0')
fcount=-1
start = time.time()
while True:
sleep(30)
img = cam.read()
if img is None :
sleep(50)
continue
fcount=fcount+1
# global lbs
lbs = 'Average FPS: '+ str(fcount / (time.time() - start))
cvs.setLbs(lbs)
input_image = resizeimg(img,width,height)
tmpimg = img
tmpimg = cv2.resize(tmpimg, (width,height))
input_image = np.array(input_image,dtype=np.float32)
input_image = input_image.reshape(1,width,height,3)
heatmaps_result,offsets_result,displacementFwd_result,displacementBwd_result = sess.run(
[heatmaps,offsets,displacementFwd,displacementBwd], feed_dict={ image: input_image } )
colors = [[255, 0, 0], [255, 170, 0], [255, 170, 0],[255, 255, 0], [255, 255, 0], [170, 255, 0], [170, 255, 0], [0, 255, 0],
[0, 255, 0], [0, 255, 170], [0, 255, 170], [0, 170, 255], [0, 170, 255], [0, 0, 255], [0, 0, 255],
[255, 0, 255], [255, 0, 255]]
pairs = [[5,6],[5,7],[6,8],[7,9],[8,10],[5,11],[6,12],[11,12],[11,13],[12,14],[13,15],[14,16]]
keypoint = []
ckeypoint = []
heatmaps_result = heatmaps_result[0]
aaaa= np.transpose(heatmaps_result,(2, 0, 1))
offsets_result=offsets_result[0]
bbb= np.transpose(offsets_result,(2, 0, 1))
for k in range(0,17):
heatmaps_result=aaaa[k]
maxheat=np.max(heatmaps_result)
re=
|
np.where(heatmaps_result==maxheat)
|
numpy.where
|
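# A minimal sketch of the numpy.where call completed above: it locates the
# (row, col) of the strongest response in one keypoint heatmap. The heatmap is
# random here, purely for illustration.
import numpy as np

heatmap = np.random.rand(9, 9)
maxheat = np.max(heatmap)
rows, cols = np.where(heatmap == maxheat)
print(int(rows[0]), int(cols[0]))   # grid coordinates of the peak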
# from tqdm.notebook import tqdm as tqdm_notebook
# import os
# import glob
import pickle
import numpy as np
from src.support_class import *
from matplotlib import pyplot as plt
from matplotlib import colors as mcolors
from scipy import linalg
from codeStore import support_fun as spf
colors11 = plt.get_cmap('Blues')
colors12 = plt.get_cmap('Reds')
colors1 = np.vstack((colors11(np.linspace(1, 0.2, 256)), colors12(np.linspace(0.4, 1, 256))))
cmpBR = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors1)
# generate the mobility matrix of the microswimmer from a pickle file,
# with force- and torque-free conditions,
# ignoring head-tail interactions.
def fun_m_rot(mbase, R):
ab = mbase[0:3, 0:3]
bb1 = mbase[3:6, 0:3]
bb2 = mbase[0:3, 3:6]
cb = mbase[3:6, 3:6]
m2 = np.zeros_like(mbase)
m2[0:3, 0:3] = np.dot(R, np.dot(ab, R.T))
m2[3:6, 0:3] = np.dot(R, np.dot(bb1, R.T)) * np.linalg.det(R)
m2[0:3, 3:6] = np.dot(R, np.dot(bb2, R.T)) * np.linalg.det(R)
m2[3:6, 3:6] = np.dot(R, np.dot(cb, R.T))
return m2
def cross_matrix(v):
assert v.shape == (3,)
m = np.zeros((3, 3))
m[0, 1] = -v[2]
m[0, 2] = v[1]
m[1, 0] = v[2]
m[1, 2] = -v[0]
m[2, 0] = -v[1]
m[2, 1] = v[0]
return m
def fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, tail_ini_beta, rotM):
trs = rb1 * rb2 / np.sqrt((rb1 * np.sin(tail_ini_beta)) ** 2 +
(rb2 * np.cos(tail_ini_beta)) ** 2)
tl = 2 * rb1 + ch * ph + dist_hs
rbc_base = np.array((0, 0, tl / 2 - rb1))
rtc = rbc_base - np.array((0, 0, rb1 + dist_hs + ch * ph / 2))
head_end0 = rbc_base - np.array((0, 0, trs))
rbc = np.dot(rotM.T, (rbc_base - head_end0)) + head_end0
return rbc, rtc
def fun_mfull_ufull_core(mhead_base, mtail, dist_hs, beta, rotM, wbc, wtc,
rb1, rb2, ch, ph, body_size_fct=1, tail_size_fct=1, ):
beta_norm = np.array([0, 1, 0])
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
mhead = fun_m_rot(fun_m_rot(mhead_base, rotM_beta), rotM.T)
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
drbc = rbc - rc
drtc = rtc - rc
mhead[0:3, 0:3] = mhead[0:3, 0:3] * body_size_fct ** 1
mhead[0:3, 3:6] = mhead[0:3, 3:6] * body_size_fct ** 2
mhead[3:6, 0:3] = mhead[3:6, 0:3] * body_size_fct ** 2
mhead[3:6, 3:6] = mhead[3:6, 3:6] * body_size_fct ** 3
mtail[0:3, 0:3] = mtail[0:3, 0:3] * tail_size_fct ** 1
mtail[0:3, 3:6] = mtail[0:3, 3:6] * tail_size_fct ** 2
mtail[3:6, 0:3] = mtail[3:6, 0:3] * tail_size_fct ** 2
mtail[3:6, 3:6] = mtail[3:6, 3:6] * tail_size_fct ** 3
# generate M matrix with the force- and torque-free conditions.
mfull = np.zeros((18, 18))
mfull[0: 6, 0: 6] = mhead
mfull[6:12, 6:12] = mtail
mfull[0: 3, 12:15] = -np.eye(3)
mfull[0: 3, 15:18] = cross_matrix(drbc)
mfull[3: 6, 15:18] = -np.eye(3)
mfull[6: 9, 12:15] = -np.eye(3)
mfull[6: 9, 15:18] = cross_matrix(drtc)
mfull[9:12, 15:18] = -np.eye(3)
mfull[12:15, 0: 3] = -np.eye(3)
mfull[12:15, 6: 9] = -np.eye(3)
mfull[15:18, 0: 3] = -cross_matrix(drbc)
mfull[15:18, 3: 6] = -np.eye(3)
mfull[15:18, 6: 9] = -cross_matrix(drtc)
mfull[15:18, 9:12] = -np.eye(3)
# generate boundary conditions.
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
ufull = np.zeros(18)
ufull[0: 3] = 0
ufull[3: 6] = wbc * norm_head
ufull[6: 9] = 0
ufull[9:12] = wtc * norm_tail
mobility_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return mfull, ufull, mobility_kwargs
def fun_position_kwargs(case_kwargs):
beta_norm = np.array([0, 1, 0])
dist_hs = case_kwargs['dist_hs']
beta = case_kwargs['tail_ini_beta']
theta = case_kwargs['tail_ini_theta']
phi = case_kwargs['tail_ini_phi']
psi = case_kwargs['tail_ini_psi']
rb1 = case_kwargs['rs1']
rb2 = case_kwargs['rs2']
ch = case_kwargs['ch']
ph = case_kwargs['ph']
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
position_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return position_kwargs
def fun_ut_un(u, w):
ut = np.dot(u, w) * w / (np.linalg.norm(w) ** 2)
un = u - ut
return ut, un
def mobility_pickle(pickle_dir, beta, theta, phi, psi, dist_hs, wbc, wtc,
body_size_fct=1, tail_size_fct=1, ):
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
problem_kwargs = tpick['problem_kwargs']
rb1 = problem_kwargs['rs1']
rb2 = problem_kwargs['rs2']
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
mhead_base, mtail = tpick['Mhead'], tpick['Mtail']
rotM = Rloc2glb(theta, phi, psi)
mfull, ufull, mobility_kwargs = \
fun_mfull_ufull_core(mhead_base, mtail, dist_hs, beta, rotM,
wbc, wtc, rb1, rb2, ch, ph,
body_size_fct=body_size_fct, tail_size_fct=tail_size_fct)
mobility_kwargs['rb1'] = rb1
mobility_kwargs['rb2'] = rb2
mobility_kwargs['ch'] = ch
mobility_kwargs['ph'] = ph
return mfull, ufull, mobility_kwargs
def apx_resistance_pickle(pickle_dir, beta, theta, phi, psi, dist_hs, wbc, wtc):
# decoupled method, resistance
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
problem_kwargs = tpick['problem_kwargs']
rb1 = problem_kwargs['rs1']
rb2 = problem_kwargs['rs2']
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
mhead_base, mtail = tpick['Mhead'], tpick['Mtail']
#
Rhead_base = np.linalg.inv(mhead_base)
Rhead_base = np.diagflat(np.diag(Rhead_base))
t1 = (Rhead_base[0, 0] + Rhead_base[1, 1]) / 2
Rhead_base[0, 0] = t1
Rhead_base[1, 1] = t1
t1 = (Rhead_base[3, 3] + Rhead_base[4, 4]) / 2
Rhead_base[3, 3] = t1
Rhead_base[4, 4] = t1
Rtail = np.linalg.inv(mtail)
beta_norm = np.array([0, 1, 0])
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
Rhead = fun_m_rot(fun_m_rot(Rhead_base, rotM_beta), rotM.T)
Ab_rt = Rhead[0:3, 0:3]
Cb_rt = Rhead[3:6, 3:6]
At = np.diagflat(np.diag(Rtail[0:3, 0:3]))
t1 = (At[0, 0] + At[1, 1]) / 2
At[0, 0] = t1
At[1, 1] = t1
Bt = np.diagflat(np.diag((Rtail[0:3, 3:6] + Rtail[3:6, 0:3]) / 2))
t1 = (Bt[0, 0] + Bt[1, 1]) / 2 * 0
Bt[0, 0] = t1
Bt[1, 1] = t1
Ct = np.diagflat(np.diag(Rtail[3:6, 3:6]))
t1 = (Ct[0, 0] + Ct[1, 1]) / 2
Ct[0, 0] = t1
Ct[1, 1] = t1
#
rbc, rtc = fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
# drbc = rbc - rc
drtc = rtc - rc
dtc = cross_matrix(drtc)
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
#
Rfull = np.zeros((6, 6))
Rfull[0:3, 0:3] = Ab_rt + At
Rfull[0:3, 3:6] = - np.dot(At, dtc)
Rfull[3:6, 0:3] = + np.dot(dtc, At)
Rfull[3:6, 3:6] = Cb_rt + Ct + np.dot(dtc, Bt) - np.dot(Bt, dtc) - np.dot(dtc, np.dot(At, dtc))
FFull = np.zeros(6)
FFull[0:3] = -np.dot(Bt, wtc * norm_tail)
FFull[3:6] = -np.dot(Cb_rt, wbc * norm_head) - \
np.dot(Ct, wtc * norm_tail) - np.dot(dtc, np.dot(Bt, wtc * norm_tail))
resistance_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail, }
return Rfull, FFull, resistance_kwargs
def fun_alpha_bctc(model, wbc, wtc):
mfull, ufull, mobility_kwargs = model.mobility_matrix(wbc, wtc)
ffull = linalg.solve(mfull, ufull)
pb, pt = mobility_kwargs['norm_head'], mobility_kwargs['norm_tail']
Uc, Wc, Wbc = ffull[12:15], ffull[15:18], wbc * pb
Wg = Wc + Wbc
alpha_b = np.arccos(np.dot(pb, Wg) / np.linalg.norm(pb) / np.linalg.norm(Wg))
alpha_b = np.pi - alpha_b if alpha_b > np.pi / 2 else alpha_b
alpha_t = np.arccos(np.dot(pt, Wg) / np.linalg.norm(pt) / np.linalg.norm(Wg))
alpha_t = np.pi - alpha_t if alpha_t > np.pi / 2 else alpha_t
return alpha_b, alpha_t
def fun_kappa_alpha(model, wbc, wtc):
alpha_b, alpha_t = fun_alpha_bctc(model, wbc, wtc)
kappa_alpha = np.abs(alpha_b / alpha_t)
return kappa_alpha
def fun_hook_torque(model, wbc, wtc):
mfull, ufull, mobility_kwargs = model.mobility_matrix(wbc, wtc)
ffull = linalg.solve(mfull, ufull)
rb1 = mobility_kwargs['rb1']
rbc = mobility_kwargs['rbc']
pb = mobility_kwargs['norm_head']
ds = rbc + rb1 * pb
hookT = ffull[3:6] - np.cross(ds, ffull[0:3])
return hookT
def plot_3D_Traj(axi, tplt, theta_list):
axi.plot(np.zeros(1), np.zeros(1), np.zeros(1), ' ')
axi.plot(tplt[:, 0], tplt[:, 1], tplt[:, 2], ' ')
spf.set_axes_equal(axi)
spf.colorline3d(tplt, theta_list / np.pi, ax0=axi, clb_title='$\\theta / \\pi$',
cmap=plt.get_cmap('viridis'))
axi.scatter(axi.get_xlim()[0], np.zeros(1), np.zeros(1), marker='.', c='k')
axi.scatter(np.zeros(1), axi.get_ylim()[1], np.zeros(1), marker='.', c='k')
axi.scatter(np.zeros(1), np.zeros(1), axi.get_zlim()[0], marker='.', c='k')
axi.plot(np.ones_like(theta_list) * axi.get_xlim()[0], tplt[:, 1], tplt[:, 2],
'--', color='grey')
axi.plot(tplt[:, 0], np.ones_like(theta_list) * axi.get_ylim()[1], tplt[:, 2],
'--', color='grey')
axi.plot(tplt[:, 0], tplt[:, 1], np.ones_like(theta_list) * axi.get_zlim()[0],
'--', color='grey')
axi.view_init(25, -60)
axi.plot(np.zeros(1), np.zeros(1), np.zeros(1), marker='s', c='k')
return True
def plot_color_line(axi, tx, ty, xlabel, ylabel, c, vmin, vmax,
cmap=cmpBR, xscale0='linear', yscale0='linear', s=4,
marker='o', label=''):
axi.plot(tx, ty, linestyle='None')
# axi.relim()
# txlim0 = axi.get_xlim()
# tylim0 = axi.get_ylim()
# print(tylim0, ty.min())
sc = axi.scatter(tx, ty, vmin=vmin, vmax=vmax, c=c, cmap=cmap, s=s,
marker=marker, label=label)
axi.set_xlabel(xlabel)
axi.set_ylabel(ylabel)
axi.set_xscale(xscale0)
axi.set_yscale(yscale0)
# axi.set_xlim(*txlim0)
# axi.set_ylim(*tylim0)
return sc
def fun_cal_kwargs(Uc, Wc, wbc, pb, pt, kappa, mdf_alpha=True):
Wbc = wbc * pb
Wg = Wc + kappa * Wbc
UcWg_t, UcWg_n = fun_ut_un(Uc, Wg)
eta = np.arccos(np.dot(Uc, Wg) / np.linalg.norm(Uc) / np.linalg.norm(Wg))
alpha_b = np.arccos(np.dot(pb, Wg) / np.linalg.norm(pb) / np.linalg.norm(Wg))
alpha_t = np.arccos(np.dot(pt, Wg) / np.linalg.norm(pt) / np.linalg.norm(Wg))
if mdf_alpha:
alpha_b = np.pi - alpha_b if alpha_b > np.pi / 2 else alpha_b
alpha_t = np.pi - alpha_t if alpha_t > np.pi / 2 else alpha_t
R = np.linalg.norm(UcWg_n) / np.linalg.norm(Wg)
uc_par = np.sign(np.dot(Uc, Wg)) * np.linalg.norm(UcWg_t)
cal_kwargs = {'Wg': Wg,
'eta': eta,
'alpha_b': alpha_b,
'alpha_t': alpha_t,
'R': R,
'uc_par': uc_par,}
return cal_kwargs
class DecouplingModel:
def __init__(self, pickle_dir, beta_norm=np.array([0, 1, 0])):
with open(pickle_dir, 'rb') as handle:
tpick = pickle.load(handle)
self._case_kwargs = tpick['problem_kwargs']
self._rb1 = self._case_kwargs['rs1']
self._rb2 = self._case_kwargs['rs2']
self._ch = self._case_kwargs['ch']
self._ph = self._case_kwargs['ph']
self._mhead_base = tpick['Mhead']
self._mtail_base = tpick['Mtail']
self._beta_norm = beta_norm
self._beta = 0
self._theta = 0
self._phi = 0
self._psi = 0
self._dist_hs = 0
self._rotM_beta = np.eye(3)
self._rotM = np.eye(3)
@property
def case_kwargs(self):
return self._case_kwargs
@property
def rb1(self):
return self._rb1
@property
def rb2(self):
return self._rb2
@property
def ch(self):
return self._ch
@property
def ph(self):
return self._ph
@property
def mhead_base(self):
return self._mhead_base
@property
def mtail_base(self):
return self._mtail_base
@property
def beta_norm(self):
return self._beta_norm
@staticmethod
def fun_ut_un(u, w):
ut = np.dot(u, w) * w / (np.linalg.norm(w) ** 2)
un = u - ut
return ut, un
@staticmethod
def fun_MR_rot(mr_base, R):
ab = mr_base[0:3, 0:3]
bb1 = mr_base[3:6, 0:3]
bb2 = mr_base[0:3, 3:6]
cb = mr_base[3:6, 3:6]
m2 = np.zeros_like(mr_base)
m2[0:3, 0:3] = np.dot(R, np.dot(ab, R.T))
m2[3:6, 0:3] = np.dot(R, np.dot(bb1, R.T)) * np.linalg.det(R)
m2[0:3, 3:6] = np.dot(R, np.dot(bb2, R.T)) * np.linalg.det(R)
m2[3:6, 3:6] = np.dot(R, np.dot(cb, R.T))
return m2
@staticmethod
def cross_matrix(v):
assert v.shape == (3,)
m = np.zeros((3, 3))
m[0, 1] = -v[2]
m[0, 2] = v[1]
m[1, 0] = v[2]
m[1, 2] = -v[0]
m[2, 0] = -v[1]
m[2, 1] = v[0]
return m
@staticmethod
def fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, tail_ini_beta, rotM):
trs = rb1 * rb2 / np.sqrt((rb1 * np.sin(tail_ini_beta)) ** 2 +
(rb2 * np.cos(tail_ini_beta)) ** 2)
tl = 2 * rb1 + ch * ph + dist_hs
rbc_base = np.array((0, 0, tl / 2 - rb1))
rtc = rbc_base - np.array((0, 0, rb1 + dist_hs + ch * ph / 2))
head_end0 = rbc_base - np.array((0, 0, trs))
rbc = np.dot(rotM.T, (rbc_base - head_end0)) + head_end0
return rbc, rtc
def fun_position_kwargs(self):
beta_norm = self.beta_norm
rb1 = self.rb1
rb2 = self.rb2
ch = self.ch
ph = self.ph
beta = self._beta
theta = self._theta
phi = self._phi
psi = self._psi
dist_hs = self._dist_hs
left_hand = self.case_kwargs['left_hand']
rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
rotM = Rloc2glb(theta, phi, psi)
rbc, rtc = self.fun_rbc_rtc(rb1, rb2, ch, ph, dist_hs, beta, rotM)
rc = rbc # in the current version the center of the microswimmer is at the center of the head.
if left_hand:
norm_head = np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = -np.array((0, 0, 1))
else:
norm_head = -np.dot(rotM.T, rotM_beta)[:, 2]
norm_tail = np.array((0, 0, 1))
position_kwargs = {'rbc': rbc,
'rtc': rtc,
'rc': rc,
'norm_head': norm_head,
'norm_tail': norm_tail,
'rb1': rb1,
'rb2': rb2,
'ch': ch,
'ph': ph}
return position_kwargs
def fun_mfull_ufull_core(self, wbc, wtc, position_kwargs, body_size_fct=1, tail_size_fct=1):
# current version, these factors are prohibited.
assert body_size_fct == 1
assert tail_size_fct == 1
mhead_base = self.mhead_base
mtail = self.mtail_base
rotM = self._rotM
rotM_beta = self._rotM_beta
rbc = position_kwargs['rbc']
rtc = position_kwargs['rtc']
rc = position_kwargs['rc']
norm_head = position_kwargs['norm_head']
norm_tail = position_kwargs['norm_tail']
mhead = self.fun_MR_rot(self.fun_MR_rot(mhead_base, rotM_beta), rotM.T)
drbc = rbc - rc
drtc = rtc - rc
mhead[0:3, 0:3] = mhead[0:3, 0:3] * body_size_fct ** 1
mhead[0:3, 3:6] = mhead[0:3, 3:6] * body_size_fct ** 2
mhead[3:6, 0:3] = mhead[3:6, 0:3] * body_size_fct ** 2
mhead[3:6, 3:6] = mhead[3:6, 3:6] * body_size_fct ** 3
mtail[0:3, 0:3] = mtail[0:3, 0:3] * tail_size_fct ** 1
mtail[0:3, 3:6] = mtail[0:3, 3:6] * tail_size_fct ** 2
mtail[3:6, 0:3] = mtail[3:6, 0:3] * tail_size_fct ** 2
mtail[3:6, 3:6] = mtail[3:6, 3:6] * tail_size_fct ** 3
# generate M matrix with the force- and torque-free conditions.
mfull = np.zeros((18, 18))
mfull[0: 6, 0: 6] = mhead
mfull[6:12, 6:12] = mtail
mfull[0: 3, 12:15] = -np.eye(3)
mfull[0: 3, 15:18] = self.cross_matrix(drbc)
mfull[3: 6, 15:18] = -np.eye(3)
mfull[6: 9, 12:15] = -np.eye(3)
mfull[6: 9, 15:18] = self.cross_matrix(drtc)
mfull[9:12, 15:18] = -np.eye(3)
mfull[12:15, 0: 3] = -np.eye(3)
mfull[12:15, 6: 9] = -np.eye(3)
mfull[15:18, 0: 3] = -self.cross_matrix(drbc)
mfull[15:18, 3: 6] = -np.eye(3)
mfull[15:18, 6: 9] = -self.cross_matrix(drtc)
mfull[15:18, 9:12] = -np.eye(3)
# generate boundary conditions.
ufull = np.zeros(18)
ufull[0: 3] = 0
ufull[3: 6] = wbc * norm_head
ufull[6: 9] = 0
ufull[9:12] = wtc * norm_tail
return mfull, ufull
def case_ini(self, beta, theta, phi, psi, dist_hs):
beta_norm = self.beta_norm
self._beta = beta
self._theta = theta
self._phi = phi
self._psi = psi
self._dist_hs = dist_hs
self._rotM_beta = get_rot_matrix(norm=beta_norm, theta=-beta)
self._rotM = Rloc2glb(theta, phi, psi)
return True
def mobility_matrix(self, wbc, wtc, body_size_fct=1, tail_size_fct=1, ):
position_kwargs = self.fun_position_kwargs()
mfull, ufull = self.fun_mfull_ufull_core(wbc, wtc, position_kwargs,
body_size_fct=body_size_fct,
tail_size_fct=tail_size_fct)
return mfull, ufull, position_kwargs
def fun_Rfull_Ffull_core(self, wbc, wtc, position_kwargs, body_size_fct=1, tail_size_fct=1):
# current version, these factors are prohibited.
assert body_size_fct == 1
assert tail_size_fct == 1
mhead_base = self.mhead_base
mtail = self.mtail_base
rotM = self._rotM
rotM_beta = self._rotM_beta
Rhead_base = np.linalg.inv(mhead_base)
Rhead_base = np.diagflat(np.diag(Rhead_base))
t1 = (Rhead_base[0, 0] + Rhead_base[1, 1]) / 2
Rhead_base[0, 0] = t1
Rhead_base[1, 1] = t1
t1 = (Rhead_base[3, 3] + Rhead_base[4, 4]) / 2
Rhead_base[3, 3] = t1
Rhead_base[4, 4] = t1
Rtail = np.linalg.inv(mtail)
Rhead = fun_m_rot(fun_m_rot(Rhead_base, rotM_beta), rotM.T)
Ab_rt = Rhead[0:3, 0:3]
Cb_rt = Rhead[3:6, 3:6]
At = np.diagflat(np.diag(Rtail[0:3, 0:3]))
t1 = (At[0, 0] + At[1, 1]) / 2
At[0, 0] = t1
At[1, 1] = t1
Bt = np.diagflat(np.diag((Rtail[0:3, 3:6] + Rtail[3:6, 0:3]) / 2))
t1 = (Bt[0, 0] + Bt[1, 1]) / 2 * 0
Bt[0, 0] = t1
Bt[1, 1] = t1
Ct = np.diagflat(np.diag(Rtail[3:6, 3:6]))
t1 = (Ct[0, 0] + Ct[1, 1]) / 2
Ct[0, 0] = t1
Ct[1, 1] = t1
# rbc = position_kwargs['rbc']
rtc = position_kwargs['rtc']
rc = position_kwargs['rc']
norm_head = position_kwargs['norm_head']
norm_tail = position_kwargs['norm_tail']
# drbc = rbc - rc
drtc = rtc - rc
dtc = cross_matrix(drtc)
Rfull = np.zeros((6, 6))
Rfull[0:3, 0:3] = Ab_rt + At
Rfull[0:3, 3:6] = -
|
np.dot(At, dtc)
|
numpy.dot
|
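# A minimal sketch of the numpy.dot usage above: the skew-symmetric matrix from
# cross_matrix(v) turns a cross product into a matrix-vector product, which is
# how coupling blocks such as np.dot(At, dtc) are assembled. Vectors are
# illustrative only.
import numpy as np

def cross_matrix(v):
    # same construction as in the code above
    m = np.zeros((3, 3))
    m[0, 1], m[0, 2] = -v[2], v[1]
    m[1, 0], m[1, 2] = v[2], -v[0]
    m[2, 0], m[2, 1] = -v[1], v[0]
    return m

v = np.array([0.2, -0.5, 1.0])
u = np.array([1.0, 2.0, 3.0])
print(np.allclose(np.dot(cross_matrix(v), u), np.cross(v, u)))   # True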
from aiida.orm.data.array import ArrayData
import numpy
class ForceConstantsData(ArrayData):
"""
Store the force constants on disk as a numpy array. It requires numpy to be installed.
"""
def __init__(self, *args, **kwargs):
super(ForceConstantsData, self).__init__(*args, **kwargs)
self._cached_arrays = {}
def get_data(self):
"""
Return the force constants stored in the node as a numpy array (Natoms x 3 x 3)
"""
return self.get_array('force_constants')
def set_data(self, force_constants):
"""
Store the force constants as a numpy array. Possibly overwrite the array
if it already existed.
Internally, it is stored as a force_constants.npy file in numpy format.
:param array: The numpy array to store.
"""
self.set_array('force_constants', numpy.array(force_constants))
def read_from_phonopy_file(self, filename):
"""
Read the force constants from a phonopy FORCE_CONSTANTS file
:param filename: FORCE_CONSTANTS file name
"""
fcfile = open(filename)
num = int((fcfile.readline().strip().split())[0])
force_constants = numpy.zeros((num, num, 3, 3), dtype=float)
for i in range(num):
for j in range(num):
fcfile.readline()
tensor = []
for k in range(3):
tensor.append([float(x) for x in fcfile.readline().strip().split()])
force_constants[i, j] =
|
numpy.array(tensor)
|
numpy.array
|
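# A minimal sketch of the numpy.array call completed above: one 3x3 block of a
# phonopy FORCE_CONSTANTS file is parsed into nested lists and converted to an
# array. The three text lines stand in for lines read from the file.
import numpy as np

lines = ["1.0 0.0 0.0", "0.0 1.0 0.0", "0.0 0.0 1.0"]
tensor = [[float(x) for x in line.strip().split()] for line in lines]
block = np.array(tensor)
print(block.shape)   # (3, 3)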
"""dual grid method"""
import time
import path_magic
from mesh import TransfiniteMesh, CrazyMesh
from function_space import FunctionSpace
from inner_product import MeshFunction, inner
import numpy as np
import scipy.io
import numpy.testing as npt
from scipy.sparse import coo_matrix
from forms import Form
from basis_forms import BasisForm
from coboundaries import d, d_21_lobatto_outer
from assemble import assemble
from scipy import sparse
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, draw, show
from quadrature import gauss_quad, extended_gauss_quad, lobatto_quad
from polynomials import lagrange_basis, edge_basis
from dof_map import dof_map_crazy_ext_gauss_nodes, dof_map_crazy_lobatto_edges_discontinous, dof_map_crazy_lobatto_faces
from my_functions import assemble_cochain, assemble_cochain2, mass_matrix_1_form_local
start_time = time.time()
"""define source term"""
# def source(x, y): return -8 * np.pi**2 * np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y)
# def source(x, y): return -36 * np.pi**2 * np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y)
# def source(x, y): return -36 * np.pi**2 * np.sin(2 * np.pi * x) * np.sin(2 *
# np.pi * y) + 24 * np.pi**2 * np.cos(2 * np.pi * x) * np.cos(2 * np.pi * y)
"""define anisotropic tensor"""
def k_11(x, y):
alpha = 1e-4
return (1e-3 * x**2 + y**2 + alpha) / (x**2 + y**2 + alpha)
def k_12(x, y):
alpha = 1e-4
return ((1e-3 - 1) * x * y) / (x**2 + y**2 + alpha)
def k_22(x, y):
alpha = 1e-4
return (x**2 + 1e-3 * y**2 + alpha) / (x**2 + y**2 + alpha)
def k11_dx(x, y):
alpha = 1e-4
return (2 * 1e-3 * x * (x**2 + y**2 + alpha) - 2 * x * (1e-3 * x**2 + y**2 + alpha)) / (x**2 + y**2 + alpha) ** 2
def k12_dx(x, y):
alpha = 1e-4
return ((x**2 + y**2 + alpha) * (1e-3 - 1) * y - 2 * (1e-3 - 1) * x**2 * y) / (x**2 + y**2 + alpha) ** 2
def k12_dy(x, y):
alpha = 1e-4
return ((x**2 + y**2 + alpha) * (1e-3 - 1) * x - 2 * (1e-3 - 1) * x * y**2) / (x**2 + y**2 + alpha) ** 2
def k22_dy(x, y):
alpha = 1e-4
return (2 * 0.001 * y * (x**2 + y**2 + alpha) - 2 * y * (x**2 + 0.001 * y**2 + alpha)) / (x**2 + y**2 + alpha) ** 2
"""define manufactured solution"""
def manufactured_solution(x, y): return np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y)
def phi_exact(x, y):
return
|
np.sin(2 * np.pi * x)
|
numpy.sin
|
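# A minimal sketch of the manufactured solution above, evaluated with numpy.sin
# on a small tensor-product grid; the resolution is chosen only for illustration.
import numpy as np

def manufactured_solution(x, y):
    return np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y)

x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
X, Y = np.meshgrid(x, y)
print(manufactured_solution(X, Y).shape)   # (5, 5)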
# @File: route_following.py
# @Info: to create an agent of ROUTE FOLLOWING based on the insect brain model in insect_brain_model.py
# @Author: <NAME>, UoL, UK
# @Time: 2020-02-17
import numpy as np
from insect_brain_model import CentralComplexModel, AOTuVisualPathwayModel
from image_processing import visual_sense
class RouteFollowingAgent(object):
"""Class for the implementation of route following model
"""
def __init__(self, world, route_mem, home_mem, zm_n_max, num_neurons=30):
# central complex
self.cx = CentralComplexModel()
# simulated 3D world, an array with size Nx3
self.world = world
# a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']
self.route_mem = route_mem
# a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']
self.home_mem = home_mem
# frequency encoding parameters
self.zm_n_max = zm_n_max
if self.zm_n_max % 2:
self.zm_coeff_num = int(((1 + zm_n_max) / 2) * ((3 + zm_n_max) / 2))
else:
self.zm_coeff_num = int((zm_n_max / 2.0 + 1) ** 2)
# rearrange the memory
mem_scene = self.route_mem['ZM_As'][:, :self.zm_coeff_num].copy()
mem_phase = self.route_mem['ZM_Ps'][:, 16].copy()
mem_phase_ring = np.zeros([len(mem_phase), 8])
for i in range(len(mem_phase)):
mem_scene[i, :] = (mem_scene[i, :] -
|
np.min(mem_scene[i, :])
|
numpy.min
|
import pytest
import numpy as np
import spharpy.samplings as samplings
from spharpy.samplings.coordinates import Coordinates, SamplingSphere
from spharpy.special import spherical_bessel_zeros
from scipy.special import spherical_jn
def test_sph_bessel_zeros():
roots = spherical_bessel_zeros(3, 3)
jn_zeros = np.ones(roots.shape)
for n in range(0, 4):
jn_zeros[n, :] = spherical_jn(n, roots[n, :])
zeros =
|
np.zeros((4, 3), dtype=np.float)
|
numpy.zeros
|
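An illustrative aside, not part of the test file above: the first zero of the spherical Bessel function j0(x) = sin(x)/x is pi, which is the kind of root spherical_bessel_zeros is expected to return and which the test verifies by evaluating spherical_jn at the computed roots.

import numpy as np
from scipy.special import spherical_jn

print(spherical_jn(0, np.pi))   # ~1e-16, i.e. pi is (numerically) a zero of j0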
'''
@name: ros_env_cont_img.py
@brief: This class is a simulation environment wrapper for
the X-Image Representation
with continuous action space.
@author: <NAME>
@version: 3.5
@date: 2019/04/05
'''
# ros-relevant
import rospy
# python relevant
import numpy as np
from gym import spaces
#custom classes
from rl_agent.env_wrapper.ros_env_img import RosEnvImg
# Messages
from geometry_msgs.msg import Twist
# Parameters
GOAL_RADIUS = 0.4
WAYPOINT_RADIUS = 0.2
class RosEnvContImg(RosEnvImg):
'''
This class is a simulation environment wrapper for
the X-Image Representation
with continuous action space.
'''
def __init__(self, ns, state_collector, stack_offset, stack_size, robot_radius = 0.46, reward_fnc=6, debug=False, execution_mode="train", task_mode="static"):
img_width = rospy.get_param("%s/rl_agent/img_width_pos"%ns) + rospy.get_param("%s/rl_agent/img_width_neg"%ns)
img_height = rospy.get_param("%s/rl_agent/img_height"%ns)
state_size = (img_height, img_width, 1)
observation_space = spaces.Box(low=0, high=100, shape=state_size, dtype=np.float)
action_space = spaces.Box(low=np.array([0.0, -1.2]), high=
|
np.array([0.8, 1.2])
|
numpy.array
|
# -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
This file can basically do the same as the "from scratch" algorithm in folder
"supportVectorMachine", but this is much more complex to account for margins
and more dimensions involved. This also involves more complex math, matrix
algebra and Lagrange multipliers.
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
    This class is for creating an instance of an SVM, to avoid retraining or
    refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
            wrongly classified during fitting/training. Default is None; if C is
            None then it's a hard-margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
def fit(self, X, y):
"""Method to train the SVM object as a convex optimization problem.
Return:
(void)
Args:
X (np.array): the features
y (np.array): the labels
"""
n_samples, n_features = X.shape
# Creating all the values for the quadratic Programming solver
K = np.zeros((n_samples, n_samples))
for i in range(n_samples):
for j in range(n_samples):
K[i, j] = self.kernel(X[i], X[j])
P = cvxopt.matrix(np.outer(y, y) * K)
q = cvxopt.matrix(np.ones(n_samples) * -1)
A = cvxopt.matrix(y, (1, n_samples))
b = cvxopt.matrix(0.0)
if self.C is None:
G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
h = cvxopt.matrix(np.zeros(n_samples))
else:
tmp1 = np.diag(np.ones(n_samples) * -1)
tmp2 = np.identity(n_samples)
G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
tmp1 = np.zeros(n_samples)
tmp2 = np.ones(n_samples) * self.C
h = cvxopt.matrix(np.hstack((tmp1, tmp2)))
# solve Quadratic Programming problem
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
# Lagrange multipliers
a = np.ravel(solution['x'])
# Support vectors have non zero Lagrange multipliers
sv = a > 1e-5 # due to floating point errors
ind = np.arange(len(a))[sv]
self.a = a[sv]
self.sv = X[sv]
self.sv_y = y[sv]
print("%d support vectors out of %d points" % (len(self.a), n_samples))
# find the Intercept/ bias b
self.b = 0
for n in range(len(self.a)):
self.b += self.sv_y[n]
self.b -= np.sum(self.a * self.sv_y * K[ind[n], sv])
self.b /= len(self.a)
# find the Weight vector w
if self.kernel == linear_kernel:
self.w = np.zeros(n_features)
for n in range(len(self.a)):
self.w += self.a[n] * self.sv_y[n] * self.sv[n]
else:
self.w = None
def project(self, X):
"""Method is useful for getting the prediction depending on kernel.
Return:
(int or np.array of ints) A number which indicate the
classification of the features by being positive or negative.
"""
if self.w is not None:
return np.dot(X, self.w) + self.b
else:
y_predict = np.zeros(len(X))
for i in range(len(X)):
s = 0
for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):
s += a * sv_y * self.kernel(X[i], sv)
y_predict[i] = s
return y_predict + self.b
def predict(self, X):
"""Method to predict features X."""
return np.sign(self.project(X))
if __name__ == '__main__':
def gen_lin_seperable_data():
"""Function to generate linearly seperable 2d training data."""
mean1 = np.array([0, 2])
mean2 =
|
np.array([2, 0])
|
numpy.array
|
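A hedged usage sketch (not part of the original script): fit the SVM class above on two Gaussian blobs, roughly mirroring gen_lin_seperable_data. It assumes the SVM class and linear_kernel defined above are in scope and that cvxopt is installed; labels are floats in {-1, +1} because cvxopt.matrix(y, (1, n_samples)) expects doubles.

import numpy as np

np.random.seed(0)
cov = np.array([[0.8, 0.6], [0.6, 0.8]])
X_train = np.vstack((np.random.multivariate_normal([0, 2], cov, 50),
                     np.random.multivariate_normal([2, 0], cov, 50)))
y_train = np.hstack((np.ones(50), -np.ones(50)))   # float labels in {-1, +1}

clf = SVM(kernel=linear_kernel, C=0.1)             # soft margin with slack C
clf.fit(X_train, y_train)
print("training accuracy:", np.mean(clf.predict(X_train) == y_train))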
"""Executing hardware timed configuration changes on the FPGA timing system
in "Piano Player" mode.
Author: <NAME>
Date created: 2015-05-01
Date last modified: 2019-05-09
"""
__version__ = "6.6.1" # issue: queue_sequeces: dictionary size changed during iteration
from logging import error,info,warn,debug
from numpy import nan,isnan
class Sequence(object):
parameters = {}
def __init__(self,**kwargs):
"""Arguments: delay=100e-12,laser_on=1,..."""
from collections import OrderedDict
from numpy import nan
keys = timing_sequencer.parameters
self.parameters = OrderedDict(zip(keys,[nan]*len(keys)))
for name in kwargs:
alt_name = name.replace("_on",".on")
if not (name in keys or alt_name in keys):
warn("Sequence: unsupported parameter %r" % name)
for key in kwargs: setattr(self,key,kwargs[key])
self.set_defaults()
def __getattr__(self,name):
"""A property"""
# Called when 'x.name' is evaluated.
# It is only invoked if the attribute wasn't found the usual ways.
alt_name = name.replace("_on",".on")
if name in self.parameters: return self.parameters[name]
elif alt_name in self.parameters: return self.parameters[alt_name]
else: return object.__getattribute__(self,name)
def __setattr__(self,name,value):
"""Set a property"""
# Called when 'x.name = y' is evaluated.
alt_name = name.replace("_on",".on")
if name.startswith("__"): object.__setattr__(self,name,value)
elif name in self.parameters: self.parameters[name] = value
elif alt_name in self.parameters: self.parameters[alt_name] = value
else: object.__setattr__(self,name,value)
def set_defaults(self):
"""Fill in unspecified parameters with default values."""
from numpy import isnan
from timing_system import timing_system
for key in self.parameters:
if key in ["pass_number","image_number"]: continue
if isnan(self.parameters[key]):
self.parameters[key] = timing_sequencer.get_default(key)
@property
def descriptor(self):
"""Text representation of the parameters for generating this
sequence"""
p = self.parameters
description = ",".join(["%s=%g"%(k,v) for k,v in zip(p.keys(),p.values())])+","
description += "generator=%r," % "timing_sequence"
description += "generator_version=%r," % __version__
return description
@property
def register_counts(self):
"""list of registers, list of arrays of values"""
from timing_system import timing_system,round_next
from numpy import isnan,where,arange,rint,floor,ceil,array,cumsum
from numpy import zeros,maximum,clip,unique
from sparse_array import sparse_array
delay = self.delay
Tbase = timing_system.hsct # Period of the 987-Hz clock
waitt = round_next(self.waitt,timing_system.waitt.stepsize)
burst_waitt = round_next(self.burst_waitt,timing_system.burst_waitt.stepsize)
burst_delay = round_next(self.burst_delay,timing_system.burst_delay.stepsize)
n = int(rint(waitt/Tbase)) # Sequence length period in 987-Hz cycles
ndt = int(rint(burst_waitt/Tbase)) # X-ray repetition period, in 987-Hz cycles
n_burst_delay = int(rint(burst_delay/Tbase)) # X-ray burst delay, in 987-Hz cycles
n = max(n,ndt*int(self.npulses)) # Make sure the period is long enough for npulses
delay_coarse = int(floor(delay/Tbase))
delay_value = delay - delay_coarse*Tbase
it0 = n_burst_delay + ndt - 2 # First X-ray pulse, in 987-Hz cycles
# The high-speed chopper determines the X-ray pulse timing.
xd = -timing_system.hsc.delay.offset
# If the chopper timing shift is more than 100 ns,
# assume the chopper selects a different bunch with a different timing.
# (e.g super bunch versus single bunch)
# However, if the time shift is more than 4 us, assume the tunnel
            # 1-bunch selection mode is used so the transmitted X-ray pulse
# arrives at nominally t=0.
if 100e-9 < abs(timing_system.hsc.delay.value) < 4e-6:
xd += timing_system.hsc.delay.value
it_laser = it0-delay_coarse + arange(0,int(self.npulses)*ndt,ndt)
it_xray = it0 + arange(0,int(self.npulses)*ndt,ndt)
t_xray = it_xray*Tbase+xd
t_laser = t_xray - delay
        # Trigger X-ray millisecond shutter
pulse_length = timing_system.ms.pulse_length
if self.burst_waitt < 0.010:
# Assume the X-ray is continuously firing at 120 Hz.
t_ms_open = min(t_xray) - timing_system.ms.offset
t_ms_close = max(t_xray) - timing_system.ms.offset + pulse_length
t_ms_open = array([t_ms_open])
t_ms_close = array([t_ms_close])
else:
t_ms_open = t_xray - timing_system.ms.offset
t_ms_close = t_xray - timing_system.ms.offset + pulse_length
it_ms_open = maximum(floor(t_ms_open /Tbase),0).astype(int)
it_ms_close = maximum(ceil(t_ms_close/Tbase),0).astype(int)
it_ms_open = it_ms_open [it_ms_open<n]
it_ms_close = it_ms_close[it_ms_close<n]
ms_inc = sparse_array(n)
ms_inc[it_ms_open] += 1
ms_inc[it_ms_close] -= 1
ms_state_counts = clip(
|
cumsum(ms_inc)
|
numpy.cumsum
|
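A hardware-free sketch of the open/close bookkeeping used just above (not from the original module): the shutter state is built by adding +1 at opening ticks and -1 at closing ticks, then taking a clipped cumulative sum. A plain numpy array stands in for sparse_array, the tick indices are hypothetical, and the clip bounds (0, 1) are chosen for illustration since the original clip arguments are not shown here.

import numpy as np

n = 20                                    # sequence length in 987-Hz clock ticks
ms_inc = np.zeros(n, dtype=int)           # stand-in for sparse_array(n)
it_ms_open = np.array([3, 9, 15])         # hypothetical opening ticks
it_ms_close = np.array([6, 12, 18])       # hypothetical closing ticks
ms_inc[it_ms_open] += 1
ms_inc[it_ms_close] -= 1
ms_state = np.clip(np.cumsum(ms_inc), 0, 1)
print(ms_state)                           # [0 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 0 0]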
"""
File name: helper_functions.py
Author: <NAME>
Date created: 15.02.2018
This file contains helper functions for other scripts.
"""
import catboost as cat
import innvestigate
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import shap
from keras import activations
from scipy.stats import iqr
from sklearn import preprocessing
from sklearn.metrics import (accuracy_score, f1_score, recall_score,
roc_auc_score, precision_score, brier_score_loss)
from vis.utils import utils
def subsample(X, y, subsampling_type: str):
"""
If subsampling type is defined as 'random', randomly sub-samples the given
data to yield equal number of label classes. Otherwise if subsampling type
is defined as 'none', returns original data.
:param X: Data input variables
:param y: Data labels
:param subsampling_type: Subsampling method to be used.
:return: Subsampled data input variables and labels.
"""
df = pd.concat([X, y], axis=1)
label = list(df)[-1]
if subsampling_type == "random":
df_bad = df[(df[label] == 1)]
df_good = df[(df[label] == 0)]
df_sub = pd.concat([df_good.sample(len(df_bad.index), random_state=21), df_bad])
X_new, y_new = df_sub.drop(label, axis=1), df_sub[[label]]
elif subsampling_type == "none":
X_new, y_new = X, y
return X_new, y_new
def predict_probability(data, model, model_name: str):
"""
Returns prediction probabilities estimated for the given dataset and model.
:param data: Training or test data.
:param model: Trained model.
:param model_name: Name of the trained model.
:return: Prediction probabilities
"""
if model_name == "MLP":
output_activation = model.get_config()["layers"][-1]["config"]["activation"]
if output_activation == "softmax":
probs = model.predict_proba(data)
else:
probs = model.predict(data)
else:
probs = model.predict_proba(data).T[1]
return probs
def predict_class(data, model, model_name: str):
"""
Returns predicted classes for the given dataset and model.
:param data: Training or test data.
:param model: Trained model.
:param model_name: Name of the trained model.
:return: Predicted classes
"""
if model_name == "MLP":
output_activation = model.get_config()["layers"][-1]["config"]["activation"]
if output_activation == "softmax":
probs = model.predict(data)
preds = probs.argmax(axis=-1)
else:
preds = model.predict_classes(data)
else:
preds = model.predict(data).astype("float64")
return preds
def calc_perf_score(data, labels, model, model_name: str, score_name: str):
"""
Returns performance based on the given performance measure for the
given data and model.
:param data: Training or test data input variables.
:param labels: Training or test labels.
:param model: Trained model.
:param model_name: Name of the trained model.
:param score_name: Name of the performance measure.
:return: Performance score
"""
global score
if isinstance(labels, pd.DataFrame):
labels.iloc[:, 0] = labels.iloc[:, 0].astype("float64")
probs = predict_probability(data, model, model_name)
preds = predict_class(data, model, model_name)
    # scores using probs
    if score_name == "AUC":
        score = roc_auc_score(labels, probs)
    elif score_name == "brier_score_loss":
        score = brier_score_loss(labels, probs)
    # scores using preds
else:
if score_name == "accuracy":
score = accuracy_score(labels, preds)
elif score_name == "f1":
score = f1_score(labels, preds, pos_label=1)
elif score_name == "average_class_accuracy":
recall = recall_score(labels, preds, average=None)
score = 2 / (1 / recall[0] + 1 / recall[1])
elif score_name == "precision_PPV":
precision = precision_score(labels, preds, average=None)
score = precision[1]
elif score_name == "NPV":
precision = precision_score(labels, preds, average=None)
score = precision[0]
elif score_name == "recall_sensitivity":
recall = recall_score(labels, preds, average=None)
score = recall[1]
elif score_name == "specificity":
recall = recall_score(labels, preds, average=None)
score = recall[0]
return score
def linear_shap_value(model):
"""
    Calculates Shapley values for the test set of the given trained linear classifier
    model, using its training set as the background distribution.
:param model: Trained linear model.
:return: Shapley values
"""
explainer = shap.LinearExplainer(model.best_model, model.X_tr, feature_dependence="independent")
shap_values = explainer.shap_values(model.X_te)
shap_values_mean_over_samples = np.mean(shap_values, axis=0)
return shap_values_mean_over_samples
def calc_shap_values(dataset, model):
"""
Calculates Shapley values for the given training set and tree boosting model.
:param dataset: Training or test data.
:param model: Trained tree boosting model.
:return: Shapley values
"""
explainer = shap.TreeExplainer(model.best_model)
cat_features = [
list(model.X_tr).index(dataset.cat_preds[i])
for i in range(len(dataset.cat_preds))
]
shap_values = explainer.shap_values(cat.Pool(model.X_te, model.y_te, cat_features=cat_features))
# Calculate average over samples (patients)
shap_values_mean_over_samples = np.mean(shap_values, axis=0)
return shap_values_mean_over_samples
def calc_kernel_shap_values(model):
"""
    Calculates Shapley values for the test set of the given trained model, using its
    training set as the background distribution for the kernel explainer.
:param model: Trained model.
:return: Shapley values
"""
explainer = shap.KernelExplainer(model.best_model.predict_proba, model.X_tr)
shap_values = explainer.shap_values(model.X_te,l1_reg=0.0,nsamples=500)[0] #results are symmetric (-,+)
# Calculate average over samples (patients)
shap_values_mean_over_samples =
|
np.mean(shap_values, axis=0)
|
numpy.mean
|
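A small usage sketch for the subsample function defined above (illustrative, not from the original file): a toy frame with 8 negatives and 2 positives is randomly down-sampled to a balanced set.

import numpy as np
import pandas as pd

X = pd.DataFrame({"a": np.arange(10), "b": np.arange(10) * 2.0})
y = pd.DataFrame({"label": [0] * 8 + [1] * 2})
X_bal, y_bal = subsample(X, y, "random")
print(y_bal["label"].value_counts())   # expect 2 samples of each class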
import gc
import itertools
import numpy as np
class KMedoids:
def __init__(self, dist_matrix, init_type='k++'):
self.dist_matrix = np.asarray(dist_matrix)
self.n_points = dist_matrix.shape[0]
self.inited_medoids = None
if init_type not in ['k++', 'random']:
raise ValueError('init_type argument must be either k++ or random, but it is ', init_type)
self.init_type = init_type
def cluster(self, k):
"""
Performs actual clustering.
:param k: number of clusters
:return: ids of clusters and ids of medoids
"""
print('Running k-Medoids with k = {}, init = {}'.format(k, self.init_type))
if self.init_type == 'random':
curr_medoids = self.__init_random(k)
elif self.init_type == 'k++':
curr_medoids = self.__init_kplusplus(k)
else:
raise AssertionError(self.init_type)
# used to plot later
self.inited_medoids = curr_medoids
# Doesn't matter what we initialize these to.
old_medoids = np.array([-1] * k)
new_medoids = np.array([-1] * k)
print('Medoids initialized: ', curr_medoids)
it = 0
clusters = np.zeros(self.dist_matrix.shape[0])
# Until the medoids stop updating, do the following:
while not ((old_medoids == curr_medoids).all()):
print('Running iter %d' % it)
# Assign each point to cluster with closest medoid.
clusters = assign_points_to_clusters(self.dist_matrix, curr_medoids)
# Update cluster medoids to be lowest cost point.
for curr_medoid in curr_medoids:
cluster = np.where(clusters == curr_medoid)[0]
new_medoids[curr_medoids == curr_medoid] = compute_new_medoid(self.dist_matrix, cluster)
old_medoids[:] = curr_medoids[:]
curr_medoids[:] = new_medoids[:]
it += 1
gc.collect()
return clusters, curr_medoids
def __init_random(self, k):
# Pick k random, unique medoids.
curr_medoids = np.array([-1] * k)
while not len(np.unique(curr_medoids)) == k:
curr_medoids = np.random.randint(0, self.n_points - 1, k)
return curr_medoids
def __init_kplusplus(self, k):
"""
        Initialize the medoids with the kmeans++ algorithm by <NAME> and <NAME>. This is the
        preferred initialization over pure random.
:param k: number of clusters
:return: ids of data points that are going to be used as medoids
"""
medoids = np.empty((k,), dtype=int)
fst_mean = np.random.randint(0, self.n_points)
medoids[0] = fst_mean
# get square distances of all points to the mean
# dists = dist(common, means[0, np.newaxis], xtx)
dists = pdist_from_ids(self.dist_matrix, np.arange(self.n_points), fst_mean)
probs = np.empty(self.n_points)
for i in range(1, k):
# sample a new mean weighted by squared dists
np.divide(dists, np.linalg.norm(dists, ord=1), out=probs)
new_mean_idx = np.random.choice(self.n_points, replace=False, p=probs)
# add new mean
medoids[i] = new_mean_idx
# calculate new distances to the closest means
new_dists = pdist_from_ids(self.dist_matrix, np.arange(self.n_points), new_mean_idx)
dists = np.minimum(dists, new_dists)
return medoids
def assign_points_to_clusters(dist_matrix, medoids):
"""
Assign each point to cluster with closest medoid.
:param dist_matrix
:param medoids: IDs of medoids
:return:
"""
medoids = np.array(medoids)
distances_to_medoids = dist_matrix[:, medoids]
clusters = medoids[np.argmin(distances_to_medoids, axis=1)]
clusters[medoids] = medoids
return clusters
def compute_new_medoid(dist_matrix, cluster):
"""
Computes the new medoid for the given cluster
:param dist_matrix
:param cluster:
:return:
"""
mask = np.ones(dist_matrix.shape)
mask[np.ix_(cluster, cluster)] = 0.
cluster_distances = np.ma.masked_array(data=dist_matrix, mask=mask, fill_value=10e9)
costs = cluster_distances.sum(axis=1)
return costs.argmin(axis=0, fill_value=10e9)
def pdist_from_ids(dist_matrix, list1, list2):
"""
Returns pair-wise distances between data points with ids in list1 and ids in list2.
    :param dist_matrix: entry i,j represents the distance between point i and j
:param list1: ids of data points in the first set
:param list2: ids of data points in the second set
:return: result[i, j] = distance between data[list1[i]] and data[list2[j]]
"""
if not np.isscalar(list2):
c = list(itertools.product(list1, list2))
c1, c2 = zip(*c)
res = np.asarray(dist_matrix[c1, c2]).reshape((len(list1), len(list2)))
else:
c1 = list1
c2 = list2
res =
|
np.asarray(dist_matrix[c1, c2])
|
numpy.asarray
|
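An illustrative usage sketch (not part of the original module), assuming the KMedoids class and its module-level helpers above are in scope; scipy is only used to build the pairwise distance matrix.

import numpy as np
from scipy.spatial.distance import cdist

np.random.seed(1)
pts = np.vstack((np.random.randn(20, 2), np.random.randn(20, 2) + 5.0))
km = KMedoids(cdist(pts, pts), init_type='k++')
clusters, medoids = km.cluster(k=2)      # cluster labels are medoid point ids
print("medoid ids:", medoids)
print("cluster sizes:", np.unique(clusters, return_counts=True)[1])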
#!/usr/bin/env python
'''
Created on 02 Oct 2020
@author: <NAME>
@license: BSD 3-Clause
'''
from __future__ import annotations
from typing import Callable, Tuple, List, Dict, Any, Union
import numpy as np
import math
from matplotlib import pyplot as plt, patches, axes as plt_axes
from causets.shapes import BallSurface, OpenConeSurface, CoordinateShape, CircleEdge, EllipseEdge
from causets.calculations import NewtonsMethod as Newton
default_samplingsize: int = 128 # default value for sampling lightcones
causality_eps: float = 0.00000000000001 # for small causality rounding errors
class Spacetime(object):
'''
Super-class for the implementation of spacetimes.
'''
_dim: int
_name: str
_metricname: str
_params: Dict[str, Any]
def __init__(self) -> None:
self._dim = 2
self._name = ''
self._metricname = 'unknown'
self._params = {}
def __str__(self):
return f'{self._dim}-dimensional {self._name} spacetime'
def __repr__(self):
return f'{self.__class__.__name__}({self._dim}, **{self._params})'
@property
def Dim(self) -> int:
'''
Returns the dimension of the spacetime.
'''
return self._dim
@property
def Name(self) -> str:
'''
Returns the name of the spacetime.
'''
return self._name
@property
def MetricName(self) -> str:
'''
Returns the name of the coordinate representation of the metric.
'''
return self._metricname
def Parameter(self, key: str) -> Any:
'''
Returns a parameter for the shape of the spacetime.
'''
return self._params[key]
def DefaultShape(self) -> CoordinateShape:
'''
Returns the default coordinate shape of the embedding region in the
spacetime.
'''
return CoordinateShape(self.Dim, 'cylinder')
def Causality(self) -> Callable[[np.ndarray, np.ndarray],
Tuple[bool, bool]]:
'''
Returns a handle to a function to determine if two points x and y are
causally connected for the spacetime.
The function accepts coordinates x and y for two points and returns
the causality tuple (x <= y, x > y).
'''
# This is an example implementation for a spacetime.
def isCausal(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
t_delta: float = y[0] - x[0]
return (t_delta >= 0.0, t_delta < 0.0)
return isCausal
def _T_slice_sampling(self, t: float, origin: np.ndarray,
samplingsize: int = -1) -> np.ndarray:
'''
Internal function for the time sampling array for a cone from
`origin` to time `t`.
'''
samplingsize = samplingsize if samplingsize >= 0 \
else default_samplingsize
return np.linspace(origin[0], t, samplingsize)
def _XT_slice(self, t: float, origin: np.ndarray, xdim: int,
samplingsize: int = -1) -> np.ndarray:
'''
Internal function for the cone plotting from `origin` to time `t`
projected onto a X-T (space-time) plane with space dimension `xdim`.
'''
raise NotImplementedError()
def _XY_slice(self, t: float, origin: np.ndarray, dims: List[int],
samplingsize: int = -1) -> np.ndarray:
'''
Internal function for the cone plotting from `origin` to time `t`
projected onto a X-Y (space-space) plane with space dimensions
`dims`.
'''
raise NotImplementedError()
def _XYZ_slice(self, t: float, origin: np.ndarray, dims: List[int],
samplingsize: int = -1) -> \
Tuple[np.ndarray, np.ndarray, np.ndarray]:
'''
Internal function for the cone plotting from `origin` to time `t`
projected onto a X-Y-Z (space-space) plane with space dimensions
`dims`.
'''
raise NotImplementedError()
def ConePlotter(self, dims: List[int], plotting_params: Dict[str, Any],
timesign: float, axes: plt_axes.Axes = None,
dynamicAlpha: Callable[[float], float] = None,
samplingsize: int = -1) -> \
Callable[[np.ndarray, float],
Union[patches.Patch,
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]]]:
'''
Returns a function handle to plot past (`timesign == -1`) or future
(`timesign == 1`) causal cones for the spacetime `self` into the axes
object `axes` (given by gca() by default, with projection='3d' if
len(dims) > 2) up to the coordinate time `timeslice` with plotting
parameters given in the dictionary `plotting_params`. The time
coordinate goes along the axis with index `timeaxis`. As optional
parameter `dynamicAlpha` a function (mapping float to float) can be
specified to compute the opacity of the cone from its size (radius).
The argument `dims` specifies the coordinate axes to be plotted.
It is a list of 2 or 3 integers, setting up a 2D or 3D plot.
'''
is3d: bool = len(dims) == 3
_axes: plt_axes.Axes
if axes is None:
if is3d:
_axes = plt.gca(projection='3d')
else:
_axes = plt.gca(projection=None)
else:
_axes = axes
timeaxis: int
try:
timeaxis = dims.index(0)
except ValueError:
timeaxis = -1
xaxis: int = (timeaxis + 1) % len(dims)
yaxis: int = (timeaxis + 2) % len(dims)
if samplingsize <= 0:
samplingsize = default_samplingsize
def cone(origin: np.ndarray, timeslice: float) -> \
Union[patches.Patch,
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]]:
'''
Creates matplotlib surface plots for a 3D causal cone, or a patch
for a 2D causal cone added to the axes `axes`. The light emanates
from the coordinates `origin`, which has to be a `numpy` vector
with a length given by the coordinate dimensions of the spacetime.
The lightcone reaches up to `timeslice`.
The keyword argument `plotting_params` (with a dynamically
adjusted 'alpha' parameter) are passed to `plot_surface` methods
if it is 3D or to the Patch objects if it is 2D.
The function returns `None` if no causal cone can be computed for
the respective input parameters.
'''
r: float = timesign * (timeslice - origin[0])
if r <= 0.0: # radius non-positive
return None
if dynamicAlpha is not None:
conealpha = dynamicAlpha(r)
if conealpha <= 0.0:
return None
plotting_params.update({'alpha': conealpha})
XY: np.ndarray = None
T: np.ndarray
samplesize_t: int
if timeaxis >= 0:
T = self._T_slice_sampling(timeslice, origin, samplingsize)
samplesize_t = T.size
if is3d:
X: np.ndarray
Y: np.ndarray
Z: np.ndarray
if timeaxis < 0:
X, Y, Z = self._XYZ_slice(timeslice, origin, dims,
samplingsize)
else:
for i, t in enumerate(T):
XY = self._XY_slice(t, origin,
[dims[xaxis], dims[yaxis]],
samplingsize)
if XY is None:
return None
elif i == 0:
s: Tuple[int, int] = (samplesize_t, XY.shape[0])
X, Y, Z = np.zeros(s), np.zeros(s), np.zeros(s)
X[i, :], Y[i, :], Z[i, :] = XY[:, 0], XY[:, 1], t
# rotate:
if timeaxis == 0:
X, Y, Z = Z, X, Y
elif timeaxis == 1:
X, Y, Z = Y, Z, X
_axes.plot_surface(X, Y, Z, **plotting_params)
return [(X, Y, Z)]
else:
if timeaxis < 0:
XY = self._XY_slice(timeslice, origin, dims,
samplingsize)
else:
XY = self._XT_slice(timeslice, origin, dims[xaxis],
samplingsize)
if XY is None:
return None
# rotate:
if timeaxis == 0:
XY = np.fliplr(XY)
p: patches.Patch = patches.Polygon(XY, **plotting_params)
_axes.add_patch(p)
return p
return cone
class FlatSpacetime(Spacetime):
'''
Initializes Minkowski spacetime for dim >= 1.
    As an additional parameter, the spatial periodicity can be specified (by
the key 'period') as float (to be applied for all spatial
directions equally) or as tuple (with a float for each spatial
dimension). A positive float switches on the periodicity along the
respective spatial direction, using this value as period.
The default is 0.0, no periodicity in any direction.
'''
def __init__(self, dim: int,
period: Union[float, Tuple[float, ...]] = 0.0) -> None:
if dim < 1:
raise ValueError('The spacetime dimension has to be at least 1.')
super().__init__()
self._dim = dim
self._name = 'flat'
self._metricname = 'Minkowski'
_isPeriodic: bool
_periods: np.ndarray = None
if isinstance(period, float):
_isPeriodic = period > 0.0
if _isPeriodic:
_periods = np.array([period] * (dim - 1))
elif isinstance(period, tuple) and (len(period) == dim - 1):
_isPeriodic = any(p > 0.0 for p in period)
_periods = period
else:
            raise ValueError("The parameter 'period' has to be of " +
                             'type float, or a tuple of float with ' +
                             'length equal to the number of spatial dimensions.')
self._params['isPeriodic'] = _isPeriodic
if _isPeriodic:
self._params['period'] = _periods
def __repr__(self):
_period: Tuple[float, ...] = self.Parameter('period')
return f'{self.__class__.__name__}({self._dim}, period={_period})'
def DefaultShape(self) -> CoordinateShape:
return CoordinateShape(self.Dim, 'cube') \
if self.Parameter('isPeriodic') \
else CoordinateShape(self.Dim, 'diamond')
def Causality(self) -> Callable[[np.ndarray, np.ndarray],
Tuple[bool, bool]]:
if self.Dim == 1:
return super().Causality()
if not self.Parameter('isPeriodic'):
if self.Dim == 2:
def isCausal_flat2D(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
t_delta: float = y[0] - x[0]
isCausal: bool = abs(t_delta) >= \
abs(y[1] - x[1]) - causality_eps
return ((t_delta >= 0.0) and isCausal,
(t_delta < 0.0) and isCausal)
return isCausal_flat2D
else:
def isCausal_flat(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
t_delta: float = y[0] - x[0]
isCausal: bool = np.square(t_delta) >= \
sum(np.square(y[1:] - x[1:])) - causality_eps
return ((t_delta >= 0.0) and isCausal,
(t_delta < 0.0) and isCausal)
return isCausal_flat
else:
_period: np.ndarray = self.Parameter('period')
if self.Dim == 2:
def isCausal_flat2Dperiodic(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
t_delta: float = y[0] - x[0]
r_delta: float = abs(y[1] - x[1])
if _period[0] > 0.0:
r_delta = min(r_delta, _period[0] - r_delta)
isCausal: bool = abs(t_delta) >= \
abs(r_delta) - causality_eps
return ((t_delta >= 0.0) and isCausal,
(t_delta < 0.0) and isCausal)
return isCausal_flat2Dperiodic
else:
def isCausal_flatperiodic(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
t_delta: float = y[0] - x[0]
r2_delta: float = 0.0
for i in range(1, self.Dim):
r_delta_i: float = abs(y[i] - x[i])
if _period[i - 1] > 0.0:
r_delta_i = min(r_delta_i,
_period[i - 1] - r_delta_i)
r2_delta += r_delta_i**2
isCausal: bool = np.square(t_delta) >= \
r2_delta - causality_eps
return ((t_delta >= 0.0) and isCausal,
(t_delta < 0.0) and isCausal)
return isCausal_flatperiodic
def ConePlotter(self, dims: List[int], plotting_params: Dict[str, Any],
timesign: float, axes: plt_axes.Axes = None,
dynamicAlpha: Callable[[float], float] = None,
samplingsize: int = -1) -> \
Callable[[np.ndarray, float],
Union[patches.Patch,
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]]]:
is3d: bool = len(dims) == 3
_axes: plt_axes.Axes
if axes is None:
if is3d:
_axes = plt.gca(projection='3d')
else:
_axes = plt.gca(projection=None)
else:
_axes = axes
timeaxis: int
try:
timeaxis = dims.index(0)
except ValueError:
timeaxis = -1
isPeriodic: bool = self.Parameter('isPeriodic')
shifts: List[np.ndarray]
k_x: float = 0.0
x_s: List[float]
k_y: float = 0.0
y_s: List[float]
if is3d:
k_z: float = 0.0
z_s: List[float]
if isPeriodic:
if timeaxis == 0:
k_y = self.Parameter('period')[dims[1] - 1]
k_z = self.Parameter('period')[dims[2] - 1]
elif timeaxis == 1:
k_z = self.Parameter('period')[dims[2] - 1]
k_x = self.Parameter('period')[dims[0] - 1]
elif timeaxis == 2:
k_x = self.Parameter('period')[dims[0] - 1]
k_y = self.Parameter('period')[dims[1] - 1]
else:
k_x = self.Parameter('period')[dims[0] - 1]
k_y = self.Parameter('period')[dims[1] - 1]
k_z = self.Parameter('period')[dims[2] - 1]
x_s = [-k_x, 0.0, k_x] if k_x > 0.0 else [0.0]
y_s = [-k_y, 0.0, k_y] if k_y > 0.0 else [0.0]
z_s = [-k_z, 0.0, k_z] if k_z > 0.0 else [0.0]
shifts = [np.array([x, y, z])
for x in x_s for y in y_s for z in z_s]
else:
shifts = [np.array([0.0, 0.0, 0.0])]
else:
if isPeriodic:
if timeaxis == 0:
k_y = self.Parameter('period')[dims[1] - 1]
elif timeaxis == 1:
k_x = self.Parameter('period')[dims[0] - 1]
else:
k_x = self.Parameter('period')[dims[0] - 1]
k_y = self.Parameter('period')[dims[1] - 1]
x_s = [-k_x, 0.0, k_x] if k_x > 0.0 else [0.0]
y_s = [-k_y, 0.0, k_y] if k_y > 0.0 else [0.0]
shifts = [np.array([x, y])
for x in x_s for y in y_s]
else:
shifts = [np.array([0.0, 0.0])]
if samplingsize <= 0:
samplingsize = default_samplingsize
def cone(origin: np.ndarray, timeslice: float) -> \
Union[patches.Patch,
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]]:
r: float = timesign * (timeslice - origin[0])
if r <= 0.0: # radius non-positive
return None
if dynamicAlpha is not None:
conealpha = dynamicAlpha(r)
if conealpha <= 0.0:
return None
plotting_params.update({'alpha': conealpha})
origin = origin[dims]
if is3d:
XYZ_list: List[Tuple[np.ndarray, np.ndarray, np.ndarray]] = []
if timeaxis < 0:
for s in shifts:
XYZ_list = XYZ_list + BallSurface(
origin - s, r, samplingsize)
else:
for s in shifts:
XYZ_list = XYZ_list + OpenConeSurface(
origin - s, r, timesign * r,
timeaxis, samplingsize)
for XYZ in XYZ_list:
_axes.plot_surface(*XYZ, **plotting_params)
return XYZ_list
else:
XY: np.array = None
XYpart: np.array
for i, s in enumerate(shifts):
if timeaxis == 0:
XYpart = np.array(
[origin - s,
np.array([timeslice, origin[1] - r]) - s,
np.array([timeslice, origin[1] + r]) - s,
origin - s])
elif timeaxis == 1:
XYpart = np.array(
[origin - s,
np.array([origin[0] + r, timeslice]) - s,
np.array([origin[0] - r, timeslice]) - s,
origin - s])
else:
XYpart = CircleEdge(origin - s, radius=r,
samplingsize=samplingsize)
XY = XYpart if i == 0 \
else np.concatenate(
(XY, np.array([[np.nan, np.nan]]), XYpart))
p: patches.Patch = patches.Polygon(XY, **plotting_params)
_axes.add_patch(p)
return p
return cone
class _dSSpacetime(Spacetime):
'''
Implementation of the base class for de Sitter and Anti-de Sitter
spacetimes.
'''
_alpha: float
_alpha_sq: float
def __init__(self, dim: int, alpha: float = 1.0) -> None:
'''
Initializes (Anti) de Sitter spacetime for dim >= 2.
It is parametrized by `alpha` as float.
'''
if dim < 2:
raise ValueError('The spacetime dimension has to be at least 2.')
super().__init__()
self._dim = dim
self._metricname = 'static'
self._alpha = alpha
self._alpha_sq = alpha**2
def Causality(self) -> Callable[[np.ndarray, np.ndarray],
Tuple[bool, bool]]:
raise NotImplementedError()
def _XT_slice2(self, t: float, t0: float,
x0: float) -> Tuple[float, float]:
raise NotImplementedError()
def _XT_slice(self, t: float, origin: np.ndarray, xdim: int,
samplingsize: int = -1) -> np.ndarray:
T: np.ndarray = self._T_slice_sampling(t, origin, samplingsize)
XT: np.ndarray = np.zeros((2 * T.size - 1, 2))
if origin.size == 2:
x0: float = origin[1] / self._alpha
if abs(x0) >= 1.0:
return None
for i, t in enumerate(T):
r: Tuple[float, float] = self._XT_slice2(t, origin[0], x0)
XT[-i, 0], XT[i, 0] = min(r), max(r)
XT[-i, 1], XT[i, 1] = t, t
else:
t_X: np.ndarray
for i, t in enumerate(T):
x_min: float = np.PINF
x_max: float = np.NINF
for ydim in range(1, origin.size):
if ydim == xdim:
continue
t_X = self._XY_slice(t, origin, [xdim, ydim], samplingsize)
if t_X is None:
return None
x_min = np.min([x_min, np.min(t_X[:, 0])])
x_max = np.max([x_max, np.max(t_X[:, 0])])
XT[-i, 0], XT[i, 0] = x_min, x_max
XT[-i, 1], XT[i, 1] = t, t
return XT
class deSitterSpacetime(_dSSpacetime):
'''
Implementation of de Sitter spacetimes, which are globally hyperbolic.
'''
def __init__(self, dim: int, r_dS: float = 1.0) -> None:
'''
Initializes de Sitter spacetime for dim >= 2.
        It is parametrized by the cosmological radius `r_dS`
as float.
'''
super().__init__(dim, r_dS)
self._name = 'de Sitter'
if r_dS > 0.0:
self._params = {'r_dS': r_dS}
else:
raise ValueError('The cosmological radius ' +
'has to be positive.')
def Causality(self) -> Callable[[np.ndarray, np.ndarray],
Tuple[bool, bool]]:
def isCausal_dS(x: np.ndarray,
y: np.ndarray) -> Tuple[bool, bool]:
r2_x: float = sum(np.square(x[1:]))
r2_y: float = sum(np.square(y[1:]))
if (r2_x >= self._alpha_sq) or (r2_y >= self._alpha_sq):
return (False, False)
amp_x: float = math.sqrt(self._alpha_sq - r2_x)
amp_y: float = math.sqrt(self._alpha_sq - r2_y)
x0_x: float = amp_x * math.sinh(x[0] / self._alpha)
x1_x: float = amp_x * math.cosh(x[0] / self._alpha)
x0_y: float = amp_y * math.sinh(y[0] / self._alpha)
x1_y: float = amp_y * math.cosh(y[0] / self._alpha)
x0_delta: float = x0_y - x0_x
isCausal: bool = x0_delta**2 >= \
sum(np.square(y[1:] - x[1:])) + (x1_y - x1_x)**2 - \
causality_eps
return ((x0_delta >= 0.0) and isCausal,
(x0_delta < 0.0) and isCausal)
return isCausal_dS
def _XT_slice2(self, t: float, t0: float,
x0: float) -> Tuple[float, float]:
return (self._alpha * np.tanh(np.arctanh(x0) - (t - t0) / self._alpha),
self._alpha * np.tanh(
|
np.arctanh(x0)
|
numpy.arctanh
|
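A brief usage sketch (not part of the original module), assuming the classes above are importable: the causality functional of 2D Minkowski spacetime applied to a timelike and a spacelike separated pair of points.

import numpy as np

M = FlatSpacetime(2)                      # 2D Minkowski, no periodicity
iscausal = M.Causality()
x = np.array([0.0, 0.0])                  # (t, x) coordinates
print(iscausal(x, np.array([1.0, 0.5])))  # inside the future cone: (True, False)
print(iscausal(x, np.array([0.2, 1.0])))  # spacelike separated:    (False, False)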
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import json
import numpy as np
import pytest
from _pyngraph import VariantInt, VariantString
import ngraph as ng
from ngraph.exceptions import UserInputError
from ngraph.impl import Function, PartialShape, Shape, Type
from ngraph.impl.op import Parameter
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import (xfail_issue_34323,
xfail_issue_35929,
xfail_issue_36476,
xfail_issue_36479,
xfail_issue_36480)
def test_ngraph_function_api():
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=np.float32, name="A")
parameter_b = ng.parameter(shape, dtype=np.float32, name="B")
parameter_c = ng.parameter(shape, dtype=np.float32, name="C")
model = (parameter_a + parameter_b) * parameter_c
function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction")
function.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5]))
ordered_ops = function.get_ordered_ops()
op_types = [op.get_type_name() for op in ordered_ops]
assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"]
assert len(function.get_ops()) == 6
assert function.get_output_size() == 1
assert function.get_output_op(0).get_type_name() == "Result"
assert function.get_output_element_type(0) == parameter_a.get_element_type()
assert list(function.get_output_shape(0)) == [2, 2]
assert (function.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5])
assert len(function.get_parameters()) == 3
assert len(function.get_results()) == 1
assert function.get_friendly_name() == "TestFunction"
@pytest.mark.parametrize(
"dtype",
[
np.float32,
pytest.param(np.float64, marks=xfail_issue_35929),
pytest.param(np.int8, marks=xfail_issue_36479),
np.int16,
np.int32,
np.int64,
pytest.param(np.uint8, marks=xfail_issue_36479),
np.uint16,
pytest.param(np.uint32, marks=xfail_issue_36476),
np.uint64,
],
)
def test_simple_computation_on_ndarrays(dtype):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
value_b = np.array([[5, 6], [7, 8]], dtype=dtype)
value_c = np.array([[9, 10], [11, 12]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[54, 80], [110, 144]], dtype=dtype))
value_a = np.array([[13, 14], [15, 16]], dtype=dtype)
value_b = np.array([[17, 18], [19, 20]], dtype=dtype)
value_c = np.array([[21, 22], [23, 24]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[630, 704], [782, 864]], dtype=dtype))
def test_serialization():
dtype = np.float32
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
runtime = get_runtime()
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
try:
serialized = computation.serialize(2)
serial_json = json.loads(serialized)
assert serial_json[0]["name"] != ""
assert 10 == len(serial_json[0]["ops"])
except Exception:
pass
def test_broadcast_1():
input_data = np.array([1, 2, 3], dtype=np.int32)
new_shape = [3, 3]
expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_2():
input_data = np.arange(4, dtype=np.int32)
new_shape = [3, 4, 2, 4]
expected = np.broadcast_to(input_data, new_shape)
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_3():
input_data = np.array([1, 2, 3], dtype=np.int32)
new_shape = [3, 3]
axis_mapping = [0]
expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape, axis_mapping, "EXPLICIT")
assert np.allclose(result, expected)
@pytest.mark.xfail(reason="AssertionError: assert dtype('float32') == <class 'bool'")
@pytest.mark.parametrize(
"destination_type, input_data",
[(bool, np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))],
)
def test_convert_to_bool(destination_type, input_data):
expected = np.array(input_data, dtype=bool)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == bool
@pytest.mark.parametrize(
"destination_type, rand_range, in_dtype, expected_type",
[
pytest.param(np.float32, (-8, 8), np.int32, np.float32),
pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
pytest.param("f32", (-8, 8), np.int32, np.float32),
pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
],
)
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
np.random.seed(133391)
input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.int8, np.int8),
(np.int16, np.int16),
(np.int32, np.int32),
(np.int64, np.int64),
("i8", np.int8),
("i16", np.int16),
("i32", np.int32),
("i64", np.int64),
],
)
def test_convert_to_int(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.uint8, np.uint8),
(np.uint16, np.uint16),
(np.uint32, np.uint32),
(np.uint64, np.uint64),
("u8", np.uint8),
("u16", np.uint16),
("u32", np.uint32),
("u64", np.uint64),
],
)
def test_convert_to_uint(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
def test_bad_data_shape():
A = ng.parameter(shape=[2, 2], name="A", dtype=np.float32)
B = ng.parameter(shape=[2, 2], name="B")
model = A + B
runtime = get_runtime()
computation = runtime.computation(model, A, B)
value_a = np.array([[1, 2]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
with pytest.raises(UserInputError):
computation(value_a, value_b)
def test_constant_get_data_bool():
input_data = np.array([True, False, False, True])
node = ng.constant(input_data, dtype=np.bool)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.float32, np.float64])
def test_constant_get_data_floating_point(data_type):
np.random.seed(133391)
input_data = np.random.randn(2, 3, 4).astype(data_type)
min_value = -1.0e20
max_value = 1.0e20
input_data = min_value + input_data * max_value * data_type(2)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.int64, np.int32, np.int16, np.int8])
def test_constant_get_data_signed_integer(data_type):
np.random.seed(133391)
input_data = np.random.randint(
np.iinfo(data_type).min, np.iinfo(data_type).max, size=[2, 3, 4], dtype=data_type
)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert
|
np.allclose(input_data, retrieved_data)
|
numpy.allclose
|
import numpy as np
import nibabel as nib
import scipy.ndimage
def normalize_img(img, max_img, min_img, max, min):
# Scale between [1 0]
img = (img - min_img)/(max_img - min_img)
# Scale between [max min]
img = img*(max - min) + min
return img
def unnormalize_img(img, max_img, min_img, max, min):
# Undoes normalize_img()
img = (img - min)/(max - min)*(max_img - min_img) + min_img
return img
def get_nii_img(path_nii):
nii = nib.load(path_nii)
nii_img = nii.get_fdata()
return nii_img
def nii2torch(nii_img):
# Input: (x, y, z, channels)
# Output: (1, channels, z, x ,y)
# Expand dims => (1, x, y, z, channels)
torch_img = np.expand_dims(nii_img, axis=0)
# Permute dimensions => (1, channels, z, x ,y)
torch_img =
|
np.transpose(torch_img, axes=(0, 4, 3, 1, 2))
|
numpy.transpose
|
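A quick shape check mirroring nii2torch above (illustrative, with a dummy array in place of a NIfTI volume), showing the (x, y, z, channels) to (1, channels, z, x, y) reordering.

import numpy as np

dummy = np.zeros((64, 64, 32, 3))                   # (x, y, z, channels)
t = np.expand_dims(dummy, axis=0)                   # (1, x, y, z, channels)
t = np.transpose(t, axes=(0, 4, 3, 1, 2))           # (1, channels, z, x, y)
print(t.shape)                                      # (1, 3, 32, 64, 64)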
# this part is copied from Yufeng Shen's code:
#https://github.com/Yufeng-shen/nfHEDMtools/blob/master/Simulation.py
import numpy as np
from fractions import Fraction
from math import floor
from hexomap import utility
# from matplotlib import path
class Detector:
def __init__(self):
self.Norm = np.array([0, 0, 1])
self.CoordOrigin = np.array([0., 0., 0.])
self.Jvector = np.array([1, 0, 0])
self.Kvector = np.array([0, -1, 0])
self.PixelJ = 0.00148
self.PixelK = 0.00148
self.NPixelJ = 2048
self.NPixelK = 2048
def Move(self, J, K, trans, tilt):
self.CoordOrigin -= J * self.Jvector * self.PixelJ + K * self.Kvector * self.PixelK
self.CoordOrigin = tilt.dot(self.CoordOrigin) + trans
self.Norm = tilt.dot(self.Norm)
self.Jvector = tilt.dot(self.Jvector)
self.Kvector = tilt.dot(self.Kvector)
def IntersectionIdx(self, ScatterSrc, TwoTheta, eta, bIdx=True):
#print('eta:{0}'.format(eta))
#self.Print()
dist = self.Norm.dot(self.CoordOrigin - ScatterSrc)
scatterdir = np.array([np.cos(TwoTheta), np.sin(TwoTheta) * np.sin(eta), np.sin(TwoTheta) * np.cos(eta)])
InterPos = dist / (self.Norm.dot(scatterdir)) * scatterdir + ScatterSrc
J = (self.Jvector.dot(InterPos - self.CoordOrigin) / self.PixelJ)
K = (self.Kvector.dot(InterPos - self.CoordOrigin) / self.PixelK)
if 0 <= int(J) < self.NPixelJ and 0 <= int(K) < self.NPixelK:
            if bIdx:
return int(J), int(K)
else:
return J, K
else:
return -1
def BackProj(self, HitPos, omega, TwoTheta, eta):
"""
HitPos: ndarray (3,)
            The position of the hit point in lab coordinates, unit in mm
"""
scatterdir = np.array([np.cos(TwoTheta), np.sin(TwoTheta) * np.sin(eta), np.sin(TwoTheta) * np.cos(eta)])
t = HitPos[2] / (np.sin(TwoTheta) * np.cos(eta))
x = HitPos[0] - t * np.cos(TwoTheta)
y = HitPos[1] - t * np.sin(TwoTheta) * np.sin(eta)
truex = np.cos(omega) * x + np.sin(omega) * y
truey = -np.sin(omega) * x + np.cos(omega) * y
return np.array([truex, truey])
def Idx2LabCord(self, J, K):
return J * self.PixelJ * self.Jvector + K * self.PixelK * self.Kvector + self.CoordOrigin
def Reset(self):
self.__init__()
def Print(self):
print("Norm: ", self.Norm)
print("CoordOrigin: ", self.CoordOrigin)
print("J vector: ", self.Jvector)
print("K vector: ", self.Kvector)
class CrystalStr:
def __init__(self, material='new'):
self.name = material
self.AtomPos = []
self.AtomZs = []
self.symtype = None
if material == 'gold':
self.symtype = 'Cubic'
self.PrimA = 4.08 * np.array([1, 0, 0])
self.PrimB = 4.08 * np.array([0, 1, 0])
self.PrimC = 4.08 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 79)
self.addAtom([0, 0.5, 0.5], 79)
self.addAtom([0.5, 0, 0.5], 79)
self.addAtom([0.5, 0.5, 0], 79)
elif material == 'copper':
self.symtype = 'Cubic'
self.PrimA = 3.61 * np.array([1, 0, 0])
self.PrimB = 3.61 * np.array([0, 1, 0])
self.PrimC = 3.61 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0, 0.5, 0.5], 29)
self.addAtom([0.5, 0, 0.5], 29)
self.addAtom([0.5, 0.5, 0], 29)
elif material == 'copperBCC':
self.symtype = 'Cubic'
self.PrimA = 2.947 * np.array([1, 0, 0])
self.PrimB = 2.947 * np.array([0, 1, 0])
self.PrimC = 2.947 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0.5, 0.5, 0.5], 29)
elif material == 'copperFCC':
self.symtype = 'Cubic'
self.PrimA = 3.692 * np.array([1, 0, 0])
self.PrimB = 3.692 * np.array([0, 1, 0])
self.PrimC = 3.692 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0, 0.5, 0.5], 29)
self.addAtom([0.5, 0, 0.5], 29)
self.addAtom([0.5, 0.5, 0], 29)
elif material == 'stainless_steel':
self.symtype = 'Cubic'
self.PrimA = 3.59 * np.array([1, 0, 0])
self.PrimB = 3.59 * np.array([0, 1, 0])
self.PrimC = 3.59 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0, 0.5, 0.5], 26)
self.addAtom([0.5, 0, 0.5], 26)
self.addAtom([0.5, 0.5, 0], 26)
elif material == 'iron_bcc':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 2.856 * np.array([1, 0, 0])
self.PrimB = 2.856 * np.array([0, 1, 0])
self.PrimC = 2.856 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0.5, 0.5, 0.5], 26)
elif material == 'iron_fcc':
self.symtype = 'Cubic'
self.PrimA = 2.856 * np.array([1, 0, 0])
self.PrimB = 2.856 * np.array([0, 1, 0])
self.PrimC = 2.856 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0, 0.5, 0.5], 26)
self.addAtom([0.5, 0, 0.5], 26)
self.addAtom([0.5, 0.5, 0], 26)
elif material == 'SrTiO3':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 22)
self.addAtom([0.5, 0.5, 0.5], 38)
self.addAtom([0.5, 0, 0], 8)
self.addAtom([0, 0.5, 0], 8)
self.addAtom([0, 0, 0.5], 8)
elif material == 'SrTiO3_v1':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 8)
self.addAtom([0, 0.5, 0.5], 8)
self.addAtom([0.5, 0, 0.5], 8)
elif material == 'SrTiO3_v2':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
#self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 8)
self.addAtom([0, 0.5, 0.5], 8)
self.addAtom([0.5, 0, 0.5], 8)
elif material == 'SrTiO3_v3':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
#self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 38)
self.addAtom([0, 0.5, 0.5], 38)
self.addAtom([0.5, 0, 0.5], 38)
elif material == 'Ti7':
self.symtype = 'Hexagonal'
self.PrimA = 2.92539 * np.array([1, 0, 0])
self.PrimB = 2.92539 * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = 4.67399 * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 22)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 22)
elif material == 'WE43':
# not tested, use Mg to approximate
self.symtype = 'Hexagonal'
a = 3.2094
c = 5.2107
self.PrimA = a * np.array([1, 0, 0])
self.PrimB = a * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = c * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 12)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 12)
elif material == 'Ti64_alpha':
self.symtype = 'Hexagonal'
self.PrimA = 2.930 * np.array([1, 0, 0])
self.PrimB = 2.930 * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = 4.677 * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 22)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 22)
elif material == 'Ti64_beta':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 3.224 * np.array([1, 0, 0])
self.PrimB = 3.224 * np.array([0, 1, 0])
self.PrimC = 3.224 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0.5, 0.5, 0.5], 26)
elif material == 'UO2':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 5.471 * np.array([1, 0, 0])
self.PrimB = 5.471 * np.array([0, 1, 0])
self.PrimC = 5.471 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 92)
        elif material.lower() in ['zr', 'zirconium']:
# hexagonal lattice
# unit: angstrom, radian
# source:
# https://www.webelements.com/zirconium/crystal_structure.html
self.symtype = 'Hexagonal'
self.PrimA = 3.232 * np.array([1, 0, 0])
self.PrimB = 3.232 * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = 5.147 * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 22)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 22)
elif material.endswith(('.yml', '.yaml')):
d = utility.load_yaml(material)
self.symtype = d['symtype']
if d['symtype'] == 'Hexagonal':
self.PrimA = d['PrimA'] * np.array([1, 0, 0])
self.PrimB = d['PrimB'] * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = d['PrimC'] * np.array([0, 0, 1])
elif d['symtype'] == 'Cubic':
self.PrimA = d['PrimA'] * np.array([1, 0, 0])
self.PrimB = d['PrimB'] * np.array([0, 1, 0])
self.PrimC = d['PrimC'] * np.array([0, 0, 1])
else:
raise NotImplementedError('symType should be Cubic or Hexagonal')
for key, value in d['Atom'].items():
self.addAtom(value['pos'], value['atomNumber'])
else:
raise ValueError("Unknown mateiral type!")
def setPrim(self, x, y, z):
self.PrimA =
|
np.array(x)
|
numpy.array
|
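A hedged usage sketch of the Detector class above (not from the original file); the translation, tilt and scattering angles are arbitrary placeholders, not a physically meaningful nf-HEDM setup.

import numpy as np

det = Detector()
det.Move(1024.0, 1024.0, np.array([0.0, 0.0, 10.0]), np.eye(3))   # centre the panel, 10 mm along +z
hit = det.IntersectionIdx(np.array([0.0, 0.0, 0.0]),              # scattering source at the origin
                          np.radians(85.0), 0.0)                  # two-theta, eta (placeholder values)
print(hit)   # (J, K) pixel indices, or -1 if the ray misses the panel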
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
from keras.layers.merge import _Merge
from keras import activations
import tensorflow as tf
import numpy as np
#----------------------------------------------------------------------------
# Resize activation tensor 'inputs' of shape 'si' to match shape 'so'.
#
class ACTVResizeLayer(Layer):
def __init__(self,si,so,**kwargs):
self.si = si
self.so = so
super(ACTVResizeLayer,self).__init__(**kwargs)
def call(self, v, **kwargs):
assert len(self.si) == len(self.so) and self.si[0] == self.so[0]
# Decrease feature maps. Attention: channels last
if self.si[-1] > self.so[-1]:
v = v[...,:self.so[-1]]
# Increase feature maps. Attention:channels last
if self.si[-1] < self.so[-1]:
            z = K.zeros(self.so[:-1] + (self.so[-1] - self.si[-1],), dtype=v.dtype)
v = K.concatenate([v,z])
# Shrink spatial axis
if len(self.si) == 4 and (self.si[1] > self.so[1] or self.si[2] > self.so[2]):
assert self.si[1] % self.so[1] == 0 and self.si[2] % self.so[2] == 0
            pool_size = (self.si[1] // self.so[1], self.si[2] // self.so[2])
strides = pool_size
v = K.pool2d(v,pool_size=pool_size,strides=strides,padding='same',data_format='channels_last',pool_mode='avg')
#Extend spatial axis
for i in range(1,len(self.si) - 1):
if self.si[i] < self.so[i]:
assert self.so[i] % self.si[i] == 0
v = K.repeat_elements(v,rep=int(self.so[i] / self.si[i]),axis=i)
return v
def compute_output_shape(self, input_shape):
return self.so
#----------------------------------------------------------------------------
# Resolution selector for fading in new layers during progressive growing.
class LODSelectLayer(Layer):
def __init__(self,cur_lod,first_incoming_lod=0,ref_idx=0, min_lod=None, max_lod=None,**kwargs):
super(LODSelectLayer,self).__init__(**kwargs)
self.cur_lod = cur_lod
self.first_incoming_lod = first_incoming_lod
self.ref_idx = ref_idx
self.min_lod = min_lod
self.max_lod = max_lod
def call(self, inputs):
self.input_shapes = [K.int_shape(input) for input in inputs]
v = [ACTVResizeLayer(K.int_shape(input), self.input_shapes[self.ref_idx])(input) for input in inputs]
lo = np.clip(int(
|
np.floor(self.min_lod - self.first_incoming_lod)
|
numpy.floor
|
from __future__ import division
import numpy as np
import pdb
import scipy as sp
import cvxpy as cp
import itertools, sys
class FTOCP(object):
""" Finite Time Optimal Control Problem (FTOCP)
Methods:
        - solve: solves the FTOCP given the initial condition x0, terminal constraints (optional) and terminal cost (optional)
- model: given x_t and u_t computes x_{t+1} = Ax_t + Bu_t
"""
def __init__(self, N, A, B, Q, R, Hx=None, gx=None, Hu=None, gu=None):
# Define variables
self.N = N # Horizon Length
# System Dynamics (x_{k+1} = A x_k + Bu_k)
self.A = A
self.B = B
self.n = A.shape[1]
self.d = B.shape[1]
# Linear state constraints (Hx*x <= gx)
self.Hx = Hx
self.gx = gx
# Linear input constraints (Hu*u <= gu)
self.Hu = Hu
self.gu = gu
# Cost (h(x,u) = x^TQx +u^TRu)
self.Q = Q
self.R = R
# FTOCP cost
self.costFTOCP = np.inf
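# ---- Hedged construction sketch (comment only, added for illustration; the matrices are assumptions) ----
# A discrete-time double integrator x_{k+1} = A x_k + B u_k over a horizon N = 10:
# A = np.array([[1.0, 0.1], [0.0, 1.0]]); B = np.array([[0.005], [0.1]])
# ftocp = FTOCP(N=10, A=A, B=B, Q=np.eye(2), R=0.1 * np.eye(1))
# ftocp.solve(x0=np.array([1.0, 0.0]))  # regulates the state toward the origin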
# def stage_cost_fun(self, x, xf, u):
# # Using the cvxpy norm function here
# return cp.norm(self.Q**0.5*(x-xf))**2 + cp.norm(self.R**0.5*u)**2
#
# def term_cost_fun(self, x, xf):
# # Using the cvxpy norm function here
# return cp.norm(self.Q**0.5*(x-xf))**2
def solve(self, x0, xf=None, abs_t=None, expl_con=None, SS=None, Qfun=None, CVX=False, verbose=False):
"""This method solves a FTOCP given:
- x0: initial condition
- xf: (optional) goal condition, defaults to the origin
- abs_t: (required if circular or linear constraints are provided) absolute time step
- expl_con: (optional) allowed deviations, can be ellipsoidal or linear constraints
- SS: (optional) contains a set of state and the terminal constraint is ConvHull(SS)
- Qfun: (optional) cost associated with the states stored in SS. Terminal cost is BarycentricInterpolation(SS, Qfun)
- CVX: (optional)
"""
if xf is None:
xf = np.zeros(self.n)
else:
xf = np.reshape(xf, self.n)
if expl_con is not None:
if 'lin' in expl_con:
H = expl_con['lin'][0]
g = expl_con['lin'][1]
if 'ell' in expl_con:
ell_con = expl_con['ell']
# Initialize Variables
x = cp.Variable((self.n, self.N+1))
u = cp.Variable((self.d, self.N))
# If SS is given construct a matrix collecting all states and a vector collection all costs
if SS is not None:
# SS_vector = np.squeeze(list(itertools.chain.from_iterable(SS))).T # From a 3D list to a 2D array
# SS_vector = np.hstack(SS)
SS_vector = SS[-1] # Only use last trajectory
# SS_vector = SS[-1][:,abs_t:min(SS[-1].shape[1],abs_t+int(2*(self.N+1)))]
# Qfun_vector = np.expand_dims(np.array(list(itertools.chain.from_iterable(Qfun))), 0) # From a 2D list to a 1D array
Qfun_vector =
|
np.array(Qfun[-1])
|
numpy.array
|
import numpy as np
class Buffer:
def __init__(self, params):
history_length = params.history_length
width = params.width
height = params.height
self.dims = (width, height, history_length)
self.buffer = np.zeros(self.dims, dtype=np.uint8)
def add(self, state):
self.buffer[:, :, :-1] = self.buffer[:, :, 1:]
self.buffer[:, :, -1] = state
def getInput(self):
x =
|
np.reshape(self.buffer, (1,) + self.dims)
|
numpy.reshape
|
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from scipy.spatial.distance import cdist
import time
import itertools
import imageio
import heapq
import pickle
sns.set()
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2018"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Thesis"
# Class definitions
class SearchState:
def __init__(self, carPos, eventPos, eventTimes, eventStatus, heuristicVal, costVal, parent,hWeight):
self.carPos = carPos
self.eventPos = eventPos
self.eventTimes = eventTimes
self.eventStatus = eventStatus
self.time = parent.time+1 if parent is not None else 0 # time is one step ahead of parent
self.hval = heuristicVal
self.gval = costVal
self.hWeight = hWeight
self.parent = parent # predecessor in graph
self.root = parent is None # True if this state is the root, False otherwise
return
def __lt__(self, other):
# make sure comparison is to SearchState object
try:
assert (isinstance(other, SearchState))
except:
raise TypeError("must compare to SearchState object.")
# return lt check
return self.getFval() < other.getFval()
def __eq__(self, other):
# make sure comparison is to SearchState object
try:
assert(isinstance(other, SearchState))
except:
raise TypeError("must compare to SearchState object.")
# check
carEq = np.array_equal(self.carPos, other.carPos)
eveEq = np.array_equal(self.eventPos,other.eventPos)
etmEq = np.array_equal(self.eventTimes,other.eventTimes)
sttEq = np.array_equal(self.eventStatus, other.eventStatus)
timEq = self.time == other.time
return carEq and eveEq and etmEq and sttEq and timEq
def __repr__(self):
return "time: {0}, cost: {1}, heuristic: {2}, root: {3}, goal: {4}\n".format(self.time,
self.gval,
self.hval,
self.root,
self.goalCheck())
def __hash__(self):
carPosVec = np.reshape(self.carPos, self.carPos.size)
evePosVec = np.reshape(self.eventPos, self.eventPos.size)
eveSttVec = self.eventStatus.astype(np.int32)
stateTime = np.reshape(
|
np.array(self.time)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
TODO: Please check readme.txt file first!
--
This Python2.7 program is to reproduce Figure-7. In this test, we compare
GraphStoIHT with six baseline methods on the grid dataset, which can be found
in reference [2].
References:
[1] <NAME>, <NAME>, and <NAME>. "Linear convergence of
stochastic iterative greedy algorithms with sparse constraints."
IEEE Transactions on Information Theory 63.11 (2017): 6869-6895.
[2] Hegde, Chinmay, <NAME>, and <NAME>. "A nearly-linear time
framework for graph-structured sparsity." International Conference on
Machine Learning. 2015.
[3] Blumensath, Thomas, and <NAME>. "Iterative hard thresholding
for compressed sensing." Applied and computational harmonic analysis
27.3 (2009): 265-274.
[4] Hegde, Chinmay, <NAME>, and <NAME>. "Fast recovery from
a union of subspaces." Advances in Neural Information Processing
Systems. 2016.
[5] <NAME>. "Random walks on graphs: A survey." Combinatorics,
Paul erdos is eighty 2.1 (1993): 1-46.
[6] Needell, Deanna, and <NAME>. "CoSaMP: Iterative signal recovery
from incomplete and inaccurate samples."
Applied and computational harmonic analysis 26.3 (2009): 301-321.
[7] Blumensath, Thomas, and <NAME>. "Normalized iterative hard
thresholding: Guaranteed stability and performance." IEEE Journal
of selected topics in signal processing 4.2 (2010): 298-309.
# TODO You need to:
1. install numpy, matplotlib (optional), and networkx (optional).
2. build our sparse_module by executing ./build.sh. Please check our
readme.md file if you do not know how to compile this library.
"""
import os
import time
import pickle
import random
import multiprocessing
from itertools import product
import numpy as np
try:
import sparse_module
try:
from sparse_module import wrap_head_tail_bisearch
except ImportError:
print('cannot find wrap_head_tail_bisearch method in sparse_module')
sparse_module = None
exit(0)
except ImportError:
print('\n'.join([
'cannot find the module: sparse_module',
'try run: \'python setup.py build_ext --inplace\' first! ']))
def algo_head_tail_bisearch(
edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose):
""" This is the wrapper of head/tail-projection proposed in [2].
:param edges: edges in the graph.
:param x: projection vector x.
:param costs: edge costs in the graph.
:param g: the number of connected components.
:param root: root of subgraph. Usually, set to -1: no root.
:param s_low: the lower bound of the sparsity.
:param s_high: the upper bound of the sparsity.
:param max_num_iter: the maximum number of iterations used in
binary search procedure.
:param verbose: print out some information.
:return: 1. the support of the projected vector
2. the projected vector
"""
prizes = x * x
# to avoid too large upper bound problem.
if s_high >= len(prizes) - 1:
s_high = len(prizes) - 1
re_nodes = wrap_head_tail_bisearch(
edges, prizes, costs, g, root, s_low, s_high, max_num_iter, verbose)
proj_w = np.zeros_like(x)
proj_w[re_nodes[0]] = x[re_nodes[0]]
return re_nodes[0], proj_w
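# ---- Hedged usage sketch (comment only, added for illustration; requires the compiled sparse_module) ----
# Project a dense vector x onto (at most) one connected component of 4 to 6 nodes of a
# grid graph built by simu_grid_graph() below; the arguments mirror the signature above.
# nodes, x_proj = algo_head_tail_bisearch(edges, x, costs, g=1, root=-1,
#                                         s_low=4, s_high=6, max_num_iter=50, verbose=0)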
def simu_grid_graph(width, height, rand_weight=False):
""" Generate a grid graph with size, width x height. Totally there will be
width x height number of nodes in this generated graph.
:param width: the width of the grid graph.
:param height: the height of the grid graph.
:param rand_weight: the edge costs in this generated grid graph.
:return: 1. list of edges
2. list of edge costs
"""
np.random.seed()
if width <= 0 or height <= 0:
print('Error: width and height should be positive.')
return [], []
width, height = int(width), int(height)
edges, weights = [], []
index = 0
for i in range(height):
for j in range(width):
if (index % width) != (width - 1):
edges.append((index, index + 1))
if index + width < int(width * height):
edges.append((index, index + width))
else:
if index + width < int(width * height):
edges.append((index, index + width))
index += 1
edges = np.asarray(edges, dtype=int)
# random generate costs of the graph
if rand_weight:
weights = []
while len(weights) < len(edges):
weights.append(random.uniform(1., 2.0))
weights = np.asarray(weights, dtype=np.float64)
else: # set unit weights for edge costs.
weights = np.ones(len(edges), dtype=np.float64)
return edges, weights
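# ---- Hedged usage sketch (added for illustration only) ----
# A width x height grid has width*height nodes and height*(width-1) + width*(height-1)
# edges; for a 3 x 3 grid that is 12 edges, all with unit cost by default.
_demo_edges, _demo_weights = simu_grid_graph(width=3, height=3)
assert len(_demo_edges) == 12 and np.all(_demo_weights == 1.0)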
def sensing_matrix(n, x, norm_noise=0.0):
""" Generate sensing matrix (design matrix). This generated sensing
matrix is a Gaussian matrix, i.e., each entry ~ N(0,\sigma/\sqrt(n)).
Please see more details in equation (1.2) shown in reference [6].
:param n: the number of measurements required.
:param x: the input signal.
:param norm_noise: plus ||norm_noise|| noise on the measurements.
:return: 1. the design matrix
2. the vector of measurements
3. the noised vector.
"""
p = len(x)
x_mat = np.random.normal(0.0, 1.0, size=(n * p)) / np.sqrt(n)
x_mat = x_mat.reshape((n, p))
y_tr = np.dot(x_mat, x)
noise_e = np.random.normal(0.0, 1.0, len(y_tr))
y_e = y_tr + (norm_noise / np.linalg.norm(noise_e)) * noise_e
return x_mat, y_tr, y_e
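# ---- Hedged usage sketch (added for illustration only) ----
# Draw n = 50 Gaussian measurements of a 100-dimensional, 8-sparse signal.
# With norm_noise = 0.0 the noiseless and noisy measurements coincide.
_x_demo = np.zeros(100)
_x_demo[:8] = 1.0
_x_mat_demo, _y_tr_demo, _y_e_demo = sensing_matrix(n=50, x=_x_demo, norm_noise=0.0)
assert _x_mat_demo.shape == (50, 100) and np.allclose(_y_tr_demo, _y_e_demo)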
def algo_iht(x_mat, y_tr, max_epochs, lr, s, x0, tol_algo):
""" Iterative Hard Thresholding Method proposed in reference [3]. The
standard iterative hard thresholding method for compressive sensing.
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param lr: the learning rate (should be 1.0).
:param s: the sparsity parameter.
:param x0: x0 is the initial point.
:param tol_algo: tolerance parameter for early stopping.
:return: 1. the final estimation error,
2. number of epochs(iterations) used,
3. and the run time.
"""
start_time = time.time()
x_hat = x0
(n, p) = x_mat.shape
x_tr_t = np.transpose(x_mat)
xtx = np.dot(x_tr_t, x_mat)
xty = np.dot(x_tr_t, y_tr)
num_epochs = 0
for epoch_i in range(max_epochs):
num_epochs += 1
bt = x_hat - lr * (np.dot(xtx, x_hat) - xty)
bt[np.argsort(np.abs(bt))[0:p - s]] = 0. # thresholding step
x_hat = bt
# early stopping for diverge cases due to the large learning rate
if np.linalg.norm(x_hat) >= 1e3: # diverge cases.
break
if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:
break
run_time = time.time() - start_time
return num_epochs, run_time, x_hat
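# ---- Hedged micro-example of the hard-thresholding step (added for illustration only) ----
# With p = 5 and s = 2, the p - s = 3 entries of smallest magnitude are zeroed:
_bt_demo = np.array([0.1, -3.0, 0.2, 2.5, -0.05])
_bt_demo[np.argsort(np.abs(_bt_demo))[0:5 - 2]] = 0.
assert np.allclose(_bt_demo, [0.0, -3.0, 0.0, 2.5, 0.0])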
def cv_iht(x_tr_mat, y_tr, x_va_mat, y_va,
max_epochs, lr_list, s, x_star, x0, tol_algo):
""" Tuning parameter by using additional validation dataset. """
test_err_mat = np.zeros(shape=len(lr_list))
x_hat_dict = dict()
for lr_ind, lr in enumerate(lr_list):
num_epochs, run_time, x_hat = algo_iht(
x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs,
lr=lr, s=s, x0=x0, tol_algo=tol_algo)
y_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
test_err_mat[lr_ind] = y_err
x_hat_dict[lr] = (num_epochs, run_time, x_hat)
min_index = np.argmin(test_err_mat)
best_lr = lr_list[min_index]
err = np.linalg.norm(x_star - x_hat_dict[best_lr][2])
num_epochs, run_time = x_hat_dict[best_lr][:2]
return err, num_epochs, run_time
def algo_sto_iht(x_mat, y_tr, max_epochs, lr, s, x0, tol_algo, b):
""" Stochastic Iterative Hard Thresholding Method proposed in [1].
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param lr: the learning rate (should be 1.0).
:param s: the sparsity parameter.
:param x0: x0 is the initial point.
:param tol_algo: tolerance parameter for early stopping.
:param b: block size
:return: 1. the final estimation error,
2. number of epochs(iterations) used,
3. and the run time.
"""
np.random.seed()
start_time = time.time()
x_hat = x0
(n, p) = x_mat.shape
x_tr_t = np.transpose(x_mat)
b = n if n < b else b
num_blocks = int(n) // int(b)  # integer number of blocks (works under Python 2 and 3)
prob = [1. / num_blocks] * num_blocks
num_epochs = 0
for epoch_i in range(max_epochs):
num_epochs += 1
for _ in range(num_blocks):
ii = np.random.randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
xtx = np.dot(x_tr_t[:, block], x_mat[block])
xty = np.dot(x_tr_t[:, block], y_tr[block])
gradient = - 2. * (xty - np.dot(xtx, x_hat))
bt = x_hat - (lr / (prob[ii] * num_blocks)) * gradient
bt[np.argsort(np.abs(bt))[0:p - s]] = 0.
x_hat = bt
if np.linalg.norm(x_hat) >= 1e3: # diverge cases.
break
if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:
break
run_time = time.time() - start_time
return num_epochs, run_time, x_hat
def cv_sto_iht(x_tr_mat, y_tr, x_va_mat, y_va, max_epochs, s, x_star, x0,
tol_algo, b_list, lr_list):
""" Tuning parameter by using additional validation dataset. """
test_err_mat = np.zeros(len(lr_list) * len(b_list))
para_dict = dict()
x_hat_dict = dict()
for index, (lr, b) in enumerate(product(lr_list, b_list)):
num_epochs, run_time, x_hat = algo_sto_iht(
x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs,
lr=lr, s=s, x0=x0, tol_algo=tol_algo, b=b)
y_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
test_err_mat[index] = y_err
para_dict[index] = (lr, b)
x_hat_dict[(lr, b)] = (num_epochs, run_time, x_hat)
lr, b = para_dict[int(np.argmin(test_err_mat))]
err = np.linalg.norm(x_star - x_hat_dict[(lr, b)][2])
num_epochs, run_time = x_hat_dict[(lr, b)][:2]
return err, num_epochs, run_time
def algo_graph_iht(
x_mat, y_tr, max_epochs, lr, x0, tol_algo, edges, costs, g, s,
root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):
""" Graph Iterative Hard Thresholding proposed in [4] and projection
operator is proposed in [2].
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param lr: the learning rate (should be 1.0).
:param x0: x0 is the initial point.
:param tol_algo: tolerance parameter for early stopping.
:param edges: edges in the graph.
:param costs: edge costs
:param s: sparsity
:param g: number of connected component in the true signal.
:param root: the root included in the result (default -1: no root).
:param gamma: to control the upper bound of sparsity.
:param proj_max_num_iter: maximum number of iterations of projection.
:param verbose: print out some information.
:return: 1. the final estimation error,
2. number of epochs(iterations) used,
3. and the run time.
"""
start_time = time.time()
x_hat = np.copy(x0)
xtx = np.dot(np.transpose(x_mat), x_mat)
xty = np.dot(np.transpose(x_mat), y_tr)
# graph projection para
h_low = int(len(x0) / 2)
h_high = int(h_low * (1. + gamma))
t_low = int(s)
t_high = int(s * (1. + gamma))
num_epochs = 0
for epoch_i in range(max_epochs):
num_epochs += 1
grad = -1. * (xty - np.dot(xtx, x_hat))
head_nodes, proj_gradient = algo_head_tail_bisearch(
edges, grad, costs, g, root, h_low, h_high,
proj_max_num_iter, verbose)
bt = x_hat - lr * proj_gradient
tail_nodes, proj_bt = algo_head_tail_bisearch(
edges, bt, costs, g, root, t_low, t_high,
proj_max_num_iter, verbose)
x_hat = proj_bt
if np.linalg.norm(x_hat) >= 1e3: # diverge cases.
break
if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:
break
run_time = time.time() - start_time
return num_epochs, run_time, x_hat
def cv_graph_iht(x_tr_mat, y_tr, x_va_mat, y_va, max_epochs, lr_list, x_star,
x0, tol_algo, edges, costs, s):
""" Tuning parameter by using additional validation dataset. """
test_err_mat = np.zeros(len(lr_list))
x_hat_dict = dict()
for lr_ind, lr in enumerate(lr_list):
num_epochs, run_time, x_hat = algo_graph_iht(
x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs, lr=lr, x0=x0,
tol_algo=tol_algo, edges=edges, costs=costs, g=1, s=s)
y_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
test_err_mat[lr_ind] = y_err
x_hat_dict[lr] = (num_epochs, run_time, x_hat)
min_index = np.argmin(test_err_mat)
best_lr = lr_list[min_index]
err = np.linalg.norm(x_star - x_hat_dict[best_lr][2])
num_epochs, run_time = x_hat_dict[best_lr][:2]
return err, num_epochs, run_time
def algo_graph_sto_iht(
x_mat, y_tr, max_epochs, lr, x0, tol_algo, edges, costs, g, s, b,
root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):
""" Graph Stochastic Iterative Hard Thresholding.
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param lr: the learning rate (should be 1.0).
:param x0: x0 is the initial point.
:param tol_algo: tolerance parameter for early stopping.
:param edges: edges in the graph.
:param costs: edge costs
:param s: sparsity
:param b: the block size
:param g: number of connected component in the true signal.
:param root: the root included in the result (default -1: no root).
:param gamma: to control the upper bound of sparsity.
:param proj_max_num_iter: maximum number of iterations of projection.
:param verbose: print out some information.
:return: 1. the final estimation error,
2. number of epochs(iterations) used,
3. and the run time.
"""
np.random.seed()
start_time = time.time()
x_hat = np.copy(x0)
(n, p) = x_mat.shape
x_tr_t = np.transpose(x_mat)
b = n if n < b else b
num_blocks = int(n) // int(b)  # integer number of blocks (works under Python 2 and 3)
prob = [1. / num_blocks] * num_blocks
# graph projection para
h_low = int(len(x0) / 2)
h_high = int(h_low * (1. + gamma))
t_low = int(s)
t_high = int(s * (1. + gamma))
num_epochs = 0
for epoch_i in range(max_epochs):
num_epochs += 1
for _ in range(num_blocks):
ii = np.random.randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
xtx = np.dot(x_tr_t[:, block], x_mat[block])
xty = np.dot(x_tr_t[:, block], y_tr[block])
gradient = -2. * (xty - np.dot(xtx, x_hat))
head_nodes, proj_grad = algo_head_tail_bisearch(
edges, gradient, costs, g, root, h_low, h_high,
proj_max_num_iter, verbose)
bt = x_hat - (lr / (prob[ii] * num_blocks)) * proj_grad
tail_nodes, proj_bt = algo_head_tail_bisearch(
edges, bt, costs, g, root,
t_low, t_high, proj_max_num_iter, verbose)
x_hat = proj_bt
if np.linalg.norm(x_hat) >= 1e3: # diverge cases.
break
if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:
break
run_time = time.time() - start_time
return num_epochs, run_time, x_hat
def cv_graph_sto_iht(x_tr_mat, y_tr, x_va_mat, y_va, b_list, lr_list, s,
max_epochs, tol_algo, x_star, x0, edges, costs):
""" Tuning parameter by using additional validation dataset. """
test_err_mat = np.zeros(len(lr_list) * len(b_list))
para_dict = dict()
x_hat_dict = dict()
for index, (lr, b) in enumerate(product(lr_list, b_list)):
num_epochs, run_time, x_hat = algo_graph_sto_iht(
x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs, lr=lr, x0=x0,
tol_algo=tol_algo, edges=edges, costs=costs, g=1, s=s, b=b)
y_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
test_err_mat[index] = y_err
para_dict[index] = (lr, b)
x_hat_dict[(lr, b)] = (num_epochs, run_time, x_hat)
lr, b = para_dict[int(np.argmin(test_err_mat))]
err = np.linalg.norm(x_star - x_hat_dict[(lr, b)][2])
num_epochs, run_time = x_hat_dict[(lr, b)][:2]
return err, num_epochs, run_time
def algo_niht(x_mat, y_tr, max_epochs, s, x_star, x0, tol_algo):
""" Normalized Iterative Hard Thresholding (NIHT) proposed in [7].
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param x0: x0 is the initial point.
:param s: the sparsity parameter.
:param x_star: the true signal.
:param tol_algo: tolerance parameter for early stopping.
:return:
"""
start_time = time.time()
x_hat = x0
c = 0.01
kappa = 2. / (1 - c)
(m, p) = x_mat.shape
x_tr_t = np.transpose(x_mat)
xtx, xty = np.dot(x_tr_t, x_mat), np.dot(x_tr_t, y_tr)
gamma = np.argsort(np.abs(np.dot(x_tr_t, y_tr)))[-s:]
num_epochs = 0
for epoch_i in range(max_epochs):
num_epochs += 1
# we obey the implementation used in their code
gn = xty - np.dot(xtx, x_hat)
tmp_v = np.dot(x_mat[:, gamma], gn[gamma])
xx = np.dot(gn[gamma], gn[gamma])
yy = np.dot(tmp_v, tmp_v)
if yy != 0:
mu = xx / yy
else:
mu = 1.
bt = x_hat + mu * gn
bt[np.argsort(np.abs(bt))[0:p - s]] = 0.
w_tmp = bt
gamma_next = np.nonzero(w_tmp)[0]
if set(gamma_next).__eq__(set(gamma)):
x_hat = w_tmp
else:
xx = np.linalg.norm(w_tmp - x_hat) ** 2.
yy = np.linalg.norm(np.dot(x_mat, w_tmp - x_hat)) ** 2.
if yy <= 0.0:
continue
if mu <= (1. - c) * xx / yy:
x_hat = w_tmp
elif mu > (1. - c) * xx / yy:
while True:
mu = mu / (kappa * (1. - c))
bt = x_hat + mu * gn
bt[np.argsort(np.abs(bt))[0:p - s]] = 0.
w_tmp = bt
xx =
|
np.linalg.norm(w_tmp - x_hat)
|
numpy.linalg.norm
|
"""
solar collectors
"""
import os
import time
from itertools import repeat
from math import *
import geopandas as gpd
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as gdf
from numba import jit
import cea.config
import cea.inputlocator
import cea.utilities.parallel
from cea.constants import HOURS_IN_YEAR
from cea.technologies.solar import constants
from cea.utilities import epwreader
from cea.utilities import solar_equations
from cea.utilities.standardize_coordinates import get_lat_lon_projected_shapefile
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# SC heat generation
def calc_SC(locator, config, latitude, longitude, weather_data, date_local, building_name):
"""
This function first determines the surface area with sufficient solar radiation, and then calculates the optimal
tilt angles of panels at each surface location. The panels are categorized into groups by their surface azimuths,
tilt angles, and global irradiation. Finally, heat generation from SC panels of each group is calculated.
:param locator: An InputLocator to locate input files
:type locator: cea.inputlocator.InputLocator
:param config: cea.config
:param latitude: latitude of the case study location
:type latitude: float
:param longitude: longitude of the case study location
:type longitude: float
:param weather_data: Data frame containing the weather data in the .epw file as per config
:type weather_data: pandas.DataFrame
:param date_local: contains the localized (to timezone) dates for each timestep of the year
:param building_name: list of building names in the case study
:type building_name: Series
:return: Building_SC.csv with solar collectors heat generation potential of each building, Building_SC_sensors.csv
with sensor data of each SC panel
"""
t0 = time.perf_counter()
type_panel = config.solar.type_SCpanel
radiation_csv = locator.get_radiation_building_sensors(building=building_name)
metadata_csv = locator.get_radiation_metadata(building=building_name)
# solar properties
solar_properties = solar_equations.calc_sun_properties(latitude, longitude, weather_data, date_local, config)
print('calculating solar properties done for building %s' % building_name)
# get properties of the panel to evaluate
panel_properties_SC = calc_properties_SC_db(locator.get_database_conversion_systems(), config)
print('gathering properties of Solar collector panel for building %s' % building_name)
# select sensor point with sufficient solar radiation
max_annual_radiation, annual_radiation_threshold, sensors_rad_clean, sensors_metadata_clean = \
solar_equations.filter_low_potential(radiation_csv, metadata_csv, config)
print('filtering low potential sensor points done for building %s' % building_name)
# Calculate the heights of all buildings for length of vertical pipes
tot_bui_height_m = gpd.read_file(locator.get_zone_geometry())['height_ag'].sum()
# set the maximum roof coverage
if config.solar.custom_roof_coverage:
max_roof_coverage = config.solar.max_roof_coverage
else:
max_roof_coverage = 1.0
if not sensors_metadata_clean.empty:
if not config.solar.custom_tilt_angle:
# calculate optimal angle and tilt for panels
sensors_metadata_cat = solar_equations.optimal_angle_and_tilt(sensors_metadata_clean, latitude,
solar_properties, max_annual_radiation,
panel_properties_SC, max_roof_coverage)
print('calculating optimal tilt angle and separation done for building %s' % building_name)
else:
# calculate spacing required by user-supplied tilt angle for panels
sensors_metadata_cat = solar_equations.calc_spacing_custom_angle(sensors_metadata_clean, solar_properties,
max_annual_radiation, panel_properties_SC,
config.solar.panel_tilt_angle,
max_roof_coverage)
print('calculating separation for custom tilt angle done')
# group the sensors with the same tilt, surface azimuth, and total radiation
sensor_groups = solar_equations.calc_groups(sensors_rad_clean, sensors_metadata_cat)
print('generating groups of sensor points done for building %s' % building_name)
# calculate heat production from solar collectors
Final = calc_SC_generation(sensor_groups, weather_data, date_local, solar_properties, tot_bui_height_m,
panel_properties_SC,
latitude, config)
# save SC generation potential and metadata of the selected sensors
panel_type = panel_properties_SC['type']
Final.to_csv(locator.SC_results(building_name, panel_type), index=True, float_format='%.2f', na_rep='nan')
sensors_metadata_cat.to_csv(locator.SC_metadata_results(building_name, panel_type), index=True,
index_label='SURFACE',
float_format='%.2f', na_rep='nan') # print selected metadata of the selected sensors
print('Building', building_name, 'done - time elapsed:', (time.perf_counter() - t0), ' seconds')
else: # this branch is executed when a building does not have sufficient solar potential
panel_type = panel_properties_SC['type']
Final = pd.DataFrame(
{'SC_' + type_panel + '_walls_north_m2': 0, 'SC_' + type_panel + '_walls_north_Q_kWh': 0,
'SC_' + type_panel + '_walls_north_Tout_C': 0,
'SC_' + type_panel + '_walls_south_m2': 0, 'SC_' + type_panel + '_walls_south_Q_kWh': 0,
'SC_' + type_panel + '_walls_south_Tout_C': 0,
'SC_' + type_panel + '_walls_east_m2': 0, 'SC_' + type_panel + '_walls_east_Q_kWh': 0,
'SC_' + type_panel + '_walls_east_Tout_C': 0,
'SC_' + type_panel + '_walls_west_m2': 0, 'SC_' + type_panel + '_walls_west_Q_kWh': 0,
'SC_' + type_panel + '_walls_west_Tout_C': 0,
'SC_' + type_panel + '_roofs_top_m2': 0, 'SC_' + type_panel + '_roofs_top_Q_kWh': 0,
'SC_' + type_panel + '_roofs_top_Tout_C': 0,
'Q_SC_gen_kWh': 0, 'T_SC_sup_C': 0, 'T_SC_re_C': 0, 'mcp_SC_kWperC': 0, 'Eaux_SC_kWh': 0,
'Q_SC_l_kWh': 0, 'Area_SC_m2': 0, 'radiation_kWh': 0,
'Date':date_local},
index=np.zeros(HOURS_IN_YEAR))
Final.set_index('Date', inplace=True)
Final.to_csv(locator.SC_results(building_name, panel_type), index=True, float_format='%.2f', na_rep='nan')
sensors_metadata_cat = pd.DataFrame(
{'SURFACE': 0, 'AREA_m2': 0, 'BUILDING': 0, 'TYPE': 0, 'Xcoor': 0, 'Xdir': 0, 'Ycoor': 0, 'Ydir': 0,
'Zcoor': 0, 'Zdir': 0, 'orientation': 0, 'total_rad_Whm2': 0, 'tilt_deg': 0, 'B_deg': 0,
'array_spacing_m': 0, 'surface_azimuth_deg': 0, 'area_installed_module_m2': 0,
'CATteta_z': 0, 'CATB': 0, 'CATGB': 0, 'type_orientation': 0}, index=range(2))
sensors_metadata_cat.to_csv(locator.SC_metadata_results(building_name, panel_type), index=True,
float_format='%.2f', na_rep="nan")
return
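# ---- Hedged call sketch (comment only, added for illustration; coordinates and building name are assumptions) ----
# Typical driver code supplies a CEA InputLocator and config object, for example:
# calc_SC(locator, config, latitude=47.4, longitude=8.5, weather_data=weather_data,
#         date_local=date_local, building_name='B001')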
# =========================
# SC heat production
# =========================
def calc_SC_generation(sensor_groups, weather_data, date_local, solar_properties, tot_bui_height, panel_properties_SC,
latitude_deg, config):
"""
To calculate the heat generated from SC panels.
:param sensor_groups: properties of sensors in each group
:type sensor_groups: dict
:param weather_data: weather data read from the epw file
:type weather_data: dataframe
:param solar_properties:
:param tot_bui_height: total height of all buildings [m]
:param panel_properties_SC: properties of solar panels
:type panel_properties_SC: dataframe
:param latitude_deg: latitude of the case study location
:param config: user settings from cea.config
:return: dataframe
"""
# local variables
type_panel = config.solar.type_SCpanel
number_groups = sensor_groups['number_groups'] # number of groups of sensor points
prop_observers = sensor_groups['prop_observers'] # mean values of sensor properties of each group of sensors
hourly_radiation = sensor_groups['hourlydata_groups'] # mean hourly radiation of sensors in each group [Wh/m2]
T_in_C = get_t_in_sc(config)
Tin_array_C = np.zeros(HOURS_IN_YEAR) + T_in_C
# create lists to store results
list_results_from_SC = [0 for i in range(number_groups)]
list_areas_groups = [0 for i in range(number_groups)]
total_radiation_kWh = [0 for i in range(number_groups)]
total_mcp_kWperC = [0 for i in range(number_groups)]
total_qloss_kWh = [0 for i in range(number_groups)]
total_aux_el_kWh = [0 for i in range(number_groups)]
total_Qh_output_kWh = [0 for i in range(number_groups)]
potential = pd.DataFrame(index=range(HOURS_IN_YEAR))
panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west']
for panel_orientation in panel_orientations:
potential['SC_'+ type_panel + '_' + panel_orientation + '_Q_kWh'] = 0
potential['SC_' + type_panel + '_'+ panel_orientation + '_m2'] = 0
# calculate equivalent length of pipes
total_area_module_m2 = prop_observers['area_installed_module_m2'].sum() # total area for panel installation
total_pipe_length = cal_pipe_equivalent_length(tot_bui_height, panel_properties_SC, total_area_module_m2)
# assign default number of subdivisions for the calculation
if panel_properties_SC['type'] == 'ET': # ET: evacuated tubes
panel_properties_SC['Nseg'] = 100 # default number of subdivisions for the calculation
else:
panel_properties_SC['Nseg'] = 10
for group in range(number_groups):
# calculate radiation types (direct/diffuse) in group
radiation_Wperm2 = solar_equations.cal_radiation_type(group, hourly_radiation, weather_data)
# load panel angles from each group
teta_z_deg = prop_observers.loc[group, 'surface_azimuth_deg'] # azimuth of panels of group
tilt_angle_deg = prop_observers.loc[group, 'B_deg'] # tilt angle of panels
# calculate incidence angle modifier for beam radiation
IAM_b = calc_IAM_beam_SC(solar_properties, teta_z_deg, tilt_angle_deg, panel_properties_SC['type'],
latitude_deg)
# calculate heat production from a solar collector of each group
list_results_from_SC[group] = calc_SC_module(config, radiation_Wperm2, panel_properties_SC,
weather_data.drybulb_C.values,
IAM_b, tilt_angle_deg, total_pipe_length)
# calculate results from each group
panel_orientation = prop_observers.loc[group, 'type_orientation']
module_area_per_group_m2 = prop_observers.loc[group, 'area_installed_module_m2']
number_modules_per_group = module_area_per_group_m2 / panel_properties_SC['module_area_m2']
SC_Q_kWh = list_results_from_SC[group][1] * number_modules_per_group
potential['SC_' + type_panel + '_' + panel_orientation + '_Q_kWh'] = potential[
'SC_' + type_panel + '_' + panel_orientation + '_Q_kWh'] + SC_Q_kWh
potential['SC_' + type_panel + '_' + panel_orientation + '_m2'] = potential[
'SC_' + type_panel + '_' + panel_orientation + '_m2'] + module_area_per_group_m2 # assume parallel connections in this group
# aggregate results from all modules
list_areas_groups[group] = module_area_per_group_m2
total_mcp_kWperC[group] = list_results_from_SC[group][5] * number_modules_per_group
total_qloss_kWh[group] = list_results_from_SC[group][0] * number_modules_per_group
total_aux_el_kWh[group] = list_results_from_SC[group][2] * number_modules_per_group
total_Qh_output_kWh[group] = list_results_from_SC[group][1] * number_modules_per_group
total_radiation_kWh[group] = (radiation_Wperm2['I_sol'] * module_area_per_group_m2 / 1000)
potential['Area_SC_m2'] = sum(list_areas_groups)
potential['radiation_kWh'] = sum(total_radiation_kWh).values
potential['Q_SC_gen_kWh'] = sum(total_Qh_output_kWh)
potential['mcp_SC_kWperC'] = sum(total_mcp_kWperC)
potential['Eaux_SC_kWh'] = sum(total_aux_el_kWh)
potential['Q_SC_l_kWh'] = sum(total_qloss_kWh)
potential['T_SC_sup_C'] = Tin_array_C
T_out_C = (potential['Q_SC_gen_kWh'] / potential['mcp_SC_kWperC']) + T_in_C
potential['T_SC_re_C'] = T_out_C if T_out_C is not np.nan else np.nan # assume parallel connections for all panels #FIXME: change here when the flow rate is zero
potential['Date'] = date_local
potential = potential.set_index('Date')
return potential
def get_t_in_sc(config):
if config.solar.t_in_sc is not None:
Tin_C = config.solar.T_in_SC
else:
if config.solar.type_SCpanel == 'FP':
Tin_C = constants.T_IN_SC_FP
elif config.solar.type_SCpanel == 'ET':
Tin_C = constants.T_IN_SC_ET
return Tin_C
def cal_pipe_equivalent_length(tot_bui_height_m, panel_prop, total_area_module):
"""
To calculate the equivalent length of pipings in buildings
:param tot_bui_height_m: total heights of buildings
:type tot_bui_height_m: float
:param panel_prop: properties of the solar panels
:type panel_prop: dict
:param total_area_module: total installed module area
:type total_area_module: float
:return: equivalent lengths of pipings in buildings
:rtype: dict
"""
# local variables
lv = panel_prop['module_length_m'] # module length
total_area_aperture = total_area_module * panel_prop['aperture_area_ratio']
number_modules = round(total_area_module / panel_prop['module_area_m2']) # this is an estimation
# main calculation
l_ext_mperm2 = (2 * lv * number_modules / total_area_aperture) # pipe length within the collectors
l_int_mperm2 = 2 * tot_bui_height_m / total_area_aperture # pipe length from building substation to roof top collectors
Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture
pipe_equivalent_lengths = {'Leq_mperm2': Leq_mperm2, 'l_ext_mperm2': l_ext_mperm2, 'l_int_mperm2': l_int_mperm2}
return pipe_equivalent_lengths
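# ---- Hedged worked example (added for illustration; the panel numbers are assumptions, not CEA defaults) ----
# For a panel with module_length_m = 2.0, module_area_m2 = 2.0 and aperture_area_ratio = 0.9,
# 40 m2 of installed modules and 100 m of total building height give 36 m2 of aperture area,
# so l_ext = 2*2*20/36 ~= 2.22 m/m2 and l_int = 2*100/36 ~= 5.56 m/m2 of aperture.
_panel_demo = {'module_length_m': 2.0, 'module_area_m2': 2.0, 'aperture_area_ratio': 0.9}
_pipe_demo = cal_pipe_equivalent_length(tot_bui_height_m=100.0, panel_prop=_panel_demo, total_area_module=40.0)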
def calc_SC_module(config, radiation_Wperm2, panel_properties, Tamb_vector_C, IAM_b, tilt_angle_deg, pipe_lengths):
"""
This function calculates the heat production from a solar collector. The method is adapted from TRNSYS Type 832.
Assume no condensation gains, no wind or long-wave dependency, sky factor set to zero.
:param config: user settings in cea.config
:param radiation_Wperm2: direct and diffuse irradiation
:type radiation_Wperm2: dataframe
:param panel_properties: properties of SC collectors
:type panel_properties: dict
:param Tamb_vector_C: ambient temperatures
:type Tamb_vector_C: Series
:param IAM_b: incident angle modifiers for direct (beam) radiation
:type IAM_b: ndarray
:param tilt_angle_deg: panel tilt angle
:type tilt_angle_deg: float
:param pipe_lengths: equivalent lengths of aux pipes
:type pipe_lengths: dict
:return:
..[M. Haller et al., 2012] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>.
(2012). TRNSYS Type 832 v5.00 " Dynamic Collector Model by <NAME>". Updated Input-Output Reference.
..[ J. Fonseca et al., 2016] <NAME>., <NAME>., <NAME>., <NAME>. City Energy Analyst:
Integrated framework for analysis and optimization of building energy systems in neighborhoods and city districts.
Energy and Buildings, 2016.
"""
# read variables
Tin_C = get_t_in_sc(config)
n0 = panel_properties['n0'] # zero loss efficiency at normal incidence [-]
c1 = panel_properties['c1'] # collector heat loss coefficient at zero temperature difference and wind speed [W/m2K]
c2 = panel_properties['c2'] # temperature difference dependency of the heat loss coefficient [W/m2K2]
mB0_r = panel_properties['mB0_r'] # nominal flow rate per aperture area [kg/h/m2 aperture]
mB_max_r = panel_properties['mB_max_r'] # maximum flow rate per aperture area
mB_min_r = panel_properties['mB_min_r'] # minimum flow rate per aperture area
C_eff_Jperm2K = panel_properties['C_eff'] # thermal capacitance of module [J/m2K]
IAM_d = panel_properties['IAM_d'] # incident angle modifier for diffuse radiation [-]
dP1 = panel_properties['dP1'] # pressure drop [Pa/m2] at zero flow rate
dP2 = panel_properties['dP2'] # pressure drop [Pa/m2] at nominal flow rate (mB0)
dP3 = panel_properties['dP3'] # pressure drop [Pa/m2] at maximum flow rate (mB_max)
dP4 = panel_properties['dP4'] # pressure drop [Pa/m2] at minimum flow rate (mB_min)
Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK
aperture_area_ratio = panel_properties['aperture_area_ratio'] # aperture area ratio [-]
area_sc_module = panel_properties['module_area_m2']
Nseg = panel_properties['Nseg']
aperture_area_m2 = aperture_area_ratio * area_sc_module # aperture area of each module [m2]
msc_max_kgpers = mB_max_r * aperture_area_m2 / 3600 # maximum mass flow [kg/s]
# Do the calculation for every time step and every possible flow condition
# to find the states where the best-performing values are obtained.
specific_flows_kgpers = [np.zeros(HOURS_IN_YEAR), (np.zeros(HOURS_IN_YEAR) + mB0_r) * aperture_area_m2 / 3600,
(np.zeros(HOURS_IN_YEAR) + mB_max_r) * aperture_area_m2 / 3600,
(np.zeros(HOURS_IN_YEAR) + mB_min_r) * aperture_area_m2 / 3600, np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR)] # in kg/s
specific_pressure_losses_Pa = [np.zeros(HOURS_IN_YEAR), (np.zeros(HOURS_IN_YEAR) + dP2) * aperture_area_m2,
(np.zeros(HOURS_IN_YEAR) + dP3) * aperture_area_m2,
(np.zeros(HOURS_IN_YEAR) + dP4) * aperture_area_m2, np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR)] # in Pa
# generate empty lists to store results
temperature_out_C = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
temperature_in_C = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
temperature_mean_C = [
|
np.zeros(HOURS_IN_YEAR)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
import os
import sys
import h5py
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from presto.utils import rotate_opt
rcParams['figure.dpi'] = 108.8
if len(sys.argv) == 2:
load_filename = sys.argv[1]
print(f"Loading: {os.path.realpath(load_filename)}")
else:
load_filename = None
def load(load_filename):
with h5py.File(load_filename, "r") as h5f:
num_averages = h5f.attrs["num_averages"]
control_freq_1 = h5f.attrs["control_freq_1"]
control_freq_2 = h5f.attrs["control_freq_2"]
control_if = h5f.attrs["control_if"]
readout_freq_1 = h5f.attrs["readout_freq_1"]
readout_freq_2 = h5f.attrs["readout_freq_2"]
readout_duration = h5f.attrs["readout_duration"]
control_duration = h5f.attrs["control_duration"]
readout_amp = h5f.attrs["readout_amp"]
control_amp_1 = h5f.attrs["control_amp_1"]
control_amp_2 = h5f.attrs["control_amp_2"]
sample_duration = h5f.attrs["sample_duration"]
wait_delay = h5f.attrs["wait_delay"]
readout_sample_delay = h5f.attrs["readout_sample_delay"]
coupler_dc_bias = h5f.attrs["coupler_dc_bias"]
nr_freqs = h5f.attrs["nr_freqs"]
nr_amps = h5f.attrs["nr_amps"]
coupler_ac_duration = h5f.attrs["coupler_ac_duration"]
t_arr = h5f["t_arr"][()]
store_arr = h5f["store_arr"][()]
coupler_ac_freq_arr = h5f["coupler_ac_freq_arr"][()]
coupler_ac_amp_arr = h5f["coupler_ac_amp_arr"][()]
t_low = 1500 * 1e-9
t_high = 2000 * 1e-9
idx_low = np.argmin(np.abs(t_arr - t_low))
idx_high = np.argmin(
|
np.abs(t_arr - t_high)
|
numpy.abs
|
import os
import errno
import numpy as np
import scipy
import scipy.misc
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_image(image_path , image_size , is_crop=True, resize_w=64 , is_grayscale = False):
return transform(imread(image_path , is_grayscale), image_size, is_crop , resize_w)
def transform(image, npx=64 , is_crop=False, resize_w=64):
# npx : # of pixels width/height of image
if is_crop:
cropped_image = center_crop(image , npx , resize_w = resize_w)
else:
cropped_image = image
cropped_image = scipy.misc.imresize(cropped_image ,
[resize_w , resize_w])
return np.array(cropped_image)/127.5 - 1
def center_crop(x, crop_h, crop_w=None, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
rate = np.random.uniform(0, 1, size=1)
if rate < 0.5:
x = np.fliplr(x)
# first crop to 178x178, then resize to resize_w x resize_w
return scipy.misc.imresize(x[20:218-20, 0: 178], [resize_w, resize_w])
#Another cropped method
# return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w],
# [resize_w, resize_w])
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, is_grayscale=False):
if (is_grayscale):
return scipy.misc.imread(path, flatten=True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w: i * w + w, :] = image
return img
def inverse_transform(image):
return ((image + 1)* 127.5).astype(np.uint8)
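# ---- Hedged micro-example (added for illustration only) ----
# transform() maps pixels to [-1, 1]; inverse_transform() maps them back to uint8:
# -1.0 -> 0, 0.0 -> 127 (truncated from 127.5), 1.0 -> 255.
assert inverse_transform(np.array([-1.0, 0.0, 1.0])).tolist() == [0, 127, 255]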
def read_image_list(category):
filenames = []
print("list file")
list = os.listdir(category)
list.sort()
for file in list:
if 'jpg' in file or 'png' in file:
filenames.append(category + "/" + file)
print("list file ending!")
length = len(filenames)
perm = np.arange(length)
np.random.shuffle(perm)
filenames = np.array(filenames)
filenames = filenames[perm]
return filenames
class CelebA(object):
def __init__(self, images_path, image_size, attri_id):
self.dataname = "CelebA"
self.dims = image_size*image_size
self.shape = [image_size, image_size, 3]
self.image_size = image_size
self.channel = 3
self.images_path = images_path
self.attri_id = attri_id
self.dom_1_train_data_list, self.dom_2_train_data_list = self.load_celebA()
self.train_len = len(self.dom_1_train_data_list)
def load_celebA(self):
# get the list of image path
return read_image_list_file(self.images_path, is_test= False, attri_id= self.attri_id)
def load_test_celebA(self):
# get the list of image path
return read_image_list_file(self.images_path, is_test= True, attri_id= self.attri_id)
def getShapeForData(self, filenames):
array = [get_image(batch_file, 128, is_crop=True, resize_w=self.image_size,
is_grayscale=False) for batch_file in filenames]
sample_images = np.array(array)
return sample_images
def getTestNextBatch(self, batch_num=0, batch_size=64):
ro_num = len(self.test_data_list) / batch_size
if batch_num % ro_num == 0:
length = len(self.test_data_list)
perm = np.arange(length)
np.random.shuffle(perm)
self.test_data_list =
|
np.array(self.test_data_list)
|
numpy.array
|
from typing import Tuple
import numpy as np
from numpy.linalg import norm, eig
def eigenvalue(A, v):
return np.dot(v, np.dot(A, v)) / np.dot(v, v)
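# ---- Hedged sanity check (added for illustration only) ----
# eigenvalue() is the Rayleigh quotient v.(Av)/(v.v); for an exact eigenvector it
# returns the corresponding eigenvalue, e.g. A = diag(2, 5) with v = e2 gives 5.
_A_demo = np.diag([2.0, 5.0])
assert np.isclose(eigenvalue(_A_demo, np.array([0.0, 1.0])), 5.0)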
def eigendecomp(A: np.ndarray,
eps: float = 0.01) \
-> Tuple[np.ndarray, np.ndarray]:
"""
Calculates the eigendecomposition of matrix A using power method.
:param A: matrix of shape (n, n)
:param eps: precision of iterations
:return: tuple vector, matrix - (eigenvalues, eigenvectors)
"""
n = A.shape[0]
eigvals = np.zeros(n)
eigvecs = np.zeros(A.shape)
for i in range(n):
eig_vec = np.random.rand(n)
eig_val = eigenvalue(A, eig_vec)
while True:
Av = A.dot(eig_vec)
eig_vec_new = Av /
|
np.linalg.norm(Av)
|
numpy.linalg.norm
|
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import skimage.transform
import sys
from core.utils import *
from core.bleu import evaluate
import numpy as np
from core.log import *
from datetime import datetime
from tqdm import tqdm
from core.utils import initialize_uninitialized
class GAN(object):
def __init__(self, sess, generator, discriminator, pretrained_model=None, dis_dropout_keep_prob=1.):
self.generator = generator
self.discriminator = discriminator
self.pretrained_model = pretrained_model
self.dis_dropout_keep_prob = dis_dropout_keep_prob
self.prev_gen_loss = -1
self.prev_disc_acc = -1
self.prev_disc_loss = -1
self.sess = sess
initialize_uninitialized(self.sess)
# ---load pretrained model
self.saver = tf.train.Saver(max_to_keep=40)
if pretrained_model is not None:
print("Pretrained gan loaded")
self.saver.restore(sess=self.sess, save_path=os.path.join(pretrained_model, 'model.ckpt'))
initialize_uninitialized(self.sess)
def get_reward(self, sess, features_batch, emotions_batch, captions_batch, rollout_num):
rewards = []
for i in range(rollout_num):
# given_num between 1 and sequence_length - 1 for a partially completed sentence
feed_dict = {self.generator.features: features_batch, self.generator.emotions: emotions_batch}
generated_captions = sess.run(self.generator.generated_captions, feed_dict)
for seq_length in range(1, self.generator.T):
feed_dict = {self.discriminator.input_x: np.column_stack((generated_captions[:,:seq_length], np.zeros((generated_captions.shape[0], self.generator.T-seq_length), dtype=np.int64))) , self.discriminator.dropout_keep_prob: 1.0}
ypred_for_auc = sess.run(self.discriminator.ypred_for_auc, feed_dict)
ypred = np.array([item[1] for item in ypred_for_auc])
if i == 0:
rewards.append(ypred)
else:
rewards[seq_length - 1] += ypred
# the last token reward
feed_dict = {self.discriminator.input_x: captions_batch[:,:self.generator.T], self.discriminator.dropout_keep_prob: 1.0}
ypred_for_auc = sess.run(self.discriminator.ypred_for_auc, feed_dict)
ypred = np.array([item[1] for item in ypred_for_auc])
if i == 0:
rewards.append(ypred)
else:
# completed sentence reward
rewards[self.generator.T - 1] += ypred
rewards = np.transpose(
|
np.array(rewards)
|
numpy.array
|
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:52
# @Author : zhoujun
import math
import numbers
import random
import cv2
import numpy as np
from skimage.util import random_noise
class RandomNoise:
def __init__(self, random_rate):
self.random_rate = random_rate
def __call__(self, data: dict):
"""
Add random Gaussian noise to the image.
:param data: {'img':,'text_polys':,'texts':,'ignore_tags':}
:return:
"""
if random.random() > self.random_rate:
return data
data['img'] = (random_noise(data['img'], mode='gaussian', clip=True) * 255).astype(data['img'].dtype)
return data
class RandomScale:
def __init__(self, scales, random_rate):
"""
:param scales: list of candidate scale factors
:param random_rate: probability of applying this transform
:return:
"""
self.random_rate = random_rate
self.scales = scales
def __call__(self, data: dict) -> dict:
"""
Randomly pick a scale from scales and resize the image and the text polygons accordingly.
:param data: {'img':,'text_polys':,'texts':,'ignore_tags':}
:return:
"""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
tmp_text_polys = text_polys.copy()
rd_scale = float(np.random.choice(self.scales))
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
tmp_text_polys *= rd_scale
data['img'] = im
data['text_polys'] = tmp_text_polys
return data
class RandomRotateImgBox:
def __init__(self, degrees, random_rate, same_size=False):
"""
:param degrees: rotation angle, either a single number or a list/tuple of two numbers
:param random_rate: probability of applying this transform
:param same_size: whether to keep the output the same size as the original image
:return:
"""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
degrees = (-degrees, degrees)
elif isinstance(degrees, list) or isinstance(degrees, tuple) or isinstance(degrees, np.ndarray):
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
degrees = degrees
else:
raise Exception('degrees must in Number or list or tuple or np.ndarray')
self.degrees = degrees
self.same_size = same_size
self.random_rate = random_rate
def __call__(self, data: dict) -> dict:
"""
Randomly pick an angle within degrees and rotate the image and the text polygons accordingly.
:param data: {'img':,'text_polys':,'texts':,'ignore_tags':}
:return:
"""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
# ---------------------- rotate the image ----------------------
w = im.shape[1]
h = im.shape[0]
angle = np.random.uniform(self.degrees[0], self.degrees[1])
if self.same_size:
nw = w
nh = h
else:
# convert the angle from degrees to radians
rangle = np.deg2rad(angle)
# compute the width and height of the rotated image
nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w))
nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w))
# build the rotation (affine) matrix
rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, 1)
# compute the offset from the original image center to the new image center
rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))
# update the affine matrix with the offset
rot_mat[0, 2] += rot_move[0]
rot_mat[1, 2] += rot_move[1]
# apply the affine transform
rot_img = cv2.warpAffine(im, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
# ---------------------- correct the bbox coordinates ----------------------
# rot_mat is the final rotation matrix
# take the four points of each original bbox and transform them into the rotated coordinate system
rot_text_polys = list()
for bbox in text_polys:
point1 = np.dot(rot_mat, np.array([bbox[0, 0], bbox[0, 1], 1]))
point2 = np.dot(rot_mat, np.array([bbox[1, 0], bbox[1, 1], 1]))
point3 = np.dot(rot_mat, np.array([bbox[2, 0], bbox[2, 1], 1]))
point4 = np.dot(rot_mat, np.array([bbox[3, 0], bbox[3, 1], 1]))
rot_text_polys.append([point1, point2, point3, point4])
data['img'] = rot_img
data['text_polys'] =
|
np.array(rot_text_polys)
|
numpy.array
|
# imports
import numpy as np
import pandas as pd
from scipy.interpolate import griddata, Akima1DInterpolator
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from sklearn.utils.fixes import parse_version
from utils import fit, modify
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import collections, colors, transforms
# formatting
plt.rcParams['legend.title_fontsize'] = 'large'
plt.rcParams['legend.fontsize'] = 'medium'
fontP = FontProperties()
fontP.set_size('medium')
plt.style.use(['science', 'ieee', 'std-colors'])
# plt.style.use(['science', 'scatter'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
def plot_scatter(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False,
figsize=(6, 4), scattersize=2):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=figsize)
#cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
for name, df in dficts.items():
# filter dataframe
if min_cm:
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter]
if z0:
y = y - z0
# take absolute value
if take_abs:
y = np.abs(y)
# plot
#cs = next(cscatter)
ax.scatter(x, y, s=scattersize)
# ax.set_xlabel(xparameter, fontsize=18)
# ax.set_ylabel(yparameter, fontsize=18)
# ax.grid(alpha=0.125)
# ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper right', fancybox=True, shadow=False)
return fig, ax
def plot_mean(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False, fit_function=None):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
cerror = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
fig, ax = plt.subplots(figsize=(7.25, 4.25))
means = []
for name, df in dficts.items():
# filter dataframe
df = df[df['cm'] > min_cm]
y = df[yparameter] - z0
# take absolute value
if take_abs:
y = np.abs(y)
yerr = np.std(y)
y = np.mean(y)
means.append(y)
# plot
cs = next(cscatter)
ax.errorbar(name, y, yerr=yerr * 2, fmt='o', color=cs, ecolor=next(cerror), elinewidth=3, capsize=4, alpha=0.75)
ax.scatter(name, y, color=cs)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
# fit the function
if fit_function is not None:
names = list(dficts.keys())
popt, pcov, fit_func = fit.fit(names, means, fit_function=fit_function)
# plot fitted function
xfit = np.linspace(0, np.max(names), 100)
ax.plot(xfit, fit_function(xfit, *popt), color='black', linewidth=2, linestyle='--', alpha=0.5)
return fig, ax
def plot_errorbars(dfbicts, xparameter='index', yparameter='z', min_cm=0.5, z0=0):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=(7.25, 4.25))
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dfbicts.keys()))))
cerror = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dfbicts.keys()))))
for name, df in dfbicts.items():
# filter dataframe
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter] - z0
# plot
cs = next(cscatter)
ax.errorbar(x, y, yerr=df.z_std * 2, fmt='o', color=cs, ecolor=next(cerror), elinewidth=1, capsize=2, alpha=0.75)
ax.scatter(x, y, color=cs)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
ax.legend(dfbicts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
return fig, ax
def plot_fit_and_scatter(fit_function, dficts, xparameter='index', yparameter='z', min_cm=0.5, z0=0, auto_format=False):
"""
Plot fitted curve and data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=(7.25, 4.25))
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
for name, df in dficts.items():
# drop NaN's
df = df.dropna(axis=0, subset=[yparameter])
# filter dataframe
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter] - z0
# plot scatter points
cs = next(cscatter)
ax.scatter(x, y, color=cs)
# fit the function
popt, pcov, fit_func = fit.fit(x, y, fit_function=fit_function)
# plot fitted function
xfit = np.linspace(0, x.max(), 100)
ax.plot(xfit, fit_function(xfit, popt[0], popt[1], popt[2]), color=cs, linewidth=3, alpha=0.9)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
if auto_format:
ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
return fig, ax
def plot_dfbicts_local(dfbicts, parameters='rmse_z', h=1, colors=None, linestyles=None, show_legend=False, scale=None,
scatter_on=True, scatter_size=10,
label_dict=None,
ylabel=None, xlabel=None, semilogx=False, nrows=None, ncols=None):
"""
Notes:
1. Plots the dataframe index on x-axis.
2. If only one parameter is passed (len(parameters) == 1), then no ax2 is returned.
:param dfbicts:
:param parameters:
:param h:
:param colors:
:param linestyles:
:param show_legend:
:param scale:
:param scatter_on:
:param scatter_size:
:param ylabel:
:param xlabel:
:return:
"""
# format figure
if isinstance(colors, list):
colors = colors
cscatter = None
cscatterr = None
elif colors == 'Blues':
cscatter = iter(cm.Blues(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
cscatterr = iter(cm.Blues(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
elif colors == 'inferno':
cscatter = iter(cm.inferno(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
cscatterr = iter(cm.inferno(np.linspace(0.1, 0.9, len(dfbicts.keys()))))
else:
# get colors from cycler
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if len(dfbicts) > len(colors):
colors_repeats = colors + colors
colors = colors_repeats[:len(dfbicts)]
cscatter = None
cscatterr = None
if isinstance(linestyles, list):
lstyle = iter(linestyles)
else:
lstyle = iter('-' for i in list(dfbicts.keys()))
if not scale:
if nrows:
fig, [ax, ax2] = plt.subplots(nrows=2, sharex=True)
elif ncols:
fig, [ax, ax2] = plt.subplots(ncols=2)
else:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
size_x_pixels, size_y_pixels = fig.get_size_inches() * fig.dpi
plt.close(fig)
if nrows:
fig, [ax, ax2] = plt.subplots(nrows=2, sharex=True, figsize=(size_x_inches * scalex, size_y_inches * scaley))
elif ncols:
fig, [ax, ax2] = plt.subplots(ncols=2, figsize=(size_x_inches * scalex, size_y_inches * scaley))
else:
fig, ax = plt.subplots(figsize=(size_x_inches*scalex, size_y_inches*scaley))
# organize data
if (isinstance(parameters, str)) or (isinstance(parameters, list) and len(parameters) == 1):
parameter = parameters
parameterr = None
parameter3 = None
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 2:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = None
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 3:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = parameters[2]
parameter4 = None
elif isinstance(parameters, list) and len(parameters) == 4:
parameter = parameters[0]
parameterr = parameters[1]
parameter3 = parameters[2]
parameter4 = parameters[3]
if parameter == 'rmse_z':
for item, clr in zip(dfbicts.items(), colors):
            if cscatter is not None:
                # advance the colormap/linestyle iterators and actually apply them
                cs = next(cscatter)
                ls = next(lstyle)
                ax.plot(item[1].index, item[1][parameter] / h, color=cs, linestyle=ls)
                if scatter_on:
                    ax.scatter(item[1].index, item[1][parameter] / h, color=cs, s=scatter_size)
else:
if label_dict:
lbl = label_dict[item[0]]['label']
else:
lbl = None
ls = next(lstyle)
if scatter_on:
ax.scatter(item[1].index, item[1][parameter] / h, color=clr, s=scatter_size)
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
else:
ax.plot(item[1].index, item[1][parameter] / h, color=clr, label=lbl, linestyle=ls)
else:
for item, clr in zip(dfbicts.items(), colors):
if cscatter is not None:
ax.plot(item[1].index, item[1][parameter])
if scatter_on:
ax.scatter(item[1].index, item[1][parameter])
else:
if label_dict:
lbl = label_dict[item[0]]['label']
else:
lbl = None
ls = next(lstyle)
if semilogx:
ax.semilogx(item[1].index, item[1][parameter] / h)
else:
if scatter_on:
ax.scatter(item[1].index, item[1][parameter] / h, s=scatter_size, color=clr)
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
else:
ax.plot(item[1].index, item[1][parameter] / h, color=clr, linestyle=ls, label=lbl)
if parameterr is not None:
if not nrows:
ax2 = ax.twinx()
for item, clr in zip(dfbicts.items(), colors):
if nrows:
ax2.plot(item[1].index, item[1][parameterr], color=clr)
else:
ax2.plot(item[1].index, item[1][parameterr], color=clr, linestyle='--')
if parameter3 is not None:
ax2.plot(item[1].index, item[1][parameter3], color=clr, linestyle=':')
if parameter4 is not None:
ax2.plot(item[1].index, item[1][parameter4], color=clr, linestyle='-.')
if ylabel:
ax.set_ylabel(ylabel)
elif h != 1 and parameter == 'rmse_z':
ax.set_ylabel(r'$\sigma_{z}\left(z\right) / h$')
elif parameter == 'rmse_z':
ax.set_ylabel(r'$\sigma_{z}\left(z\right)$')
else:
ax.set_ylabel(parameter)
if xlabel:
if nrows:
ax2.set_xlabel(xlabel)
else:
ax.set_xlabel(xlabel)
    else:
        if nrows:
            ax2.set_xlabel(r'z ($\mu m$)')
        else:
            ax.set_xlabel(r'z ($\mu m$)')
ax.grid(alpha=0.25)
if nrows:
ax2.grid(alpha=0.25)
if show_legend:
ax.legend(dfbicts.keys(), title=r'$\sigma$')
if parameterr is not None:
return fig, ax, ax2
else:
return fig, ax
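# --- Hedged usage sketch (not in the original module) ------------------------
# Illustrates a plausible call: each value in dfbicts is a per-bin DataFrame
# indexed by z with an 'rmse_z' column; h is a normalising depth. All names and
# numbers here are assumptions for illustration.
def _example_plot_dfbicts_local(dfbicts):
    fig, ax = plot_dfbicts_local(dfbicts, parameters='rmse_z', h=50.0,
                                 scale=1.25, scatter_on=True, show_legend=True)
    return fig, ax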
def plot_dfbicts_global(dfbicts, parameters='rmse_z', xlabel='parameter', h=1, print_values=False,
scale=None, fig=None, ax=None, ax2=None, ax2_ylim=None, color=None, scatter_size=10,
smooth=False, ylabel=None):
if fig is None and ax is None:
if not scale:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
fig, ax = plt.subplots(figsize=(size_x_inches * scalex, size_y_inches * scaley))
if ax2 is None and isinstance(parameters, list) and len(parameters) > 1:
ax2 = ax.twinx()
# organize data
    if isinstance(parameters, str) or len(parameters) == 1:
        # unwrap a single-element list so the column lookups below receive a string key
        parameter = parameters if isinstance(parameters, str) else parameters[0]
parameterr = None
parameterrr = None
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
elif isinstance(parameters, list) and len(parameters) == 2:
parameter = parameters[0]
parameterr = parameters[1]
parameterrr = None
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
means_prr = np.array([m[parameterr].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means, means_prr)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
means_prr = np.array([x[2] for x in sort_by_name])
elif isinstance(parameters, list) and len(parameters) == 3:
parameter = parameters[0]
parameterr = parameters[1]
parameterrr = parameters[2]
names = dfbicts.keys()
means = np.array([m[parameter].mean() for m in dfbicts.values()])
means_prr = np.array([m[parameterr].mean() for m in dfbicts.values()])
means_prrr = np.array([m[parameterrr].mean() for m in dfbicts.values()])
sort_by_name = sorted(list(zip(names, means, means_prr, means_prrr)), key=lambda x: x[0])
names = [x[0] for x in sort_by_name]
means = np.array([x[1] for x in sort_by_name])
means_prr = np.array([x[2] for x in sort_by_name])
means_prrr = np.array([x[3] for x in sort_by_name])
else:
raise ValueError("parameters must be a string or a list of strings")
# smooth data
if smooth:
names = np.array(names)
names_interp = np.linspace(np.min(names), np.max(names), 500)
means_interp = Akima1DInterpolator(names, means)(names_interp)
means = means_interp
if parameterr:
means_prr_interp = Akima1DInterpolator(names, means_prr)(names_interp)
means_prr = means_prr_interp
if parameterrr:
means_prrr_interp = Akima1DInterpolator(names, means_prrr)(names_interp)
means_prrr = means_prrr_interp
names = names_interp
# plot figure
if parameter == 'rmse_z' and h != 1:
ax.plot(names, means / h, color=color)
if scatter_size:
ax.scatter(names, means / h, s=scatter_size, color=color)
else:
ax.plot(names, means, color=color)
if scatter_size:
ax.scatter(names, means, s=scatter_size, color=color)
if parameter == 'rmse_z':
ax.set_ylabel(r'$\overline{\sigma_{z}} / h$')
elif ylabel:
ax.set_ylabel(ylabel)
else:
ax.set_ylabel(parameter)
if parameterr is not None and parameterrr is None:
ax2.plot(names, means_prr, linestyle='--', color=color)
ax2.set_ylim(ax2_ylim)
elif parameterrr is not None:
ax2.plot(names, means_prr, color=color, linestyle='--')
ax2.plot(names, means_prrr, color=color, linestyle=':')
ax2.set_ylim(ax2_ylim)
ax.set_xlabel(xlabel)
ax.grid(alpha=0.25)
# print results
if print_values:
print(names)
print('{}: {}'.format(parameter, means / h))
if parameterr:
print('{}: {}'.format(parameterr, means_prr))
return fig, ax, ax2
def plot_dfbicts_list_global(dfbicts_list, parameters='rmse_z', xlabel='parameter', h=1, print_values=False,
scale=None, colors=None, ax2_ylim=None, scatter_size=10, smooth=False, ylabel=None):
# format figure
if not colors:
# get colors from cycler
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if not scale:
fig, ax = plt.subplots()
else:
if isinstance(scale, (int, float)):
scalex, scaley = scale, scale
else:
scalex, scaley = scale[0], scale[1]
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
fig, ax = plt.subplots(figsize=(size_x_inches * scalex, size_y_inches * scaley))
if isinstance(parameters, list) and len(parameters) > 1:
ax2 = ax.twinx()
else:
ax2 = None
for dfbicts, color in zip(dfbicts_list, colors):
fig, ax, ax2 = plot_dfbicts_global(dfbicts, parameters, xlabel, h, print_values,
scale=scale, fig=fig, ax=ax, ax2=ax2, ax2_ylim=ax2_ylim,
color=color, scatter_size=scatter_size, smooth=smooth, ylabel=ylabel)
return fig, ax, ax2
def plot_scatter_z_color(dficts, xparameter='x', yparameter='y', zparameter='z', min_cm=0.5, z0=0, take_abs=False):
"""
Plot all data (xparameter, yparameter, zparameter) as scatter points with z-parameter as colors.
"""
for name, df in dficts.items():
ax = plt.subplot()
# filter dataframe
df = df[df['cm'] > min_cm]
# get x and y values
x = df[xparameter]
y = df[yparameter]
# adjust for z-offset
z = df[zparameter] - z0
# take absolute value
if take_abs:
z = np.abs(z)
# plot
data = ax.scatter(x, y, c=z)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.set_title(name, fontsize=18)
ax.grid(alpha=0.125)
# color bar
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.5)
plt.colorbar(data, cax=cax)
plt.show()
plt.close('all')
# --------------------------------- DATAFRAMES ---------------------------------------------------------------------
def plot_scatter_3d(df, fig=None, ax=None, elev=5, azim=-40, color=None, alpha=0.75):
"""
:param df: dataframe with 'x', 'y', and 'z' columns
:param fig: figure
:param ax: axes to plot on
:param elev: the elevation angle in the z-plane.
:param azim: the azimuth angle in the x-y plane.
:return:
"""
if not fig:
fig = plt.figure(figsize=(6, 6))
if not ax:
ax = fig.add_subplot(projection='3d')
if isinstance(df, list):
x, y, z = df
else:
x, y, z = df.x, df.y, df.z
if color is None:
color = z
ax.scatter(x, y, z, marker='o', c=color, alpha=alpha)
ax.view_init(elev, azim)
return fig, ax
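# --- Hedged usage sketch (not in the original module) ------------------------
# Builds a synthetic particle cloud and passes it to plot_scatter_3d as an
# [x, y, z] list, which the function accepts in place of a DataFrame.
def _example_plot_scatter_3d():
    rng = np.random.default_rng(0)
    x = rng.uniform(0, 512, 200)
    y = rng.uniform(0, 512, 200)
    z = 0.05 * x + 0.02 * y + rng.normal(0.0, 1.0, 200)
    fig, ax = plot_scatter_3d([x, y, z], elev=10, azim=-60)
    return fig, ax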
def plot_scatter_3d_multi_angle(df, z_param='z'):
fig = plt.figure(figsize=(6.5, 5))
for i, v in zip(np.arange(1, 5), [45, 0, 315, 270]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(df.x, df.y, df[z_param], c=df[z_param])
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
plt.suptitle('title', y=0.875)
plt.subplots_adjust(hspace=-0.1, wspace=0.15)
return fig, ax
def plot_heatmap(df, fig=None, ax=None):
# drop NaNs
dfc = df.dropna(axis=0, subset=['z'])
# move x, y, z series to numpy arrays
x = dfc.x.to_numpy()
y = dfc.y.to_numpy()
z = dfc.z.to_numpy()
# get spatial coordinate extents
xspace = np.max(x) - np.min(x)
yspace = np.max(y) - np.min(y)
zspace = np.max(z) - np.min(z)
# contour surface levels: 1 level = 1 micron
lvls_surface = int(np.round(zspace + 1))
lvls_lines = int(lvls_surface / 5)
# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.
ngridx = int(xspace)
ngridy = int(yspace)
# Create grid values first.
xi = np.linspace(np.min(x), np.max(x), ngridx)
yi = np.linspace(np.min(y), np.max(y), ngridy)
# Linearly interpolate the data (x, y) on a grid defined by (xi, yi).
zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='linear')
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=(8, 8))
# plot level surfaces
cntr = ax.contourf(xi, yi, zi, levels=lvls_surface, cmap="RdBu_r")
# plot level lines
ax.contour(xi, yi, zi, levels=lvls_lines, linewidths=0.5, colors='gray')
# plot data points
ax.scatter(x, y, c=z, cmap="RdBu_r")
cbar = fig.colorbar(cntr, ax=ax)
cbar.ax.set_title(r'$\delta z$')
ax.set_xlabel('$x$', fontsize=18)
ax.set_ylabel(r'$y$', fontsize=18)
return fig, ax
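# --- Hedged usage sketch (not in the original module) ------------------------
# Interpolates a small synthetic set of scattered (x, y, z) samples onto a grid
# and renders the level surfaces; pandas is imported locally so the sketch is
# self-contained.
def _example_plot_heatmap():
    import pandas as pd
    rng = np.random.default_rng(1)
    x = rng.uniform(0, 100, 400)
    y = rng.uniform(0, 100, 400)
    z = 5.0 * np.sin(x / 20.0) + 0.05 * y + rng.normal(0.0, 0.2, 400)
    df = pd.DataFrame({'x': x, 'y': y, 'z': z})
    return plot_heatmap(df)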
# ------------------------------------- ARRAYS ---------------------------------------------------------------------
def scatter_xy_color_z(df, param_z):
fig, ax = plt.subplots()
sc = ax.scatter(df.x, df.y, c=df[param_z], s=3)
plt.colorbar(sc, shrink=0.75)
ax.set_xlabel('x')
ax.set_ylabel('y')
return fig
def scatter_z_by_xy(df, z_params):
if not isinstance(z_params, list):
z_params = [z_params]
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(size_x_inches*2, size_y_inches))
for z_param in z_params:
ax[0].scatter(df.x, df[z_param], s=3)
ax[1].scatter(df.y, df[z_param], s=3, label=z_param)
ax[0].set_xlabel('x')
ax[0].set_ylabel('z')
ax[1].set_xlabel('y')
ax[1].legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.tight_layout()
return fig, ax
def plot_fitted_plane_and_points(df, dict_fit_plane):
param_z = dict_fit_plane['z_f']
rmse, r_squared = dict_fit_plane['rmse'], dict_fit_plane['r_squared']
tilt_x, tilt_y = dict_fit_plane['tilt_x_degrees'], dict_fit_plane['tilt_y_degrees']
px, py, pz = dict_fit_plane['px'], dict_fit_plane['py'], dict_fit_plane['pz']
normal = dict_fit_plane['normal']
d = dict_fit_plane['d']
fig = plt.figure(figsize=(6.5, 5))
for i, v in zip(np.arange(1, 5), [45, 0, 315, 270]):
ax = fig.add_subplot(2, 2, i, projection='3d')
sc = ax.scatter(df.x, df.y, df[param_z], c=df[param_z], s=1)
ax.plot_surface(px, py, pz, alpha=0.4, color='red')
ax.view_init(5, v)
ax.patch.set_alpha(0.0)
if i == 2:
plt.colorbar(sc, shrink=0.5)
ax.get_xaxis().set_ticks([])
ax.set_ylabel(r'$y \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
elif i == 4:
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_zlabel(r'$z \: (\mu m)$')
else:
ax.set_xlabel(r'$x \: (pixels)$')
ax.set_ylabel(r'$y \: (pixels)$')
ax.get_zaxis().set_ticklabels([])
# title
plt.suptitle('RMSE: {}, '.format(np.round(rmse, 3)) +
r'$R^2$' + ': {}'.format(np.round(r_squared, 3)) + '\n' +
r'$(\theta_{x}, \theta_{y})=$' + ' ({}, {} deg.)'.format(np.round(tilt_x, 3),
|
np.round(tilt_y, 3)
|
numpy.round
|
import numpy as np
import pdb
from scipy.interpolate import interp1d
def rotate(vec, theta):
'''
rotate a 2D vector.
Args:
vec (1darray): the 2D vector.
theta (float): the angle for rotation.
Returns:
1darray: the rotated vector.
'''
mat = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
return mat.dot(np.transpose(vec)).T
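# Hedged worked example (not in the original module): rotating the unit vector
# (1, 0) by pi/2 gives approximately (0, 1); a stack of row vectors is rotated
# in a single call.
def _example_rotate():
    v = np.array([1.0, 0.0])
    print(rotate(v, np.pi / 2))        # ~ [0., 1.]
    vs = np.array([[1.0, 0.0], [0.0, 2.0]])
    print(rotate(vs, np.pi / 2))       # ~ [[0., 1.], [-2., 0.]]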
def intersection(line, theta, align):
'''
get the intersection point from direction specified by theta.
Args:
line (2darray): an array of points.
theta (float): direction of the intersection line.
align (len-2 tuple): align to this point in the free dimension.
Returns:
tuple: the nearest intersection point.
'''
def trans(line, theta):
return rotate(line, - theta)[...,::-1]
def trans_r(line, theta):
return rotate(
|
np.asarray(line)
|
numpy.asarray
|
"""
drivese_omdao.py
Created by <NAME>, <NAME> and <NAME> 2014.
Copyright (c) NREL. All rights reserved.
Functions nacelle_example_5MW_baseline_[34]pt() did not define blade_mass
We've added prob['blade_mass'] = 17740.0 (copied from hubse_omdao.py)
GNS 2019 05 13
GNS 2019 06 05: nacelle_example_*() now return prob
Classes with declarations like
class ObjName_OM(Component)
are OpenMDAO wrappers for pure-python objects that define the parts of a wind turbine drivetrain.
These objects are defined in drivese_components.py (which contains NO OpenMDAO code).
"""
import numpy as np
import sys
from drivese.drivese_components import LowSpeedShaft4pt, LowSpeedShaft3pt, Gearbox, MainBearing, Bedplate, YawSystem, \
Transformer, HighSpeedSide, Generator, NacelleSystemAdder, AboveYawMassAdder, RNASystemAdder
from drivese.hubse_omdao import HubSE, HubMassOnlySE, Hub_CM_Adder_OM
from openmdao.api import Group, Component, IndepVarComp, Problem, view_connections
#-------------------------------------------------------------------------
# Components
#-------------------------------------------------------------------------
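# Hedged illustration (not part of drivese): the wrapper pattern used throughout
# this file, in miniature. A plain-python object owns the physics in compute(),
# and the OpenMDAO Component only declares params/outputs and delegates to it.
# The mass relation below is a made-up placeholder, not an NREL model.
class _ExampleWidget(object):
    def compute(self, rotor_diameter):
        return 1.5e-3 * rotor_diameter ** 2.6

class _ExampleWidget_OM(Component):
    def __init__(self):
        super(_ExampleWidget_OM, self).__init__()
        self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
        self.add_output('widget_mass', val=0.0, units='kg', desc='example component mass')
        self.widget = _ExampleWidget()
    def solve_nonlinear(self, inputs, outputs, resid):
        outputs['widget_mass'] = self.widget.compute(inputs['rotor_diameter'])
        return outputs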
class LowSpeedShaft4pt_OM(Component):
''' LowSpeedShaft class
The LowSpeedShaft class is used to represent the low speed shaft component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, mb1Type, mb2Type, IEC_Class, debug=False):
super(LowSpeedShaft4pt_OM, self).__init__()
# variables
self.add_param('rotor_bending_moment_x', val=0.0, units='N*m', desc='The bending moment about the x axis')
self.add_param('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_param('rotor_bending_moment_z', val=0.0, units='N*m', desc='The bending moment about the z axis')
self.add_param('rotor_thrust', val=0.0, units='N', desc='The force along the x axis applied at hub center')
self.add_param('rotor_force_y', val=0.0, units='N', desc='The force along the y axis applied at hub center')
self.add_param('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_param('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='Gearbox mass')
self.add_param('carrier_mass', val=0.0, units='kg', desc='Carrier mass')
self.add_param('overhang', val=0.0, units='m', desc='Overhang distance')
self.add_param('distance_hub2mb', val=0.0, units='m', desc='distance between hub center and upwind main bearing')
        self.add_param('drivetrain_efficiency', val=0.0, desc='overall drivetrain efficiency')
# parameters
self.add_param('shrink_disc_mass', val=0.0, units='kg', desc='Mass of the shrink disc')
self.add_param('gearbox_cm', val=np.zeros(3), units='m', desc='center of mass of gearbox')
self.add_param('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_param('flange_length', val=0.0, units='m', desc='flange length')
self.add_param('shaft_angle', val=0.0, units='rad', desc='Angle of the LSS inclination with respect to the horizontal')
self.add_param('shaft_ratio', val=0.0, desc='Ratio of inner diameter to outer diameter. Leave zero for solid LSS')
self.add_param('hub_flange_thickness', val=0.0, desc='Shell thickness for spherical hub')
# outputs
self.add_output('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_output('lss_design_bending_load', val=0.0, units='N', desc='lss design bending load')
self.add_output('lss_length', val=0.0, units='m', desc='lss length')
self.add_output('lss_diameter1', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_output('lss_diameter2', val=0.0, units='m', desc='lss outer diameter at second bearing')
self.add_output('lss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('lss_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('lss_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('lss_mb1_facewidth', val=0.0, units='m', desc='facewidth of upwind main bearing')
self.add_output('lss_mb2_facewidth', val=0.0, units='m', desc='facewidth of main bearing')
self.add_output('lss_mb1_mass', val=0.0, units='kg', desc='main bearing mass')
self.add_output('lss_mb2_mass', val=0.0, units='kg', desc='second bearing mass')
self.add_output('lss_mb1_cm', val=np.zeros(3), units='m', desc='main bearing 1 center of mass')
self.add_output('lss_mb2_cm', val=np.zeros(3), units='m', desc='main bearing 2 center of mass')
self.lss4pt = LowSpeedShaft4pt(mb1Type, mb2Type, IEC_Class, debug=debug)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['lss_design_torque'], outputs['lss_design_bending_load'], outputs['lss_length'], outputs['lss_diameter1'], outputs['lss_diameter2'], outputs['lss_mass'], outputs['lss_cm'], outputs['lss_I'], \
outputs['lss_mb1_facewidth'], outputs['lss_mb2_facewidth'], outputs['lss_mb1_mass'], outputs['lss_mb2_mass'], outputs['lss_mb1_cm'], outputs['lss_mb2_cm']) \
= self.lss4pt.compute(inputs['rotor_diameter'], inputs['rotor_mass'], inputs['rotor_thrust'], inputs['rotor_force_y'], inputs['rotor_force_z'], \
inputs['rotor_bending_moment_x'], inputs['rotor_bending_moment_y'], inputs['rotor_bending_moment_z'], \
inputs['overhang'], inputs['machine_rating'], inputs['drivetrain_efficiency'], \
inputs['gearbox_mass'], inputs['carrier_mass'], inputs['gearbox_cm'], inputs['gearbox_length'], \
inputs['shrink_disc_mass'], inputs['flange_length'], inputs['distance_hub2mb'], inputs['shaft_angle'], inputs['shaft_ratio'], \
inputs['hub_flange_thickness'])
return outputs
#-------------------------------------------------------------------------
class LowSpeedShaft3pt_OM(Component):
''' LowSpeedShaft class
The LowSpeedShaft class is used to represent the low speed shaft component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, mb1Type, IEC_Class, debug=False):
super(LowSpeedShaft3pt_OM, self).__init__()
# variables
self.add_param('rotor_bending_moment_x', val=0.0, units='N*m', desc='The bending moment about the x axis')
self.add_param('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_param('rotor_bending_moment_z', val=0.0, units='N*m', desc='The bending moment about the z axis')
self.add_param('rotor_thrust', val=0.0, units='N', desc='The force along the x axis applied at hub center')
self.add_param('rotor_force_y', val=0.0, units='N', desc='The force along the y axis applied at hub center')
self.add_param('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_param('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='Gearbox mass')
self.add_param('carrier_mass', val=0.0, units='kg', desc='Carrier mass')
self.add_param('overhang', val=0.0, units='m', desc='Overhang distance')
self.add_param('distance_hub2mb', val=0.0, units='m', desc='distance between hub center and upwind main bearing')
        self.add_param('drivetrain_efficiency', val=0.0, desc='overall drivetrain efficiency')
# parameters
self.add_param('shrink_disc_mass', val=0.0, units='kg', desc='Mass of the shrink disc')
self.add_param('gearbox_cm', val=np.zeros(3), units='m', desc='center of mass of gearbox')
self.add_param('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_param('flange_length', val=0.0, units='m', desc='flange length')
self.add_param('shaft_angle', val=0.0, units='rad', desc='Angle of the LSS inclination with respect to the horizontal')
self.add_param('shaft_ratio', val=0.0, desc='Ratio of inner diameter to outer diameter. Leave zero for solid LSS')
self.add_param('hub_flange_thickness', val=0.0, desc='Shell thickness for spherical hub')
# outputs
self.add_output('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_output('lss_design_bending_load', val=0.0, units='N', desc='lss design bending load')
self.add_output('lss_length', val=0.0, units='m', desc='lss length')
self.add_output('lss_diameter1', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_output('lss_diameter2', val=0.0, units='m', desc='lss outer diameter at second bearing')
self.add_output('lss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('lss_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('lss_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('lss_mb1_facewidth', val=0.0, units='m', desc='facewidth of upwind main bearing')
self.add_output('lss_mb2_facewidth', val=0.0, units='m', desc='facewidth of main bearing')
self.add_output('lss_mb1_mass', val=0.0, units='kg', desc='main bearing mass')
self.add_output('lss_mb2_mass', val=0.0, units='kg', desc='second bearing mass')
self.add_output('lss_mb1_cm', val=np.zeros(3), units='m', desc='main bearing 1 center of mass')
self.add_output('lss_mb2_cm', val=np.zeros(3), units='m', desc='main bearing 2 center of mass')
self.lss3pt = LowSpeedShaft3pt(mb1Type, IEC_Class, debug=debug)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['lss_design_torque'], outputs['lss_design_bending_load'], outputs['lss_length'], outputs['lss_diameter1'], outputs['lss_diameter2'], outputs['lss_mass'], outputs['lss_cm'], outputs['lss_I'], \
outputs['lss_mb1_facewidth'], outputs['lss_mb2_facewidth'], outputs['lss_mb1_mass'], outputs['lss_mb2_mass'], outputs['lss_mb1_cm'], outputs['lss_mb2_cm']) \
= self.lss3pt.compute(inputs['rotor_diameter'], inputs['rotor_mass'], inputs['rotor_thrust'], inputs['rotor_force_y'], inputs['rotor_force_z'], \
inputs['rotor_bending_moment_x'], inputs['rotor_bending_moment_y'], inputs['rotor_bending_moment_z'], \
inputs['overhang'], inputs['machine_rating'], inputs['drivetrain_efficiency'], \
inputs['gearbox_mass'], inputs['carrier_mass'], inputs['gearbox_cm'], inputs['gearbox_length'], \
inputs['shrink_disc_mass'], inputs['flange_length'], inputs['distance_hub2mb'], inputs['shaft_angle'], inputs['shaft_ratio'],
inputs['hub_flange_thickness'])
return outputs
#-------------------------------------------------------------------------
class MainBearing_OM(Component):
''' MainBearings class
The MainBearings class is used to represent the main bearing components of a wind turbine drivetrain. It contains two subcomponents (main bearing and second bearing) which also inherit from the SubComponent class.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, bearing_position):
super(MainBearing_OM, self).__init__()
# variables
self.add_param('bearing_mass', val=0.0, units='kg', desc='bearing mass from LSS model')
self.add_param('lss_diameter', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_param('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('lss_mb_cm', val=np.array([0., 0., 0.]), units='m', desc='x,y,z location from shaft model')
# returns
self.add_output('mb_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('mb_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('mb_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.mb = MainBearing(bearing_position)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['mb_mass'], outputs['mb_cm'], outputs['mb_I']) \
= self.mb.compute(inputs['bearing_mass'], inputs['lss_diameter'], inputs['lss_design_torque'], inputs['rotor_diameter'], inputs['lss_mb_cm'])
return outputs
#-------------------------------------------------------------------------
class Gearbox_OM(Component):
''' Gearbox class
The Gearbox class is used to represent the gearbox component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, gear_configuration, shaft_factor, debug=False):
super(Gearbox_OM, self).__init__()
# variables
self.add_param('gear_ratio', val=0.0, desc='overall gearbox speedup ratio')
self.add_param('planet_numbers', val=np.array([0, 0, 0,]), desc='number of planets in each stage', pass_by_obj=True)
self.add_param('rotor_rpm', val=0.0, units='rpm', desc='rotor rpm at rated power')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('rotor_torque', val=0.0, units='N*m', desc='rotor torque at rated power')
self.add_param('gearbox_input_xcm', val=0.00, units='m', desc='gearbox position along x-axis')
# outputs
        self.add_output('stage_masses', val=np.zeros(3), units='kg', desc='individual gearbox stage masses')
        self.add_output('gearbox_mass', val=0.0, units='kg', desc='overall component mass')
        self.add_output('gearbox_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
        self.add_output('gearbox_I', val=np.zeros(3), desc='moments of inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_output('gearbox_height', val=0.0, units='m', desc='gearbox height')
self.add_output('gearbox_diameter', val=0.0, units='m', desc='gearbox diameter')
self.gearbox = Gearbox(gear_configuration, shaft_factor, debug=debug)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['stage_masses'], outputs['gearbox_mass'], outputs['gearbox_cm'], outputs['gearbox_I'], outputs['gearbox_length'], outputs['gearbox_height'], outputs['gearbox_diameter']) \
= self.gearbox.compute(inputs['gear_ratio'], inputs['planet_numbers'], inputs['rotor_rpm'], inputs['rotor_diameter'], inputs['rotor_torque'], inputs['gearbox_input_xcm'])
return outputs
#-------------------------------------------------------------------
class HighSpeedSide_OM(Component):
'''
HighSpeedShaft class
The HighSpeedShaft class is used to represent the high speed shaft and mechanical brake components of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self):
super(HighSpeedSide_OM, self).__init__()
# variables
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('rotor_torque', val=0.0, units='N*m', desc='rotor torque at rated power')
self.add_param('gear_ratio', val=0.0, desc='overall gearbox ratio')
self.add_param('lss_diameter', val=0.0, units='m', desc='low speed shaft outer diameter')
self.add_param('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_param('gearbox_height', val=0.0, units='m', desc='gearbox height')
self.add_param('gearbox_cm', val=np.zeros(3), units='m', desc='gearbox cm [x,y,z]')
self.add_param('hss_input_length', val=0.0, units='m', desc='high speed shaft length determined by user. Default 0.5m')
# returns
self.add_output('hss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('hss_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('hss_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('hss_length', val=0.0, desc='length of high speed shaft')
self.hss = HighSpeedSide()
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['hss_mass'], outputs['hss_cm'], outputs['hss_I'], outputs['hss_length']) \
= self.hss.compute(inputs['rotor_diameter'], inputs['rotor_torque'], inputs['gear_ratio'], inputs['lss_diameter'], inputs['gearbox_length'], inputs['gearbox_height'], inputs['gearbox_cm'], inputs['hss_input_length'])
return outputs
#----------------------------------------------------------------------------------------------
class Generator_OM(Component):
'''Generator class
The Generator class is used to represent the generator of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, drivetrain_design):
super(Generator_OM, self).__init__()
# variables
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('machine_rating', val=0.0, units='kW', desc='machine rating of generator')
self.add_param('gear_ratio', val=0.0, desc='overall gearbox ratio')
self.add_param('hss_length', val=0.0, units='m', desc='length of high speed shaft and brake')
self.add_param('hss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='cm of high speed shaft and brake')
self.add_param('rotor_rpm', val=0.0, units='rpm', desc='Speed of rotor at rated power')
#returns
self.add_output('generator_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('generator_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('generator_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.gen = Generator(drivetrain_design)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['generator_mass'], outputs['generator_cm'], outputs['generator_I']) \
= self.gen.compute(inputs['rotor_diameter'], inputs['machine_rating'], inputs['gear_ratio'], inputs['hss_length'], inputs['hss_cm'], inputs['rotor_rpm'])
return outputs
#--------------------------------------------
class RNASystemAdder_OM(Component):
''' RNASystem class
This analysis is only to be used in placing the transformer of the drivetrain.
The Rotor-Nacelle-Group class is used to represent the RNA of the turbine without the transformer and bedplate (to resolve circular dependency issues).
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self):
super(RNASystemAdder_OM, self).__init__()
# inputs
self.add_param('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_param('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_param('lss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('mb1_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('mb2_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('gearbox_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('hss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('generator_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('overhang', val=0.0, units='m', desc='nacelle overhang')
self.add_param('rotor_mass', val=0.0, units='kg', desc='component mass')
self.add_param('machine_rating', val=0.0, units='kW', desc='machine rating')
# returns
self.add_output('RNA_mass', val=0.0, units='kg', desc='mass of total RNA')
self.add_output('RNA_cm', val=0.0, units='m', desc='RNA CM along x-axis')
self.rnaadder = RNASystemAdder()
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['RNA_mass'], outputs['RNA_cm']) \
= self.rnaadder.compute(inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'], inputs['gearbox_mass'], inputs['hss_mass'], inputs['generator_mass'], \
inputs['lss_cm'], inputs['mb1_cm'], inputs['mb2_cm'], inputs['gearbox_cm'], inputs['hss_cm'], inputs['generator_cm'], inputs['overhang'], inputs['rotor_mass'], inputs['machine_rating'])
return outputs
#-------------------------------------------------------------------------------
class Transformer_OM(Component):
''' Transformer class
The transformer class is used to represent the transformer of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component if it is in fact uptower'''
def __init__(self, uptower_transformer):
super(Transformer_OM, self).__init__()
# inputs
self.add_param('machine_rating', val=0.0, units='kW', desc='machine rating of the turbine')
        self.add_param('tower_top_diameter', val=0.0, units='m', desc='tower top diameter for comparison of nacelle CM')
self.add_param('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_param('generator_cm', val=np.zeros(3), desc='center of mass of the generator in [x,y,z]')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter of turbine')
self.add_param('RNA_mass', val=0.0, units='kg', desc='mass of total RNA')
self.add_param('RNA_cm', val=0.0, units='m', desc='RNA CM along x-axis')
# outputs
self.add_output('transformer_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('transformer_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('transformer_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.transformer = Transformer(uptower_transformer)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['transformer_mass'], outputs['transformer_cm'], outputs['transformer_I']) \
= self.transformer.compute(inputs['machine_rating'], inputs['tower_top_diameter'], inputs['rotor_mass'], inputs['generator_cm'], inputs['rotor_diameter'], inputs['RNA_mass'], inputs['RNA_cm'])
return outputs
#-------------------------------------------------------------------------
class Bedplate_OM(Component):
''' Bedplate class
The Bedplate class is used to represent the bedplate of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, uptower_transformer, debug=False):
super(Bedplate_OM, self).__init__()
# variables
self.add_param('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_param('gearbox_location', val=0.0, units='m', desc='gearbox CM location')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='gearbox mass')
self.add_param('hss_location', val=0.0, units='m', desc='HSS CM location')
self.add_param('hss_mass', val=0.0, units='kg', desc='HSS mass')
self.add_param('generator_location', val=0.0, units='m', desc='generator CM location')
self.add_param('generator_mass', val=0.0, units='kg', desc='generator mass')
self.add_param('lss_location', val=0.0, units='m', desc='LSS CM location')
self.add_param('lss_mass', val=0.0, units='kg', desc='LSS mass')
self.add_param('lss_length', val=0.0, units='m', desc='LSS length')
self.add_param('lss_mb1_facewidth', val=0.0, units='m', desc='Upwind main bearing facewidth')
self.add_param('mb1_cm', val=np.zeros(3), units='m', desc='Upwind main bearing CM location')
self.add_param('mb1_mass', val=0.0, units='kg', desc='Upwind main bearing mass')
self.add_param('mb2_cm', val=np.zeros(3), units='m', desc='Downwind main bearing CM location')
self.add_param('mb2_mass', val=0.0, units='kg', desc='Downwind main bearing mass')
self.add_param('transformer_mass', val=0.0, units='kg', desc='Transformer mass')
self.add_param('transformer_cm', val=np.zeros(3), units='m', desc='transformer CM location')
self.add_param('tower_top_diameter', val=0.0, units='m', desc='diameter of the top tower section at the yaw gear')
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_param('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_param('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_param('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_param('flange_length', val=0.0, units='m', desc='flange length')
self.add_param('distance_hub2mb', val=0.0, units='m', desc='length between rotor center and upwind main bearing')
# outputs
        self.add_output('bedplate_mass', val=0.0, units='kg', desc='overall component mass')
        self.add_output('bedplate_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
        self.add_output('bedplate_I', val=np.zeros(3), desc='moments of inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('bedplate_length', val=0.0, units='m', desc='length of bedplate')
self.add_output('bedplate_height', val=0.0, units='m', desc='max height of bedplate')
self.add_output('bedplate_width', val=0.0, units='m', desc='width of bedplate')
self.bpl = Bedplate(uptower_transformer, debug=debug)
self.debug = debug
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['bedplate_mass'], outputs['bedplate_cm'], outputs['bedplate_I'], outputs['bedplate_length'], outputs['bedplate_height'], outputs['bedplate_width']) \
= self.bpl.compute(inputs['gearbox_length'], inputs['gearbox_location'], inputs['gearbox_mass'], inputs['hss_location'], inputs['hss_mass'], inputs['generator_location'], inputs['generator_mass'], \
inputs['lss_location'], inputs['lss_mass'], inputs['lss_length'], inputs['mb1_cm'], inputs['lss_mb1_facewidth'], inputs['mb1_mass'], inputs['mb2_cm'], inputs['mb2_mass'], \
inputs['transformer_mass'], inputs['transformer_cm'], \
inputs['tower_top_diameter'], inputs['rotor_diameter'], inputs['machine_rating'], inputs['rotor_mass'], inputs['rotor_bending_moment_y'], inputs['rotor_force_z'], \
inputs['flange_length'], inputs['distance_hub2mb'])
return outputs
#-------------------------------------------------------------------------------
class AboveYawMassAdder_OM(Component):
''' AboveYawMassAdder_OM class
The AboveYawMassAdder_OM class is used to represent the masses of all parts of a wind turbine drivetrain that
are above the yaw system.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, crane, debug=False):
super(AboveYawMassAdder_OM, self).__init__()
# variables
self.add_param('machine_rating', val=0.0, units='kW', desc='machine rating')
self.add_param('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_param('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_param('bedplate_mass', val=0.0, units='kg', desc='component mass')
self.add_param('bedplate_length', val=0.0, units='m', desc='component length')
self.add_param('bedplate_width', val=0.0, units='m', desc='component width')
self.add_param('transformer_mass', val=0.0, units='kg', desc='component mass')
# returns
self.add_output('electrical_mass', val=0.0, units='kg', desc='component mass')
self.add_output('vs_electronics_mass', val=0.0, units='kg', desc='component mass')
self.add_output('hvac_mass', val=0.0, units='kg', desc='component mass')
self.add_output('controls_mass', val=0.0, units='kg', desc='component mass')
self.add_output('platforms_mass', val=0.0, units='kg', desc='component mass')
self.add_output('crane_mass', val=0.0, units='kg', desc='component mass')
self.add_output('mainframe_mass', val=0.0, units='kg', desc='component mass')
self.add_output('cover_mass', val=0.0, units='kg', desc='component mass')
self.add_output('above_yaw_mass', val=0.0, units='kg', desc='total mass above yaw system')
self.add_output('nacelle_length', val=0.0, units='m', desc='component length')
self.add_output('nacelle_width', val=0.0, units='m', desc='component width')
self.add_output('nacelle_height', val=0.0, units='m', desc='component height')
self.aboveyawmass = AboveYawMassAdder(crane)
self.debug = debug
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['electrical_mass'], outputs['vs_electronics_mass'], outputs['hvac_mass'], outputs['controls_mass'],
outputs['platforms_mass'], outputs['crane_mass'], outputs['mainframe_mass'], outputs['cover_mass'],
outputs['above_yaw_mass'], outputs['nacelle_length'], outputs['nacelle_width'], outputs['nacelle_height']) \
= self.aboveyawmass.compute(inputs['machine_rating'], inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'],
inputs['gearbox_mass'], inputs['hss_mass'], inputs['generator_mass'], inputs['bedplate_mass'],
inputs['bedplate_length'], inputs['bedplate_width'], inputs['transformer_mass'])
if self.debug:
print('AYMA IN: {:.1f} kW BPl {:.1f} m BPw {:.1f} m'.format(
inputs['machine_rating'],inputs['bedplate_length'], inputs['bedplate_width']))
print('AYMA IN masses (kg): LSS {:.1f} MB1 {:.1f} MB2 {:.1f} GBOX {:.1f} HSS {:.1f} GEN {:.1f} BP {:.1f} TFRM {:.1f}'.format(
inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'], inputs['gearbox_mass'],
inputs['hss_mass'], inputs['generator_mass'], inputs['bedplate_mass'], inputs['transformer_mass']))
print('AYMA OUT masses (kg) : E {:.1f} VSE {:.1f} HVAC {:.1f} CNTL {:.1f} PTFM {:.1f} CRN {:.1f} MNFRM {:.1f} CVR {:.1f} AYM {:.1f}'.format(
outputs['electrical_mass'], outputs['vs_electronics_mass'], outputs['hvac_mass'], outputs['controls_mass'],
outputs['platforms_mass'], outputs['crane_mass'], outputs['mainframe_mass'], outputs['cover_mass'],
outputs['above_yaw_mass']))
print('AYMA OUT nacelle (m): L {:.2f} W {:.2f} H {:.2f}'.format(
outputs['nacelle_length'], outputs['nacelle_width'], outputs['nacelle_height']))
return outputs
#---------------------------------------------------------------------------------------------------------------
class YawSystem_OM(Component):
''' YawSystem class
The YawSystem class is used to represent the yaw system of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self, yaw_motors_number):
super(YawSystem_OM, self).__init__()
# variables
self.add_param('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_param('rotor_thrust', val=0.0, units='N', desc='maximum rotor thrust')
self.add_param('tower_top_diameter', val=0.0, units='m', desc='tower top diameter')
self.add_param('above_yaw_mass', val=0.0, units='kg', desc='above yaw mass')
self.add_param('bedplate_height', val=0.0, units='m', desc='bedplate height')
# outputs
self.add_output('yaw_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('yaw_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('yaw_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.yaw = YawSystem(yaw_motors_number)
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['yaw_mass'], outputs['yaw_cm'], outputs['yaw_I']) \
= self.yaw.compute(inputs['rotor_diameter'], inputs['rotor_thrust'], inputs['tower_top_diameter'], inputs['above_yaw_mass'], inputs['bedplate_height'])
return outputs
#--------------------------------------------
class NacelleSystemAdder_OM(Component): #added to drive to include transformer
''' NacelleSystem class
The Nacelle class is used to represent the overall nacelle of a wind turbine.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def __init__(self):
super(NacelleSystemAdder_OM, self).__init__()
# variables
self.add_param('above_yaw_mass', val=0.0, units='kg', desc='mass above yaw system')
self.add_param('yaw_mass', val=0.0, units='kg', desc='mass of yaw system')
self.add_param('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_param('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_param('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_param('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_param('bedplate_mass', val=0.0, units='kg', desc='component mass')
self.add_param('mainframe_mass', val=0.0, units='kg', desc='component mass')
self.add_param('lss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('mb1_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('mb2_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_param('gearbox_cm', val=
|
np.array([0.0,0.0,0.0])
|
numpy.array
|
#!/usr/bin/env python
"""UTILS.PY - Utility functions
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20200112' # yyyymmdd
import os
import numpy as np
import warnings
from scipy import sparse
from scipy.interpolate import interp1d
from dlnpyutils import utils as dln
import matplotlib.pyplot as plt
# Ignore these warnings, it's a bug
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5 # speed of light in km/s
# Convert wavelengths to pixels for a dispersion solution
def w2p(dispersion,w,extrapolate=True):
"""
Convert wavelength values to pixels for a "dispersion solution".
Parameters
----------
dispersion : 1D array
The dispersion solution. This is basically just a 1D array of
monotonically increasing (or decreasing) wavelengths.
w : array
Array of wavelength values to convert to pixels.
extrapolate : bool, optional
Extrapolate beyond the dispersion solution, if necessary.
This is True by default.
Returns
-------
x : array
Array of converted pixel values.
Examples
--------
x = w2p(disp,w)
"""
x = interp1d(dispersion,np.arange(len(dispersion)),kind='cubic',bounds_error=False,fill_value=(np.nan,np.nan),assume_sorted=False)(w)
# Need to extrapolate
if ((np.min(w)<np.min(dispersion)) | (
|
np.max(w)
|
numpy.max
|
#! /usr/bin/env python3
import numpy as np
from collections import Counter
from scipy.stats import norm
from scipy.special import logsumexp, loggamma
from scipy.stats import t
from lsbm.mvt import dmvt
################################################################################
### LSBM embeddings with weighted inner product of basis functions GP kernel ###
################################################################################
class lsbm_gibbs:
## Initialise the class with the number of components and embedding
def __init__(self, X, K, W_function, fixed_function={}, K_fixed=True, first_linear=False):
self.X = X
self.n = X.shape[0]
self.d = X.shape[1]
self.K = K
if isinstance(K_fixed,bool):
self.K_fixed = K_fixed
else:
raise TypeError('K_fixed must be a boolean value.')
## Function to obtain the design matrix
self.fW = W_function
self.fixed_function = fixed_function
Fs = Counter()
for k,_ in self.fW:
Fs[k] += 1
self.kappa = len(Fs)
if self.kappa != self.K and self.K_fixed:
raise ValueError('The number of latent functions in W_function must be equal to K (when K is fixed).')
## Set up first_linear
if isinstance(first_linear, bool):
self.first_linear = (self.K if self.K_fixed else self.kappa) * [first_linear]
else:
self.first_linear = first_linear
if np.sum([isinstance(k,bool) for k in first_linear]) != self.K and self.K_fixed:
raise ValueError('If K is fixed, first_linear is either a boolean or a K-vector of booleans.')
if np.sum([isinstance(k,bool) for k in first_linear]) != self.kappa and not self.K_fixed:
raise ValueError('If K is not fixed, first_linear is either a boolean or a vector of booleans of size equal to the number of possible kernels.')
## Initialise the model parameters
def initialise(self, z, theta, Lambda_0=1.0, a_0=1.0, b_0=1.0, nu=1.0, mu_theta=0.0, sigma_theta=1.0, omega=0.9, g_prior=True):
## Initial cluster configuration
self.z = z
if np.min(self.z) == 1:
self.z -= 1
self.theta = theta
## Theta hyperparameters
self.mu_theta = mu_theta
self.sigma_theta = sigma_theta
## K hyperparameters
self.omega = omega
## Basis functions
self.g_prior = g_prior
self.W = {}
## for j in range(self.d):
## for k in range(self.K):
for k,j in self.fW:
self.W[k,j] = np.array([self.fW[k,j](self.theta[i]) for i in range(self.n)])
self.fixed_W = {}
## for j in self.fixed_function:
## for k in range(self.K):
for k,j in self.fixed_function:
self.fixed_W[j] = np.array([self.fixed_function[k,j](self.theta[i]) for i in range(self.n)])[:,0] ## rewrite using only one coefficient (1)
## If K_fixed, match each cluster with a set of functions
if not self.K_fixed:
self.fk = np.zeros(self.K, dtype=int)
## Prior parameters
self.nu = nu
self.a0 = a_0
self.b0 = b_0
self.lambda_coef = Lambda_0
self.Lambda0 = {}; self.Lambda0_inv = {}
for j in range(self.d):
for k in range(self.K if self.K_fixed else self.kappa):
if g_prior:
self.Lambda0_inv[k,j] = Lambda_0 * np.dot(self.W[k,j].T,self.W[k,j])
else:
self.Lambda0_inv[k,j] = np.diag(np.ones(len(self.fW[k,j](1))) * Lambda_0)
self.Lambda0[k,j] = np.linalg.inv(self.Lambda0_inv[k,j])
self.nu = nu
## Initialise hyperparameter vectors
self.nk = np.zeros(self.K, dtype=int) ## Counter(self.z)
for ind in self.z:
self.nk[ind] += 1
self.a = np.zeros(self.K)
for k in range(self.K):
self.a[k] = self.a0 + self.nk[k] / 2
self.b = {}; self.WtW = {}; self.WtX = {}; self.Lambda_inv = {}; self.Lambda = {}; self.mu = {}
for j in range(self.d):
self.b[j] = {}; self.WtW[j] = {}; self.WtX[j] = {}; self.Lambda_inv[j] = {}; self.Lambda[j] = {}; self.mu[j] = {}
for k in range(self.K):
X = self.X[self.z == k][:,j]
if j in self.fixed_function:
X -= self.fixed_W[j][self.z == k]
if j == 0 and self.first_linear[k if self.K_fixed else self.fk[k]]:
self.b[j][k] = self.b0 + np.sum((X - self.theta[self.z == k]) ** 2) / 2
else:
W = self.W[k if self.K_fixed else self.fk[k],j][self.z == k]
self.WtW[j][k] = np.dot(W.T,W)
self.WtX[j][k] = np.dot(W.T,X)
self.Lambda_inv[j][k] = self.WtW[j][k] + self.Lambda0_inv[k if self.K_fixed else self.fk[k],j]
self.Lambda[j][k] = np.linalg.inv(self.Lambda_inv[j][k])
self.mu[j][k] = np.dot(self.Lambda[j][k], self.WtX[j][k])
self.b[j][k] = self.b0 + (np.dot(X.T,X) - np.dot(self.mu[j][k].T, np.dot(self.Lambda_inv[j][k],self.mu[j][k]))) / 2
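    ## Note (added for clarity, not in the original code): the quantities cached above
    ## are the standard normal-inverse-gamma conjugate updates for a per-cluster,
    ## per-dimension Bayesian linear regression on the basis W:
    ##   Lambda_inv = W'W + Lambda0_inv,   mu = Lambda W'X,
    ##   a_k = a_0 + n_k / 2,              b = b_0 + (X'X - mu' Lambda_inv mu) / 2,
    ## with the first dimension regressed directly on theta when first_linear is set.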
########################################################
### a. Resample the allocations using Gibbs sampling ###
########################################################
def gibbs_communities(self,l=1):
## Change the value of l when too large
if l > self.n:
l = self.n
## Update the latent allocations in randomised order
## Loop over the indices
WtW_old = {}; WtX_old = {}; Lambda_inv_old = {}; Lambda_old = {}; mu_old = {}; b_old = {}; position = {}
for i in np.random.choice(self.n, size=l, replace=False):
zold = self.z[i]
## Update parameters of the distribution
self.a[zold] -= .5
self.nk[zold] -= 1
for j in range(self.d):
position[j] = self.X[i,j]
if j in self.fixed_function:
position[j] -= self.fixed_W[j][i]
if j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]:
b_old[j] = float(np.copy(self.b[j][zold]))
self.b[j][zold] -= (position[j] - self.theta[i]) ** 2 / 2
else:
b_old[j] = float(np.copy(self.b[j][zold]))
self.b[j][zold] -= (position[j] ** 2 - np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold]))) / 2
WtW_old[j] = np.copy(self.WtW[j][zold])
WtX_old[j] = np.copy(self.WtX[j][zold])
self.WtW[j][zold] -= np.outer(self.W[zold if self.K_fixed else self.fk[zold],j][i],self.W[zold if self.K_fixed else self.fk[zold],j][i])
self.WtX[j][zold] -= self.W[zold if self.K_fixed else self.fk[zold],j][i] * position[j]
Lambda_inv_old[j] = np.copy(self.Lambda_inv[j][zold])
Lambda_old[j] = np.copy(self.Lambda[j][zold])
self.Lambda_inv[j][zold] = self.WtW[j][zold] + self.Lambda0_inv[zold if self.K_fixed else self.fk[zold],j]
self.Lambda[j][zold] = np.linalg.inv(self.Lambda_inv[j][zold])
mu_old[j] = np.copy(self.mu[j][zold])
self.mu[j][zold] = np.dot(self.Lambda[j][zold], self.WtX[j][zold])
self.b[j][zold] -= np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold])) / 2
## Calculate probabilities for community allocations
community_probs = np.log((np.array([self.nk[k] for k in range(self.K)]) + self.nu/self.K) / (self.n - 1 + self.nu))
for k in range(self.K):
for j in range(self.d):
if j == 0 and self.first_linear[k if self.K_fixed else self.fk[k]]:
community_probs[k] += t.logpdf(position[j], df=2*self.a[k], loc=self.theta[i], scale=np.sqrt(self.b[j][k] / self.a[k]))
else:
community_probs[k] += t.logpdf(position[j], df=2*self.a[k], loc=np.dot(self.W[k if self.K_fixed else self.fk[k],j][i],self.mu[j][k]),
scale=np.sqrt(self.b[j][k] / self.a[k] * (1 + np.dot(self.W[k if self.K_fixed else self.fk[k],j][i].T, np.dot(self.Lambda[j][k], self.W[k if self.K_fixed else self.fk[k],j][i])))))
## Raise error if nan probabilities are computed
if np.isnan(community_probs).any():
print(community_probs)
raise ValueError("Error in the allocation probabilities. Check invertibility of the covariance matrices.")
## Update allocation
znew = np.random.choice(self.K, p=np.exp(community_probs - logsumexp(community_probs)))
self.z[i] = np.copy(znew)
## Update parameters
self.a[znew] += .5
self.nk[znew] += 1
if znew == zold:
## Re-update to old values
for j in range(self.d):
self.b[j][znew] = b_old[j]
if not (j == 0 and self.first_linear[znew if self.K_fixed else self.fk[znew]]):
self.WtW[j][znew] = WtW_old[j]
self.WtX[j][znew] = WtX_old[j]
self.Lambda_inv[j][znew] = Lambda_inv_old[j]
self.Lambda[j][znew] = Lambda_old[j]
self.mu[j][znew] = mu_old[j]
else:
## Update to new values
for j in range(self.d):
if j == 0 and self.first_linear[znew if self.K_fixed else self.fk[znew]]:
self.b[j][znew] += (position[j] - self.theta[i]) ** 2 / 2
else:
self.b[j][znew] += np.dot(self.mu[j][znew].T,np.dot(self.Lambda_inv[j][znew],self.mu[j][znew])) / 2
self.WtW[j][znew] += np.outer(self.W[znew if self.K_fixed else self.fk[znew],j][i],self.W[znew if self.K_fixed else self.fk[znew],j][i])
self.WtX[j][znew] += self.W[znew if self.K_fixed else self.fk[znew],j][i] * position[j]
self.Lambda_inv[j][znew] = self.WtW[j][znew] + self.Lambda0_inv[znew if self.K_fixed else self.fk[znew],j]
self.Lambda[j][znew] = np.linalg.inv(self.Lambda_inv[j][znew])
self.mu[j][znew] = np.dot(self.Lambda[j][znew], self.WtX[j][znew])
self.b[j][znew] += (position[j] ** 2 - np.dot(self.mu[j][znew].T,np.dot(self.Lambda_inv[j][znew],self.mu[j][znew]))) / 2
return None
##############################################
### b. Resample the latent positions theta ###
##############################################
def resample_theta(self, l=1, sigma_prop=0.1):
## Change the value of l when too large
if l > self.n:
l = self.n
## Update the latent positions in randomised order
## Loop over the indices
WtW_old = {}; WtX_old = {}; Lambda_inv_old = {}; Lambda_old = {}; mu_old = {}; b_old = {}; position = {}
position_prop = {}; W_prop = {}; W_prop_fixed = {}
for i in np.random.choice(self.n, size=l, replace=False):
zold = self.z[i]
theta_old = self.theta[i]
## Update parameters of the distribution
self.a[zold] -= .5
self.nk[zold] -= 1
for j in range(self.d):
position[j] = self.X[i,j]
if j in self.fixed_function:
position[j] -= self.fixed_W[j][i]
if j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]:
b_old[j] = float(np.copy(self.b[j][zold]))
self.b[j][zold] -= (position[j] - theta_old) ** 2 / 2
else:
b_old[j] = float(np.copy(self.b[j][zold]))
self.b[j][zold] -= (position[j] ** 2 - np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold]))) / 2
WtW_old[j] = np.copy(self.WtW[j][zold])
WtX_old[j] = np.copy(self.WtX[j][zold])
self.WtW[j][zold] -= np.outer(self.W[zold if self.K_fixed else self.fk[zold],j][i],self.W[zold if self.K_fixed else self.fk[zold],j][i])
self.WtX[j][zold] -= self.W[zold if self.K_fixed else self.fk[zold],j][i] * position[j]
Lambda_inv_old[j] = np.copy(self.Lambda_inv[j][zold])
Lambda_old[j] = np.copy(self.Lambda[j][zold])
self.Lambda_inv[j][zold] = self.WtW[j][zold] + self.Lambda0_inv[zold if self.K_fixed else self.fk[zold],j]
self.Lambda[j][zold] = np.linalg.inv(self.Lambda_inv[j][zold])
mu_old[j] = np.copy(self.mu[j][zold])
self.mu[j][zold] = np.dot(self.Lambda[j][zold], self.WtX[j][zold])
self.b[j][zold] -= np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold])) / 2
## Calculate proposal
theta_prop = np.random.normal(loc=theta_old, scale=sigma_prop)
for j in range(self.d):
position_prop[j] = self.X[i,j]
for k in range(self.K if self.K_fixed else self.kappa):
W_prop[k,j] = self.fW[k,j](theta_prop)
if j in self.fixed_function:
W_prop_fixed[j] = self.fixed_function[j](theta_prop)
position_prop[j] -= W_prop_fixed[j]
## Calculate acceptance ratio
numerator_accept = norm.logpdf(theta_prop,loc=self.mu_theta,scale=self.sigma_theta)
for j in range(self.d):
if j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]:
numerator_accept += t.logpdf(position_prop[j], df=2*self.a[zold], loc=theta_prop, scale=np.sqrt(self.b[j][zold] / self.a[zold]))
else:
numerator_accept += t.logpdf(position_prop[j], df=2*self.a[zold], loc=np.dot(W_prop[zold if self.K_fixed else self.fk[zold],j],self.mu[j][zold]),
scale=np.sqrt(self.b[j][zold] / self.a[zold] * (1 + np.dot(W_prop[zold if self.K_fixed else self.fk[zold],j].T, np.dot(self.Lambda[j][zold], W_prop[zold if self.K_fixed else self.fk[zold],j])))))
denominator_accept = norm.logpdf(theta_old,loc=self.mu_theta,scale=self.sigma_theta)
for j in range(self.d):
if j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]:
denominator_accept += t.logpdf(position[j], df=2*self.a[zold], loc=theta_old, scale=np.sqrt(self.b[j][zold] / self.a[zold]))
else:
denominator_accept += t.logpdf(position[j], df=2*self.a[zold], loc=np.dot(self.W[zold if self.K_fixed else self.fk[zold],j][i],self.mu[j][zold]),
scale=np.sqrt(self.b[j][zold] / self.a[zold] * (1 + np.dot(self.W[zold if self.K_fixed else self.fk[zold],j][i].T, np.dot(self.Lambda[j][zold], self.W[zold if self.K_fixed else self.fk[zold],j][i])))))
## Calculate acceptance probability
accept_ratio = numerator_accept - denominator_accept
accept = (-np.random.exponential(1) < accept_ratio)
## Update parameters
self.a[zold] += .5
self.nk[zold] += 1
if accept:
self.theta[i] = theta_prop
if not accept:
## Re-update to old values
for j in range(self.d):
self.b[j][zold] = b_old[j]
if not (j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]):
self.WtW[j][zold] = WtW_old[j]
self.WtX[j][zold] = WtX_old[j]
self.Lambda_inv[j][zold] = Lambda_inv_old[j]
self.Lambda[j][zold] = Lambda_old[j]
self.mu[j][zold] = mu_old[j]
else:
## Update to new values
for j in range(self.d):
## Update design matrix
for k in range(self.K if self.K_fixed else self.kappa):
self.W[k,j][i] = W_prop[k,j]
if j in self.fixed_function:
self.fixed_W[j][i] = W_prop_fixed[j]
if j == 0 and self.first_linear[zold if self.K_fixed else self.fk[zold]]:
self.b[j][zold] += (position[j] - self.theta[i]) ** 2 / 2
else:
self.b[j][zold] += np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold])) / 2
self.WtW[j][zold] += np.outer(self.W[zold if self.K_fixed else self.fk[zold],j][i],self.W[zold if self.K_fixed else self.fk[zold],j][i])
self.WtX[j][zold] += self.W[zold if self.K_fixed else self.fk[zold],j][i] * position_prop[j]
self.Lambda_inv[j][zold] = self.WtW[j][zold] + self.Lambda0_inv[zold if self.K_fixed else self.fk[zold],j]
self.Lambda[j][zold] = np.linalg.inv(self.Lambda_inv[j][zold])
self.mu[j][zold] = np.dot(self.Lambda[j][zold], self.WtX[j][zold])
self.b[j][zold] += (position_prop[j] ** 2 - np.dot(self.mu[j][zold].T,np.dot(self.Lambda_inv[j][zold],self.mu[j][zold]))) / 2
return None
#################################################
### c. Propose to add/remove an empty cluster ###
#################################################
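## Sketch: propose K -> K+1 by appending an empty cluster (with a kernel drawn
## uniformly from the kappa available forms) or K -> K-1 by deleting an empty cluster.
## Since no allocations change, the likelihood cancels and the acceptance ratio below
## only involves the Dirichlet normalising constants, the prior on K via (1 - omega),
## and corrections for the asymmetric proposal at the boundaries K = 1 and K = n.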
def propose_empty(self, verbose=False):
if self.K_fixed:
raise ValueError('propose_empty can only be run if K_fixed is set to False.')
## Propose K
if self.K == 1:
K_prop = 2
elif (self.K == self.n):
K_prop = self.n - 1
else:
## If there are no empty clusters and K_prop = K-1, reject the proposal
if not np.any(self.nk == 0):
K_prop = self.K+1
else:
K_prop = np.random.choice([self.K-1, self.K+1])
## Assign values to the variable remove
if K_prop < self.K:
remove = True
else:
remove = False
## Propose functional form for new cluster
if not remove:
fk_prop = np.random.choice(self.kappa)
## Propose a new (empty) vector of cluster allocations
if remove:
## Delete empty cluster with largest index (or sample at random)
ind_delete = np.random.choice(np.where(self.nk == 0)[0])
nk_prop = np.delete(self.nk, ind_delete)
else:
nk_prop = np.append(self.nk, 0)
## Common term for the acceptance probability
accept_ratio = self.K * loggamma(self.nu / self.K) - K_prop * loggamma(self.nu / K_prop) + \
np.sum(loggamma(nk_prop + self.nu / K_prop)) - np.sum(loggamma(self.nk + self.nu / self.K)) + \
(K_prop - self.K) * np.log(1 - self.omega) + np.log(.5) * int(self.K == 1) - np.log(.5) * int(self.K == self.n)
## Accept or reject the proposal
accept = (-np.random.exponential(1) < accept_ratio)
## Update the state if the proposal is accepted
if verbose:
print('\t',['Add','Remove'][int(remove)], accept, np.exp(accept_ratio), K_prop, end='')
if accept:
self.nk = nk_prop
if not remove:
self.fk = np.append(self.fk, fk_prop)
self.a = np.append(self.a, self.a0)
for j in range(self.d):
self.b[j][K_prop-1] = self.b0
if not (j == 0 and self.first_linear[fk_prop]):
self.WtW[j][K_prop-1] = np.zeros((self.W[fk_prop,j].shape[1], self.W[fk_prop,j].shape[1]))
self.WtX[j][K_prop-1] = np.zeros(self.W[fk_prop,j].shape[1])
self.Lambda_inv[j][K_prop-1] = np.copy(self.Lambda0_inv[fk_prop,j])
self.Lambda[j][K_prop-1] = np.copy(self.Lambda0[fk_prop,j])
self.mu[j][K_prop-1] = np.zeros(self.W[fk_prop,j].shape[1])
else:
## Delete old values
fk_old = self.fk[ind_delete]
self.fk = np.delete(self.fk, ind_delete)
self.a = np.delete(self.a,ind_delete)
for j in range(self.d):
del self.b[j][ind_delete]
if not (j == 0 and self.first_linear[fk_old]):
del self.WtW[j][ind_delete]
del self.WtX[j][ind_delete]
del self.Lambda_inv[j][ind_delete]
del self.Lambda[j][ind_delete]
del self.mu[j][ind_delete]
## Relabel groups
Q = np.arange(self.K)[np.arange(self.K) > ind_delete]
for k in Q:
self.z[self.z == k] = k-1
for j in range(self.d):
self.b[j][k-1] = self.b[j][k]; del self.b[j][k]
if not (j == 0 and self.first_linear[self.fk[k-1]]):
self.WtW[j][k-1] = self.WtW[j][k]; del self.WtW[j][k]
self.WtX[j][k-1] = self.WtX[j][k]; del self.WtX[j][k]
self.Lambda_inv[j][k-1] = self.Lambda_inv[j][k]; del self.Lambda_inv[j][k]
self.Lambda[j][k-1] = self.Lambda[j][k]; del self.Lambda[j][k]
self.mu[j][k-1] = self.mu[j][k]; del self.mu[j][k]
## Update K
self.K = K_prop
return None
##################################
### d. Split-merge communities ###
##################################
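## Sketch: draw two nodes q and q' at random; if they currently share a cluster a split
## is proposed, otherwise a merge. The remaining members of the affected cluster(s) are
## reallocated sequentially in random order (a restricted Gibbs scan), accumulating the
## proposal log-probability qprob, and the move is accepted with a Metropolis-Hastings
## ratio that combines the prior terms on K and the cluster sizes with the marginal
## likelihoods of the proposed and current partitions.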
def split_merge(self, verbose=False):
if self.K_fixed:
raise ValueError('split_merge can only be run if K_fixed is set to False.')
# Randomly choose two nodes
q, q_prime = np.random.choice(self.n, size=2, replace=False)
# Propose a split or merge move according to the sampled values
if self.z[q] == self.z[q_prime]:
split = True
z = self.z[q]
z_prime = self.K
fk_prop = self.fk[z]
fk_temp = [fk_prop, fk_prop]
else:
split = False
z = np.min([self.z[q],self.z[q_prime]])
z_prime = np.max([self.z[q],self.z[q_prime]])
fk_prop = np.random.choice([self.fk[z], self.fk[z_prime]])
fk_temp = [self.fk[z], self.fk[z_prime]]
# Proposed K
K_prop = self.K + (1 if split else -1)
# Preprocessing for split / merge move
nk_prop = np.ones(2, dtype=int)
a_prop = self.a0 + np.ones(2) / 2
b_prop = {}; WtW_prop = {}; WtX_prop = {}; Lambda_inv_prop = {}; Lambda_prop = {}; mu_prop = {}
for j in range(self.d):
b_prop[j] = np.ones(2) * self.b0; WtW_prop[j] = {}; WtX_prop[j] = {}; Lambda_inv_prop[j] = {}; Lambda_prop[j] = {}; mu_prop[j] = {}
if j == 0 and self.first_linear[self.fk[z]]:
b_prop[j][0] += (self.X[q,j] - self.theta[q]) ** 2 / 2
else:
WtW_prop[j][0] = np.outer(self.W[fk_temp[0],j][q], self.W[fk_temp[0],j][q])
WtX_prop[j][0] = np.multiply(self.W[fk_temp[0],j][q], self.X[q,j])
Lambda_inv_prop[j][0] = self.Lambda0_inv[self.fk[z],j] + WtW_prop[j][0]
Lambda_prop[j][0] = np.linalg.inv(Lambda_inv_prop[j][0])
mu_prop[j][0] = np.dot(Lambda_prop[j][0], WtX_prop[j][0])
b_prop[j][0] += (self.X[q,j] ** 2 - np.dot(mu_prop[j][0].T,np.dot(Lambda_inv_prop[j][0],mu_prop[j][0]))) / 2
if j == 0 and self.first_linear[self.fk[z if split else z_prime]]:
b_prop[j][1] += (self.X[q_prime,j] - self.theta[q_prime]) ** 2 / 2
else:
WtW_prop[j][1] = np.outer(self.W[fk_temp[1],j][q_prime], self.W[fk_temp[1],j][q_prime])
WtX_prop[j][1] = np.multiply(self.W[fk_temp[1],j][q_prime], self.X[q_prime,j])
Lambda_inv_prop[j][1] = self.Lambda0_inv[self.fk[z if split else z_prime],j] + WtW_prop[j][1]
Lambda_prop[j][1] = np.linalg.inv(Lambda_inv_prop[j][1])
mu_prop[j][1] = np.dot(Lambda_prop[j][1], WtX_prop[j][1])
b_prop[j][1] += (self.X[q_prime,j] ** 2 - np.dot(mu_prop[j][1].T,np.dot(Lambda_inv_prop[j][1],mu_prop[j][1]))) / 2
## Indices
if split:
indices = np.where(self.z == z)[0]
else:
indices = np.where(np.logical_or(self.z == z, self.z == z_prime))[0]
indices = indices[np.logical_and(indices != q, indices != q_prime)]
if not split:
## Calculate parameters for merge move
nk_merge = self.nk[z] + self.nk[z_prime]
a_merge = self.a0 + nk_merge / 2
b_merge = {}; WtW_merge = {}; WtX_merge = {}; Lambda_inv_merge = {}; Lambda_merge = {}; mu_merge = {}
for j in range(self.d):
b_merge[j] = self.b0
if j == 0 and self.first_linear[self.fk[z]]:
b_merge[j] += (self.X[q,j] - self.theta[q]) ** 2 / 2
b_merge[j] += (self.X[q_prime,j] - self.theta[q_prime]) ** 2 / 2
b_merge[j] += np.sum((self.X[indices,j] - self.theta[indices]) ** 2 / 2)
else:
X = self.X[np.append([q,q_prime],indices)][:,j]
W = self.W[fk_prop,j][np.append([q,q_prime],indices)]
WtW_merge[j] = np.dot(W.T,W)
WtX_merge[j] = np.dot(W.T,X)
Lambda_inv_merge[j] = self.Lambda0_inv[fk_prop,j] + WtW_merge[j]
Lambda_merge[j] = np.linalg.inv(Lambda_inv_merge[j])
mu_merge[j] = np.dot(Lambda_merge[j], WtX_merge[j])
b_merge[j] += (self.X[q,j] ** 2 + self.X[q_prime,j] ** 2 + np.dot(self.X[indices][:,j].T,self.X[indices][:,j]) - np.dot(mu_merge[j].T,np.dot(Lambda_inv_merge[j],mu_merge[j]))) / 2
## Random allocation of indices
indices = np.random.choice(indices,size=len(indices),replace=False)
## Calculate q probabilities
qprob = 0
zz = []
for i in indices:
## Calculate probabilities for community allocations
community_probs = np.log(nk_prop + self.nu/2)
for j in range(self.d):
if j == 0 and self.first_linear[self.fk[z]]:
community_probs[0] += t.logpdf(self.X[i,j], df=2*a_prop[0], loc=self.theta[i], scale=np.sqrt(b_prop[j][0] / a_prop[0]))
else:
community_probs[0] += t.logpdf(self.X[i,j], df=2*a_prop[0], loc=np.dot(self.W[fk_temp[0],j][i], mu_prop[j][0]),
scale=np.sqrt(b_prop[j][0] / a_prop[0] * (1 + np.dot(self.W[fk_temp[0],j][i].T, np.dot(Lambda_prop[j][0], self.W[fk_temp[0],j][i])))))
if j == 0 and self.first_linear[self.fk[z if split else z_prime]]:
community_probs[1] += t.logpdf(self.X[i,j], df=2*a_prop[1], loc=self.theta[i], scale=np.sqrt(b_prop[j][1] / a_prop[1]))
else:
community_probs[1] += t.logpdf(self.X[i,j], df=2*a_prop[1], loc=np.dot(self.W[fk_temp[1],j][i], mu_prop[j][1]),
scale=np.sqrt(b_prop[j][1] / a_prop[1] * (1 + np.dot(self.W[fk_temp[1],j][i].T, np.dot(Lambda_prop[j][1], self.W[fk_temp[1],j][i])))))
## Raise error if nan probabilities are computed
if np.isnan(community_probs).any():
print(community_probs)
raise ValueError("Error in the allocation probabilities. Check invertibility of the covariance matrices.")
## Update allocation
community_probs = np.exp(community_probs - logsumexp(community_probs))
if split:
znew = np.random.choice(2, p=community_probs)
else:
znew = int(self.z[i] == z_prime)
zz = np.append(zz, znew)
qprob += np.log(community_probs)[znew]
knew = fk_temp[znew]
## Update parameters
a_prop[znew] += 0.5
nk_prop[znew] += 1
for j in range(self.d):
if j == 0 and self.first_linear[knew]:
b_prop[j][znew] += (self.X[i,j] - self.theta[i]) ** 2 / 2
else:
b_prop[j][znew] += np.dot(mu_prop[j][znew].T,np.dot(Lambda_inv_prop[j][znew],mu_prop[j][znew])) / 2
WtW_prop[j][znew] += np.outer(self.W[knew,j][i],self.W[knew,j][i])
WtX_prop[j][znew] += self.W[knew,j][i] * self.X[i,j]
Lambda_inv_prop[j][znew] = WtW_prop[j][znew] + self.Lambda0_inv[knew,j]
Lambda_prop[j][znew] = np.linalg.inv(Lambda_inv_prop[j][znew])
mu_prop[j][znew] = np.dot(Lambda_prop[j][znew], WtX_prop[j][znew])
b_prop[j][znew] += (self.X[i,j] ** 2 - np.dot(mu_prop[j][znew].T,np.dot(Lambda_inv_prop[j][znew],mu_prop[j][znew]))) / 2
## Calculate acceptance ratio
acceptance_ratio = self.K * loggamma(self.nu / self.K) - K_prop * loggamma(self.nu / K_prop)
nk_ast = np.copy(self.nk)
if split:
nk_ast[z] = nk_prop[0]; nk_ast = np.append(nk_ast, nk_prop[1])
else:
nk_ast[z] = nk_merge; nk_ast = np.delete(arr=nk_ast, obj=z_prime)
acceptance_ratio += np.sum(loggamma(nk_ast + self.nu / K_prop)) - np.sum(loggamma(self.nk + self.nu / self.K))
acceptance_ratio += (K_prop - self.K) * np.log(1 - self.omega)
## acceptance_ratio += np.log(.5) * int(self.K == 1) - np.log(.5) * int(self.K == self.n)
acceptance_ratio -= (1 if split else -1) * qprob
if split:
indices0 = np.append([q],indices[zz == 0])
indices1 = np.append([q_prime],indices[zz == 1])
indices_all = np.append([q,q_prime],indices)
for j in range(self.d):
if j == 0 and self.first_linear[fk_prop]:
acceptance_ratio += np.sum(loggamma(a_prop) - loggamma(self.a0) + self.a0 * np.log(self.b0) - nk_prop/2 * np.log(2*np.pi))
acceptance_ratio -= a_prop[0] * np.log(self.b0 + np.sum((self.X[:,j][indices0] - self.theta[indices0]) ** 2) / 2)
acceptance_ratio -= a_prop[1] * np.log(self.b0 + np.sum((self.X[:,j][indices1] - self.theta[indices1]) ** 2) / 2)
acceptance_ratio -= loggamma(self.a[z]) - loggamma(self.a0) + self.a0 * np.log(self.b0) - self.nk[z]/2 * np.log(2*np.pi)
acceptance_ratio += self.a[z] * np.log(self.b0 + np.sum((self.X[:,j][indices_all] - self.theta[indices_all]) ** 2) / 2)
else:
S0 = np.dot(self.W[fk_prop,j][indices0], np.dot(self.Lambda0[fk_prop,j], self.W[fk_prop,j][indices0].T)) + np.diag(np.ones(nk_prop[0]))
acceptance_ratio += dmvt(x=self.X[indices0,j], mu=np.zeros(nk_prop[0]), Sigma=self.b0 / self.a0 * S0, nu=2*self.a0)
S1 = np.dot(self.W[fk_prop,j][indices1], np.dot(self.Lambda0[fk_prop,j], self.W[fk_prop,j][indices1].T)) + np.diag(np.ones(nk_prop[1]))
acceptance_ratio += dmvt(x=self.X[indices1,j], mu=np.zeros(nk_prop[1]), Sigma=self.b0 / self.a0 * S1, nu=2*self.a0)
S_all = np.dot(self.W[fk_prop,j][indices_all], np.dot(self.Lambda0[fk_prop,j], self.W[fk_prop,j][indices_all].T)) + np.diag(np.ones(self.nk[z]))
acceptance_ratio -= dmvt(x=self.X[indices_all,j], mu=np.zeros(self.nk[z]), Sigma=self.b0 / self.a0 * S_all, nu=2*self.a0)
else:
indices0 = np.append([q],indices[zz == 0])
indices1 = np.append([q_prime],indices[zz == 1])
indices_all = np.append([q,q_prime],indices)
for j in range(self.d):
if j == 0 and self.first_linear[fk_prop]:
acceptance_ratio -= np.sum(loggamma(a_prop) - loggamma(self.a0) + self.a0 * np.log(self.b0) - nk_prop/2 * np.log(2*np.pi))
acceptance_ratio += a_prop[0] * np.log(self.b0 + np.sum((self.X[:,j][indices0] - self.theta[indices0]) ** 2) / 2)
acceptance_ratio += a_prop[1] * np.log(self.b0 + np.sum((self.X[:,j][indices1] - self.theta[indices1]) ** 2) / 2)
acceptance_ratio += loggamma(a_merge) - loggamma(self.a0) + self.a0 * np.log(self.b0) - nk_merge/2 * np.log(2*np.pi)
acceptance_ratio -= a_merge * np.log(self.b0 + np.sum((self.X[:,j][indices_all] - self.theta[indices_all]) ** 2) / 2)
else:
S0 = np.dot(self.W[fk_temp[0],j][indices0], np.dot(self.Lambda0[fk_temp[0],j], self.W[fk_temp[0],j][indices0].T)) + np.diag(np.ones(nk_prop[0]))
acceptance_ratio -= dmvt(x=self.X[indices0,j], mu=np.zeros(nk_prop[0]), Sigma=self.b0 / self.a0 * S0, nu=2*self.a0)
S1 = np.dot(self.W[fk_temp[1],j][indices1], np.dot(self.Lambda0[fk_temp[1],j], self.W[fk_temp[1],j][indices1].T)) + np.diag(np.ones(nk_prop[1]))
acceptance_ratio -= dmvt(x=self.X[indices1,j], mu=np.zeros(nk_prop[1]), Sigma=self.b0 / self.a0 * S1, nu=2*self.a0)
S_all = np.dot(self.W[fk_prop,j][indices_all], np.dot(self.Lambda0[fk_prop,j], self.W[fk_prop,j][indices_all].T)) + np.diag(np.ones(nk_merge))
acceptance_ratio += dmvt(x=self.X[indices_all,j], mu=np.zeros(nk_merge), Sigma=self.b0 / self.a0 * S_all, nu=2*self.a0)
# Accept / reject using Metropolis-Hastings
accept = (-np.random.exponential(1) < acceptance_ratio)
if verbose:
print('\t',['Merge','Split'][int(split)], bool(accept), z, z_prime, K_prop, end='')
# Update if move is accepted
if accept:
if split:
self.z[indices1] = z_prime
self.fk = np.append(self.fk, values=fk_prop)
self.nk[z] = nk_prop[0]; self.nk = np.append(self.nk, nk_prop[1])
self.a[z] = a_prop[0]; self.a = np.append(self.a, a_prop[1])
for j in range(self.d):
self.b[j][z] = b_prop[j][0]; self.b[j][self.K] = b_prop[j][1]
if not (j == 0 and self.first_linear[fk_prop]):
self.WtW[j][z] = WtW_prop[j][0]; self.WtW[j][self.K] = WtW_prop[j][1]
self.WtX[j][z] = WtX_prop[j][0]; self.WtX[j][self.K] = WtX_prop[j][1]
self.Lambda_inv[j][z] = Lambda_inv_prop[j][0]; self.Lambda_inv[j][self.K] = Lambda_inv_prop[j][1]
self.Lambda[j][z] = Lambda_prop[j][0]; self.Lambda[j][self.K] = Lambda_prop[j][1]
self.mu[j][z] = mu_prop[j][0]; self.mu[j][self.K] = mu_prop[j][1]
else:
self.z[self.z == z_prime] = z
self.fk[z] = fk_prop; self.fk = np.delete(arr=self.fk, obj=z_prime)
self.nk[z] = nk_merge; self.nk = np.delete(arr=self.nk, obj=z_prime)
self.a[z] = a_merge; self.a = np.delete(arr=self.a, obj=z_prime)
for j in range(self.d):
self.b[j][z] = b_merge[j]; del self.b[j][z_prime]
if not (j == 0 and self.first_linear[fk_prop]):
self.WtW[j][z] = WtW_merge[j]; del self.WtW[j][z_prime]
self.WtX[j][z] = WtX_merge[j]; del self.WtX[j][z_prime]
self.Lambda_inv[j][z] = Lambda_inv_merge[j]; del self.Lambda_inv[j][z_prime]
self.Lambda[j][z] = Lambda_merge[j]; del self.Lambda[j][z_prime]
self.mu[j][z] = mu_merge[j]; del self.mu[j][z_prime]
## Relabel groups
Q = np.arange(self.K)[np.arange(self.K) > z_prime]
for k in Q:
self.z[self.z == k] = k-1
for j in range(self.d):
self.b[j][k-1] = self.b[j][k]; del self.b[j][k]
if not (j == 0 and self.first_linear[self.fk[k-1]]):
self.WtW[j][k-1] = self.WtW[j][k]; del self.WtW[j][k]
self.WtX[j][k-1] = self.WtX[j][k]; del self.WtX[j][k]
self.Lambda_inv[j][k-1] = self.Lambda_inv[j][k]; del self.Lambda_inv[j][k]
self.Lambda[j][k-1] = self.Lambda[j][k]; del self.Lambda[j][k]
self.mu[j][k-1] = self.mu[j][k]; del self.mu[j][k]
## Update K
self.K = K_prop
return None
######################################################
### e. Resample community-specific functional form ###
######################################################
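## Sketch: a cluster is picked at random and its functional form (kernel) is resampled
## among the kappa candidate bases, with each candidate weighted by the marginal
## likelihood of the cluster's data under that basis; the probs vector below accumulates
## these marginal log-likelihood terms from the usual Normal-Inverse-Gamma conjugacy.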
def resample_kernel(self, verbose=False):
## Sample existing community
k_group = np.random.choice(self.K)
fk_old = self.fk[k_group]
## Initialise hyperparameter vectors
S = {}; b_kernels = {}; WtW_kernels = {}; WtX_kernels = {}; Lambda_inv_kernels = {}; Lambda_kernels = {}; mu_kernels = {}
X = self.X[self.z == k_group]
theta = self.theta[self.z == k_group]
## Calculate vector of probabilities
probs = np.zeros(self.kappa)
for k in range(self.kappa):
b_kernels[k] = {}; WtW_kernels[k] = {}; WtX_kernels[k] = {}; Lambda_inv_kernels[k] = {}; Lambda_kernels[k] = {}; mu_kernels[k] = {}
XX = np.copy(X)
for j in range(self.d):
if j in self.fixed_function:
XX[:,j] -= self.fixed_W[j][self.z == k_group]
if j == 0 and self.first_linear[k]:
b_kernels[k][j] = self.b0 + np.sum((XX[:,j] - theta) ** 2) / 2
probs[k] += loggamma(self.a[k_group]) - loggamma(self.a0) - self.nk[k_group] *
|
np.log(2*np.pi)
|
numpy.log
|
import numpy as np
from tqdm import tqdm
from numpy.linalg import norm
def iterate(nApT, nAnT, seed, c, epsilon, beta, gamma, max_iters,
handles_deadend, verbose):
'''
Perform power iteration for SRWR query
inputs
nApT: csr_matrix
positive semi row-normalized adjacency matrix (transpose)
nAnT: csr_matrix
negative semi row-normalized adjacency matrix (transpose)
seed: int
seed (query) node
c: float
restart probability
epsilon: float
error tolerance for power iteration
beta: float
balance attenuation factor
gamma: float
balance attenuation factor
max_iters: int
maximum number of iterations for power iteration
handles_deadend: bool
if true, it will handle the deadend issue in power iteration
otherwise, it won't, i.e., no guarantee for sum of SRWR scores
to be 1 in directed graphs
verbose: bool
if true, it will show a progress bar over iterations
outputs:
rd: ndarray
relative trustworthiness score vector w.r.t. seed
rp: ndarray
positive SRWR vector w.r.t. seed
rn: ndarray
negative SRWR vector w.r.t. seed
residuals: list
list of residuals of power iteration,
e.g., residuals[i] is i-th residual
'''
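# Sketch of the loop below: rp and rn are stacked into rt = [rp; rn] and refined by
# power iteration until max_iters is reached or the residual drops below epsilon;
# beta and gamma, the balance attenuation factors, govern how mass surfing across
# negative edges is split between the positive and negative score vectors.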
m, n = nApT.shape
q = np.zeros((n, 1))
q[seed] = 1.0
rp = q
rn = np.zeros((n, 1))
rt =
|
np.row_stack((rp, rn))
|
numpy.row_stack
|
import csv
import numpy as np
from numpy import array, linspace, exp, pi, inf, vstack
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.integrate import quad,trapz
import pandas as pd
import tmm
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
import sys
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
from colour import SpectralDistribution, XYZ_to_sRGB, cctf_decoding, cctf_encoding
from colour.colorimetry import sd_to_XYZ_integration
from colour.notation import RGB_to_HEX
from colour.plotting import ColourSwatch, plot_multi_colour_swatches
import os
#import tmmPVColor as pvc
'''We determine the size and scaling of the photon wavelength scale. Units are um'''
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams) #um
'''We are constants and help control units'''
q = 1.602176634e-19 #coulombs. elementary charge
c0 = 299792458 #m/s #Speed of light
hPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s
kB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K
#pathtodat = os.path.abspath(__file__).replace(__file__,'')
pathtodat = os.path.abspath(__file__).replace('/wpv.py','')
#print('path to data: ')
#print(__file__)
#print(pathtodat)
#print('printed')
class Layer:
"""
I am a layer class for organizing data for each layer. I should make constructing stacks easier in the future and reduce possible mistakes
"""
def __init__(self, thickness, fname_root, i_or_c='c', isPV=False, **kwargs):
if thickness:
self.d = thickness
else:
self.d = None
self.i_or_c = i_or_c
#self.nk = 1.0
if fname_root:
self.datasource = fname_root
if kwargs.get('onecol'):
print('dumb data')
self.get_dumb_data()
else:
self.get_sensible_data()
else:
self.datasource = None
self.isPV = isPV
self.abs = 0
def get_dumb_data(self):
matfilename = 'Data/Materials/' + self.datasource + '.csv'
lct = 0
bothdat = []
with open(matfilename, newline='') as csvfile:
rawdat = csv.reader(csvfile, delimiter=' ')
for row in rawdat:
lct += 1
if row:
bothdat.append(row[0])
if 'k' in row[0]:
kstart = lct
lct = 1
nlams = []
ns = []
klams = []
ks = []
for line in bothdat:
if lct < kstart-1:
if 'n' not in line:
nlams.append(float(line.split(',')[0]))
ns.append(float(line.split(',')[1]))
else:
if 'k' not in line:
#print(line)
klams.append(float(line.split(',')[0]))
ks.append(float(line.split(',')[1]))
lct += 1
nlams = np.array(nlams)
ns = np.array(ns)
#print(nlams)
klams = np.array(klams)
ks = np.array(ks)
#print(klams)
self.n = interp1d(nlams,ns,fill_value="extrapolate")
self.k = interp1d(klams,ks,fill_value="extrapolate")
def get_sensible_data(self):
"""
next we will unpack n and k data from a csv file and turn it into a callable interpolation function
"""
matfilename = pathtodat + '/Data/Materials/' + self.datasource# + '.csv'
#matfilename = './Data/Materials/' + self.datasource# + '.csv'
testdat = np.genfromtxt(matfilename,delimiter=',',skip_header=1)
nlams = testdat[:,0]
ns = testdat[:,1]
ks = testdat[:,2]
self.n = interp1d(nlams,ns,fill_value="extrapolate")
self.k = interp1d(nlams,ks,fill_value="extrapolate")
def nk(self,lam):
return complex(self.n(lam),self.k(lam))
def plotnk(self,lams):
plt.figure()
plt.plot(lams, self.n(lams),label='n')
plt.plot(lams, self.k(lams),label='k')
plt.title(self.datasource)
plt.legend()
plt.show()
def self_summary(self):
print(' Material: ' + str(self.datasource) )
print(' Thickness: ' + str(self.d) )
print(' PV?: ' + str(self.isPV))
class Stack:
"""
I organize layers, interface with tmm,
and calculate interesting things like color, VLT, etc.
"""
def __init__(self, layers,**kwargs):
self.layers = layers
#import data from NIST solar spectrum
alldata = pd.read_excel(pathtodat + '/Data/Spectra/ASTMG173.xls',header=1)
Intensities = np.array(alldata['Direct+circumsolar W*m-2*nm-1'])
wavelengths = np.array(alldata['Wvlgth nm'].values)
self.Is = interp1d(wavelengths/1000.,Intensities*1000)
ciedata = pd.read_csv(pathtodat + '/Data/Spectra/CIEPhotopicLuminosity.csv',names=['lams','phis'])
self.cieplf = interp1d(np.array(ciedata['lams'])/1000.,np.array(ciedata['phis']),bounds_error=False,fill_value=(0.0,0.0))
def get_visible_light_transmission(self,lams,inc_angle):
numerator = trapz(self.Is(lams)*self.cieplf(lams)*self.get_specular_RAT(lams,inc_angle)[2],lams)
denominator = trapz(self.Is(lams)*self.cieplf(lams),lams)
VLT = numerator/denominator
#print(type(Asol.mean))
return VLT
def get_specular_RAT(self,lams,inc_angle):
Ts = []
Rs = []
for lam in lams:
thicks = [tmm.inf]
iorcs = ['i']
nks = [1]
for layer in self.layers:
nks.append(layer.nk(lam))
thicks.append(layer.d)
iorcs.append(layer.i_or_c)
thicks.append(tmm.inf)
iorcs.append('i')
nks.append(1)
front_spol = tmm.inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
front_ppol = tmm.inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
R = (front_spol['R']+front_ppol['R']) / 2.
Rs.append(R)
T = (front_spol['T']+front_ppol['T']) / 2.
Ts.append(T)
Rs = np.array(Rs)
Ts = np.array(Ts)
As = 1.-Rs-Ts
return [Rs,As,Ts]
def reverse(self):
flippedstack = Stack(self.layers[::-1])
return flippedstack
def get_specular_PV_abs(self, lams, inc_angle):
'''
note from Byrnes:
Assumes the final layer eventually absorbs all transmitted light.
Assumes the initial layer eventually absorbs all reflected light.
'''
thicks = [inf]
iorcs = ['i']
lnum = 0
pvlayer = 0
pvs = []
for layer in self.layers:
thicks.append(layer.d)
iorcs.append(layer.i_or_c)
pvs.append(layer.isPV)
if layer.isPV:
pvlayer = lnum+1 #+1 because of how tmm is written: always a layer above and below stack
lnum += 1
#print('pvlayer: ' + str(pvlayer))
#print('lnum: ' +str(lnum))
#print(any(pvs))
#print(np.invert(any(pvs)))
if not any(pvs):
#print('no PV')
return np.zeros(np.shape(lams))
thicks.append(inf)
iorcs.append('i')
thicks_bw = thicks[::-1]
iorcs_bw = iorcs[::-1]
pvabs = []
for lam in lams:
nks = [1]
for layer in self.layers:
nks.append(layer.nk(lam))
nks.append(1)
#note the plus one because of the assumed before and after layers
front_spol = tmm.inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
front_ppol = tmm.inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
pvabs_s = tmm.inc_absorp_in_each_layer(front_spol)[pvlayer]
pvabs_p = tmm.inc_absorp_in_each_layer(front_ppol)[pvlayer]
pvabs.append( (pvabs_s + pvabs_p) / 2. )
#print(allabs)
return pvabs
def get_transmitted_color(self,lams,inc_angle):
[Rs,As,Ts] = self.get_specular_RAT(lams,inc_angle)
nmlams = lams*1000
# get spectral distribution
df_spec = pd.Series(data=Ts,index=nmlams)
sd_spec = SpectralDistribution(df_spec)
# get distribution of illuminant (sun!)
df_ill = pd.Series(data=self.Is(lams),index=nmlams)
sd_ill = SpectralDistribution(df_ill)
# integrate spectrum to get CIE XYZ
XYZ = sd_to_XYZ_integration(sd_spec,illuminant=sd_ill)
# get sRGB from XYZ (note: not RGB, see https://en.wikipedia.org/wiki/SRGB)
#https://colour.readthedocs.io/en/develop/tutorial.html?highlight=colourswatch#convert-to-display-colours
#see above for why 100 is below
sRGB = XYZ_to_sRGB(XYZ/100.)
# remove gamma transfer function to get chromaticity by scaling
# see https://en.wikipedia.org/wiki/SRGB#The_forward_transformation_.28CIE_xyY_or_CIE_XYZ_to_sRGB.29
RGB_degamma = cctf_decoding(np.clip(sRGB,0,1),'GAMMA 2.2')
# scale the untransformed RGB to get chromaticity
RGB_scale = RGB_degamma/np.max(RGB_degamma)
# get the chromaticity for display by re-applying the transfer function
RGB_chrom = cctf_encoding(RGB_scale,'GAMMA 2.2')
#remove possible negative RGB values
sRGB = np.clip(sRGB,0,1)
# scaling without transforming could lead to a chromaticity change
# check if this happens
RGB_badchrom = sRGB/np.max(sRGB)
HEX_color = RGB_to_HEX(sRGB)
HEX_chrom = RGB_to_HEX(RGB_chrom)
'''
plot_multi_colour_swatches( [ColourSwatch(sRGB,'sRGB'),
ColourSwatch(RGB_chrom,'chromatricity'),
ColourSwatch(RGB_badchrom,'~chromatricity')],
text_kwargs={'size': 'x-large'})
'''
return {'color':HEX_color,'chromaticity':HEX_chrom}
def update_from_dash(self,dashdata):
ct = 0
layers = []
for entry in dashdata:
if entry['Thickness [μm]']:
if float(entry['Thickness [μm]'])>100:
ic = 'i'
else:
ic = 'c'
layer = Layer(entry['Thickness [μm]'],
entry['Material'],
i_or_c=ic,
isPV=entry['PV'])
layers.append(layer)
self.layers = layers
return False
def get_performance_characteristics(stack,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle):
layers = stack.layers
spectra = Spectra(layers ,AbsorberLayer,Angle)
AbsByAbsorbers = spectra['AbsByAbsorbers']
Ts = spectra['Ts']
Rfs = spectra['Rfs']
Rbs = spectra['Rbs']
As = spectra['As']
sanities = spectra['Total']
Absorbed = GiveEInterp(AbsByAbsorbers)
VLTcalc = stack.get_visible_light_transmission(lams,Angle) #cvs.getVLT(Ts,lams)#VLT(layers)
Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
#Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])
data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
#print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
def self_summary(self):
print('=======================================')
print('I am a stack with the following layers:')
print('=======================================')
ct = 0
for layer in self.layers:
ct+=1
print(' Layer ' + str(ct))
layer.self_summary()
print('')
'''
plt.figure()
plt.plot(wavelengths/1000,self.cieplf(wavelengths/1000))
plt.show()
'''
'''
def get_solar_weighted_absorption(self,lamrange,inc_angle):
integ = vegas.Integrator([lamrange])
Asol = integ(lambda lam: self.Is(lam)*self.get_RAT(lam,inc_angle)[1], nitn=10, neval=100)[0]
Asol /= integ(self.Is, nitn=10, neval=1000)[0]
#print(type(Asol.mean))
return Asol.mean
def get_visible_light_transmission_OLD(self,lamrange,inc_angle):
integ = vegas.Integrator([lamrange])
numerator = integ(lambda lam: self.Is(lam)*self.cieplf(lam)*self.get_RAT(lam,inc_angle)[2], nitn=10, neval=150)[0]
denominator = integ(lambda lam: self.Is(lam)*self.cieplf(lam), nitn=10, neval=150)[0]
VLT = numerator/denominator
#print(type(Asol.mean))
return VLT.mean
'''
# STUFF FROM ADAM
## weird stuff to fix
'''Gives a spectrum of VLT. Used for plotting'''
def VLTSpectrum(layers):
return Stack(layers)
## other stuff on color
## stuff on PCE
# ******************** Here I add PCE calculation *********************#
'''This stuff imports a spreadsheet of the solar spectrum'''
worksheet = pd.read_excel(pathtodat + '/Data/Spectra/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
downloaded_array = array(worksheet)
# Wavelength is in column 0, AM1.5G data is column 2
AM15 = downloaded_array[1:, [0,2]]
# The first line should be 280.0 , 4.7309E-23
# The last line should be 4000.0, 7.1043E-03
# print(AM15)
# Interpolate to get a continuous function which I will be able to do integrals on:
'''Interpolated solar spectrum.
The argument is wavelength in um; it must lie within 0.3-2.5 um (300-2500 nm)'''
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
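# e.g. AM15interp(0.55) gives the AM1.5 spectral intensity near 550 nm (W/m^2/nm);
# the argument is in um because the wavelength column was divided by 1000 above.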
# Here’s the plot, it looks correct:
'''Plot of the solar spectrum for verification'''
'''
y_values = np.array([AM15interp(x) for x in lams])
figure()
plot(lams , y_values)
xlabel("Wavelength (nm)")
ylabel("Spectral intensity (W/m$^2$/nm)")
title("Light from the sun");
show()
'''
'''I convert wavelength to energy. E_min and max are used for integration limits '''
Ephoton = hPlanck * c0 / lams *1e6 #J
E_min = min(Ephoton) #J energy units from hPlanck
E_max = max(Ephoton) #J energy units from hPlanck
'''I give the number of solar photons per unit time, photon energy, and area'''
def SPhotonsPerTEA(Ephoton):
λ = hPlanck * c0 / Ephoton *1e6 #um
return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
'''I give the incident solar power per unit photon energy and area'''
def PowerPerTEA(Ephoton):
return Ephoton * SPhotonsPerTEA(Ephoton)
'''I give the solar constant, the incident solar irradiance in W/m^2. Should be ~1000'''
def Solar_Constant(Ephoton):
#PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)
return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide
# the messages warning about poor accuracy in integrating.
'''This is the solar constant value. It is called by optimization and used in a variety of functions here.
Should always be ~1000'''
solar_constant = Solar_Constant(Ephoton)
'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''
def GivelamsInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(lams, Curve)
'''I return an interpolated function of a spectrum relative to photon energy'''
def GiveEInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(Ephoton, Curve)
'''I give Q based on a given spectrum. Units are W/m^2
Input is a spectrum interpolated with respect to energy, E
eta should only be used if looking at a PV layer. Otherwise it is set to 1'''
def GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * PowerPerTEA(E)
return quad(integrand, E_min, E_max, full_output=1)[0]
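# Usage sketch (as in TcellCalc below): Qabs = GiveQ(GiveEInterp(As)) integrates the
# stack absorptance against the solar spectrum to give the absorbed power in W/m^2.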
'''
#trapz calcs
def GiveQ(Spectra, eta = 1):#Spectra must be an array
integrand = eta*Spectra*PowerPerTEA(Ephoton)
return -np.trapz(integrand, Ephoton)
'''
'''
def GivePhotons(Spectra, eta):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * SPhotonsPerTEA(E)
return quad(integrand, E_min, E_max)[0]
'''
# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)
# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed
'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''
def RR0(eta,Absorbed,Tcell):
integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)
integral = quad(integrand, E_min, E_max, full_output=1)[0]
return ((2 * pi) / (c0**2 * hPlanck**3)) * integral# / 1.60218e-19 #J/eV
#units = photons/(s*m**2)
'''I give the flux of photons converted to electricity, units are photons/(s*m**2)'''
def Generated(eta,Absorbed):
integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return quad(integrand, E_min, E_max, full_output=1)[0]
#units photons/(s*m**2)
'''
#Using trapezoidal rule for integration instead of quad
#AbsByAbsorbers is an array of intensities, not an interpolated function.
def RR0(eta,Absorbed,Tcell):
AbsByAbsorbers = AbsByAbsorbers.round(8)
integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)
integral = trapz(integrand, Ephoton)
return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral
def Generated(eta,Absorbed):
Absorbed = Absorbed.round(8)
integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return np.trapz(integrand, Ephoton)
'''
'''I use the single diode equation to return the max power of the cell in watts
Check PVlib documentation for details'''
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
return data['p_mp']
'''I calculate the equilibrium temperature of the cell assuming the cell is infinitely thin
TotalAbs is the full absorptance of the stack as an array of intensities, uninterpolated.
Absorbed is PV layer absorptance interpolated
Temperature calculation is implicit so the numerical solver fsolve is used.
This equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
AbsTotal = GiveEInterp(TotalAbs)
Qabs = GiveQ(AbsTotal)
Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
return fsolve(Temp, 300)[0]
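# The cell temperature solves the energy balance
#   Qabs - Pmp(Tcell) + Ui*Ti + Uo*To = (Ui + Uo) * Tcell
# which fsolve starts from an initial guess of 300 K.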
'''I use the single diode equation to produce an IV curve and power plot
I also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts
See pvlib singlediode equation for more information'''
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
Vvalues = array(data['v'])
Ivalues = array(data['i'])
#print('Isc = ', Isc, ', Voc = ', Voc, ', Imp = ', Imp, ', Vmp = ', Vmp, ', Pmp =', Pmp)
figure()
plot(Vvalues,Ivalues, label = 'IV')
xlabel('Voltage, (V)')
ylabel('Current (A) or Power (W/m^2)')
P_values = array([Ivalues * Vvalues])
plot(Vvalues , P_values.T, label = 'Power')
ylim(-1, 150)
legend(loc = 'upper right')
show()
return data
'''I give the solar heat gain coefficient, a unitless number between 0 and 1
Ts is the transmission spectra. Must be a list of intensities, not an interpolated function
This equation comes from a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows
and equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''
def SHGC(Ts, Ti, To, Tcell, Ui):
#Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
Rtot = 1/Ui #This is approximate because Ui is assumed
#Included in GiveQ for simplicity but should not be used for calculating SHGC
TransTotal = GiveEInterp(Ts)
Qtrans = GiveQ(TransTotal,1)
return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
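# In words: transmitted solar power, plus the heat conducted inward from the cell at
# Tcell, minus the baseline conduction driven by To - Ti, all normalised by the solar
# constant.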
'''I give max efficiency also called PCE'''
'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
#Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant
'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''
def giveincangle(angle):
degree = pi/180
return angle*degree
'''I assemble a list of layer objects using Thicknesses and Materials'''
def GiveLayers(Thickness,Materials):
x = len(Materials)
if x == len(Thickness):
Layers = []
for i in range(x):
Layers.append(Materials[i](Thickness[i]))
return Layers
else:
raise ValueError('Materials and Thickness lengths do not match')
'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
global inc_angle
inc_angle = giveincangle(Angle)
layers = GiveLayers(Thickness,Materials)
spectra = Spectra(layers ,AbsorberLayer)
AbsByAbsorbers = spectra['AbsByAbsorbers']
Ts = spectra['Ts']
Rfs = spectra['Rfs']
Rbs = spectra['Rbs']
As = spectra['As']
sanities = spectra['Total']
Absorbed = GiveEInterp(AbsByAbsorbers)
VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)
Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
#Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])
data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
#Spectral Curves
figure()
plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
plot(lams,Ts,color='green',marker=None,label="$T$")
plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
plot(lams,As,color='black',marker=None,label="A")
plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
plot(lams,sanities,color='gold',marker=None,label="R+A+T")
plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
xlabel('wavelength, $\mu$m')
ylabel('Intensity')
legend(loc = 'upper right')
show()
EphotoneV = Ephoton*6.241509e+18
figure()
plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
#plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
legend(loc = 'upper right')
xlabel('Energy, eV')
ylabel('Intensity')
show()
GiveColorSwatch(Ts, Rfs)
plot_xy_on_fin(Ts, Rfs)
print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
# Vince hacks some stuff from Adam
'''
This needs work. To do:
1) It should be a method within stack.
2) Figure out a better way to do the interpolated curve for spectrum as a function of photon energy
3) Important: fix T_cell calculation. I don't trust it.
'''
def get_performance_characteristics(stack,Ti,To,Ui,Uo,Rs,Rsh,Angle):
layers = stack.layers
eta = 1
[Refs,As,Ts] = stack.get_specular_RAT(lams,Angle)
Refs=np.array(Refs)
As=np.array(As)
Ts=np.array(Ts)
#spectra = Spectra(layers ,AbsorberLayer,Angle)
#AbsByAbsorbers = spectra['AbsByAbsorbers']
AbsByAbsorbers = stack.get_specular_PV_abs(lams, Angle)
AbsByAbsorbers = np.array(AbsByAbsorbers)
Absorbed = GiveEInterp(AbsByAbsorbers)
#Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
AbsTotal = GiveEInterp(As)
Qabs = GiveQ(AbsTotal)
def tsolve(Tcell):
return (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
Tcell= fsolve(tsolve, 300)[0]
#print(Tcell)
# I don't trust the following calculation of the SHGC at all. There is no way it is not a function of Uo
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
#print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
return {'PCE':PCE,'SHGC':SHGCcalc,'Tcell':Tcell}
def get_performance_characteristics_old(stack,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle):
layers = stack.layers
[Rs,As,Ts] = stack.get_specular_RAT(lams,Angle)
Rs=np.array(Rs)
As=np.array(As)
Ts=np.array(Ts)
#spectra = Spectra(layers ,AbsorberLayer,Angle)
#AbsByAbsorbers = spectra['AbsByAbsorbers']
AbsByAbsorbers = stack.get_specular_PV_abs(lams, Angle)
#spectra = Spectra(layers ,AbsorberLayer,Angle)
#AbsByAbsorbers = spectra['AbsByAbsorbers']
#Ts = spectra['Ts']
#Rfs = spectra['Rfs']
#Rbs = spectra['Rbs']
#As = spectra['As']
#sanities = spectra['Total']
Absorbed = GiveEInterp(
|
np.array(AbsByAbsorbers)
|
numpy.array
|
"""Student's t-distribution Fitting methods"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from scipy.special import gammaln, digamma, polygamma
from scipy.optimize import newton
from sklearn.utils.extmath import row_norms
from sklearn.utils import check_array
import warnings
###############################################################################
# Functions to be used by the MultivariateTFit class
def _check_X(X, n_features=None, ensure_min_samples=1):
"""Check the input data X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array, shape (n_samples, n_features)
"""
X = check_array(X, dtype=[np.float64, np.float32],
ensure_min_samples=ensure_min_samples)
if n_features is not None and X.shape[1] != n_features:
raise ValueError("Expected the input data X have %d features, "
"but got %d features"
% (n_features, X.shape[1]))
return X
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : string
"""
param =
|
np.array(param)
|
numpy.array
|
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
# from sparse_dot_mkl import dot_product_mkl
from scipy.sparse import csr_matrix, vstack, issparse
import numpy as np
import logging
__all__ = ['ClusteringAlgo', 'ClusteringAlgoSparse']
def cosine_distances(x, y, intel_mkl=False):
x_normalized = normalize(x, copy=True)
y_normalized = normalize(y, copy=True)
if intel_mkl:
# s = dot_product_mkl(x_normalized, y_normalized.T.tocsr(), dense=True)
pass
else:
s = (x_normalized * y_normalized.T).toarray()
s *= -1
s += 1
np.clip(s, 0, 2, out=s)
if x is y or y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
s[np.diag_indices_from(s)] = 0.0
return s
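# Returns 1 - cosine similarity between the rows of x and the rows of y, clipped to
# [0, 2]; e.g. cosine_distances(M, M) yields a dense pairwise distance matrix with
# zeros on the diagonal.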
class ClusteringAlgo:
def __init__(self, threshold=0.65, window_size=300000, batch_size=8, distance="cosine"):
self.M = None
self.t = threshold
self.w = window_size
self.batch_size = batch_size
self.zeros_vectors = None
self.thread_id = 0
self.distance = distance
def add_vectors(self, vectors):
self.M = vectors
if issparse(vectors):
self.zeros_vectors = vectors.getnnz(1) == 0
else:
self.zeros_vectors = ~vectors.any(axis=1)
def iter_on_matrix(self, ):
if self.distance == "precomputed":
matrix = self.M[~self.zeros_vectors][:, ~self.zeros_vectors]
for idx in range(0, matrix.shape[0], self.batch_size):
lim = min(idx + self.batch_size, matrix.shape[0])
vectors = matrix[idx:lim, max(lim - self.w, 0):lim]
yield idx, vectors
else:
matrix = self.M[~self.zeros_vectors]
for idx in range(0, matrix.shape[0], self.batch_size):
if idx % 10000 == 0:
logging.info(idx)
vectors = matrix[idx:min(idx + self.batch_size, matrix.shape[0])]
yield idx, vectors
def brute_nn(self, data, tweets):
nn = NearestNeighbors(n_neighbors=1, algorithm='brute', metric=self.distance)
if self.distance == "precomputed":
nn.fit(np.zeros((tweets.shape[1], tweets.shape[1])))
else:
nn.fit(data)
distance, neighbor_exact = nn.kneighbors(tweets)
return distance.transpose()[0], neighbor_exact.transpose()[0]
def incremental_clustering(self, ):
if issparse(self.M):
T = csr_matrix((self.w, self.M.shape[1]))
else:
T = np.zeros((self.w, self.M.shape[1]), dtype=self.M.dtype)
threads =
|
np.zeros(self.w, dtype="int")
|
numpy.zeros
|
import copy
import json
import os
import time
import urllib.request
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from scipy.optimize import linprog
global penguin_url, headers
penguin_url = "https://penguin-stats.io/PenguinStats/api/"
headers = {"User-Agent": "ArkPlanner"}
gamedata_langs = ["en_US", "ja_JP", "ko_KR", "zh_CN"]
DEFAULT_LANG = "en_US"
NON_CN_WORLD_NUM = 4
FILTER_FREQ_DEFAULT = 100
class MaterialPlanning(object):
def __init__(
self,
filter_freq=FILTER_FREQ_DEFAULT,
filter_stages=[],
url_stats="result/matrix?show_stage_details=true&show_item_details=true",
url_rules="formula",
path_stats="data/matrix.json",
dont_save_data=False,
path_rules="data/formula.json",
gamedata_path="https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/"
+ "master/{}/gamedata/excel/item_table.json",
):
"""
Object initialization.
Args:
filter_freq: int or None. The lowest frequency that we consider.
No filter will be applied if None.
url_stats: string. url to the dropping rate stats data.
url_rules: string. url to the composing rules data.
path_stats: string. local path to the dropping rate stats data.
path_rules: string. local path to the composing rules data.
"""
if not dont_save_data:
try:
material_probs, convertion_rules = load_data(path_stats, path_rules)
except FileNotFoundError:
material_probs, convertion_rules = request_data(
penguin_url + url_stats,
penguin_url + url_rules,
path_stats,
path_rules,
gamedata_path,
)
print("done.")
else:
material_probs, convertion_rules = request_data(
penguin_url + url_stats,
penguin_url + url_rules,
path_stats,
path_rules,
gamedata_path,
dont_save_data,
)
self.itemdata = request_itemdata(gamedata_path)
self.itemdata_rv = {
lang: {v: k for k, v in dct.items()} for lang, dct in self.itemdata.items()
}
filtered_probs = []
for dct in material_probs["matrix"]:
if (
dct["stage"]["apCost"] > 0.1
and dct["stage"]["code"] not in filter_stages
):
if not filter_freq or dct["times"] >= filter_freq:
filtered_probs.append(dct)
material_probs["matrix"] = filtered_probs
self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
def _pre_processing(self, material_probs, convertion_rules):
"""
Compute costs, convertion rules and item probabilities from the requested dictionaries.
Args:
material_probs: List of dictionaries recording the dropping info per stage per item.
Keys of instances: ["itemID", "times", "itemName", "quantity", "apCost", "stageCode", "stageID"].
convertion_rules: List of dictionaries recording the rules of composing.
Keys of instances: ["id", "name", "level", "source", "madeof"].
"""
# To count items and stages.
additional_items = {"30135": u"D32钢", "30125": u"双极纳米片", "30115": u"聚合剂"}
exp_unit = 200 * (30.0 - 0.048 * 30) / 7400
gold_unit = 0.004
exp_worths = {
"2001": exp_unit,
"2002": exp_unit * 2,
"2003": exp_unit * 5,
"2004": exp_unit * 10,
"3003": exp_unit * 2,
}
gold_worths = {}
item_dct = {}
stage_dct = {}
for dct in material_probs["matrix"]:
item_dct[dct["item"]["itemId"]] = dct["item"]["name"]
stage_dct[dct["stage"]["code"]] = dct["stage"]["code"]
item_dct.update(additional_items)
# To construct mapping from id to item names.
item_array = []
item_id_array = []
for k, v in item_dct.items():
try:
float(k)
item_array.append(v)
item_id_array.append(k)
except ValueError:
pass
self.item_array = np.array(item_array)
self.item_id_array = np.array(item_id_array)
self.item_id_rv = {int(v): k for k, v in enumerate(item_id_array)}
self.item_dct_rv = {v: k for k, v in enumerate(item_array)}
# To construct mapping from stage id to stage names and vice versa.
stage_array = []
for k, v in stage_dct.items():
stage_array.append(v)
self.stage_array = np.array(stage_array)
self.stage_dct_rv = {v: k for k, v in enumerate(self.stage_array)}
# To format dropping records into sparse probability matrix
probs_matrix = np.zeros([len(stage_array), len(item_array)])
cost_lst = np.zeros(len(stage_array))
cost_exp_offset = np.zeros(len(stage_array))
cost_gold_offset = np.zeros(len(stage_array))
for dct in material_probs["matrix"]:
try:
cost_lst[self.stage_dct_rv[dct["stage"]["code"]]] = dct["stage"][
"apCost"
]
float(dct["item"]["itemId"])
probs_matrix[
self.stage_dct_rv[dct["stage"]["code"]],
self.item_dct_rv[dct["item"]["name"]],
] = dct["quantity"] / float(dct["times"])
if cost_lst[self.stage_dct_rv[dct["stage"]["code"]]] != 0:
cost_gold_offset[self.stage_dct_rv[dct["stage"]["code"]]] = -dct[
"stage"
]["apCost"] * (12 * gold_unit)
except ValueError:
pass
try:
cost_exp_offset[self.stage_dct_rv[dct["stage"]["code"]]] -= (
exp_worths[dct["item"]["itemId"]]
* dct["quantity"]
/ float(dct["times"])
)
except (KeyError, ValueError):
pass
try:
cost_gold_offset[self.stage_dct_rv[dct["stage"]["code"]]] -= (
gold_worths[dct["item"]["itemId"]]
* dct["quantity"]
/ float(dct["times"])
)
except (KeyError, ValueError):
pass
# Hardcoding: extra gold farmed.
cost_gold_offset[self.stage_dct_rv["S4-6"]] -= 3228 * gold_unit
cost_gold_offset[self.stage_dct_rv["S5-2"]] -= 2484 * gold_unit
# To build equivalence relationship from convert_rule_dct.
self.convertions_dct = {}
convertion_matrix = []
convertion_outc_matrix = []
convertion_cost_lst = []
for rule in convertion_rules:
convertion = np.zeros(len(self.item_array))
convertion[self.item_dct_rv[rule["name"]]] = 1
comp_dct = {comp["id"]: comp["count"] for comp in rule["costs"]}
self.convertions_dct[rule["id"]] = comp_dct
for item_id in comp_dct:
convertion[self.item_id_rv[int(item_id)]] -= comp_dct[item_id]
convertion_matrix.append(copy.deepcopy(convertion))
outc_dct = {outc["name"]: outc["count"] for outc in rule["extraOutcome"]}
outc_wgh = {outc["name"]: outc["weight"] for outc in rule["extraOutcome"]}
weight_sum = float(sum(outc_wgh.values()))
for item_id in outc_dct:
convertion[self.item_dct_rv[item_id]] += (
outc_dct[item_id] * 0.175 * outc_wgh[item_id] / weight_sum
)
convertion_outc_matrix.append(convertion)
convertion_cost_lst.append(rule["goldCost"] * 0.004)
convertions_group = (
np.array(convertion_matrix),
np.array(convertion_outc_matrix),
np.array(convertion_cost_lst),
)
farms_group = (probs_matrix, cost_lst, cost_exp_offset, cost_gold_offset)
return convertions_group, farms_group
def _set_lp_parameters(self, convertions_group, farms_group):
"""
Store the linear-programming parameters unpacked from the preprocessing step.
Args:
convertions_group: (convertion_matrix, convertion_outc_matrix, convertion_cost_lst).
convertion_matrix: matrix of shape [n_rules, n_items]; each row represents a crafting rule.
convertion_cost_lst: list. Cost, in equivalent value, of the currency spent on the convertion.
farms_group: (probs_matrix, cost_lst, cost_exp_offset, cost_gold_offset).
probs_matrix: sparse matrix of shape [n_stages, n_items]; items per clear (probabilities) at each stage.
cost_lst: list. Cost per clear of each stage.
"""
(
self.convertion_matrix,
self.convertion_outc_matrix,
self.convertion_cost_lst,
) = convertions_group
(
self.probs_matrix,
self.cost_lst,
self.cost_exp_offset,
self.cost_gold_offset,
) = farms_group
assert len(self.probs_matrix) == len(self.cost_lst)
assert len(self.convertion_matrix) == len(self.convertion_cost_lst)
assert self.probs_matrix.shape[1] == self.convertion_matrix.shape[1]
def update(
self,
filter_freq=FILTER_FREQ_DEFAULT,
filter_stages=None,
url_stats="result/matrix?show_stage_details=true&show_item_details=true",
url_rules="formula",
path_stats="data/matrix.json",
path_rules="data/formula.json",
gamedata_path="https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/{}/gamedata/excel/item_table.json",
dont_save_data=False,
):
"""
Update parameters when drop probabilities change or new items are added.
Args:
url_stats: string. url to the dropping rate stats data.
url_rules: string. url to the composing rules data.
path_stats: string. local path to the dropping rate stats data.
path_rules: string. local path to the composing rules data.
"""
material_probs, convertion_rules = request_data(
penguin_url + url_stats,
penguin_url + url_rules,
path_stats,
path_rules,
gamedata_path,
dont_save_data,
)
self.itemdata = request_itemdata(gamedata_path)
self.itemdata_rv = {
lang: {v: k for k, v in dct.items()} for lang, dct in self.itemdata.items()
}
if filter_freq:
if filter_stages is None:
filter_stages = []
filtered_probs = []
for dct in material_probs["matrix"]:
if (
dct["times"] >= filter_freq
and dct["stage"]["code"] not in filter_stages
):
filtered_probs.append(dct)
material_probs["matrix"] = filtered_probs
self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
def _get_plan_no_prioties(
self, demand_lst, outcome=False, gold_demand=True, exp_demand=True
):
"""
Solve the linear programming problem without priorities.
Args:
demand_lst: list of materials demand. Should include all items (zero if not required).
Returns:
strategy: list of required clear times for each stage.
fun: estimated total cost.
"""
A_ub = (
np.vstack([self.probs_matrix, self.convertion_outc_matrix])
if outcome
else np.vstack([self.probs_matrix, self.convertion_matrix])
).T
farm_cost = (
self.cost_lst
+ (self.cost_exp_offset if exp_demand else 0)
+ (self.cost_gold_offset if gold_demand else 0)
)
convertion_cost_lst = (
self.convertion_cost_lst
if gold_demand
else
|
np.zeros(self.convertion_cost_lst.shape)
|
numpy.zeros
|
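# Hedged sketch (not part of the dataset row above; stage/item numbers are made up): a farming
# plan like the one assembled in _get_plan_no_prioties can be solved with scipy.optimize.linprog,
# minimizing total clear cost subject to the expected drops meeting the demand.
import numpy as np
from scipy.optimize import linprog

probs_matrix = np.array([[0.5, 0.1],   # stage 0: expected drops per clear for item 0 and item 1
                         [0.0, 0.3]])  # stage 1
cost_lst = np.array([12.0, 18.0])      # cost per clear of each stage
demand_lst = np.array([10.0, 6.0])     # required amount of each item

# linprog uses A_ub @ x <= b_ub, so the "drops >= demand" constraint is negated.
res = linprog(c=cost_lst, A_ub=-probs_matrix.T, b_ub=-demand_lst, bounds=(0, None))
print(res.x)    # expected clears per stage
print(res.fun)  # estimated total cost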
import numpy as np
from Materials import CarbonFibre, SiC, AL, AG,\
ElecMix, ElecMixDense, AlHoney1,\
AlHoney2, AlHoney3, PB, TA, SN, CU
from shield_structure import Shield_Interactions, Sun_Shield_Interactions
from Polygons import Polygon2D
class Swift_Structure(object):
def __init__(self, Polygon, Material, Name=''):
self.Polygon = Polygon
self.Material = Material
self.Name = Name
def set_energy_arr(self, energy):
self.energy = energy
self.Ne = len(energy)
self.tot_rho_mus = self.Material.get_tot_rhomu(self.energy)
self.comp_rho_mus = self.Material.get_comp_rhomu(self.energy)
self.photoe_rho_mus = self.Material.get_photoe_rhomu(self.energy)
if hasattr(self, 'dists'):
self.calc_tot_rhomu_dist()
def set_batxyzs(self, batxs, batys, batzs):
self.batxs = batxs
self.batys = batys
self.batzs = batzs
self.ndets = len(batxs)
def set_theta_phi(self, theta, phi):
self.theta = theta
self.phi = phi
self.dists = self.Polygon.calc_intersection_dist(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
if hasattr(self, 'energy'):
self.calc_tot_rhomu_dist()
def get_dists(self, theta=None, phi=None):
if (np.abs(theta - self.theta) > 1e-3) or (np.abs(phi - self.phi) > 1e-3):
self.set_theta_phi(theta, phi)
return self.dists
def calc_tot_rhomu_dist(self):
self.tot_rhomu_dists = self.dists[:,np.newaxis]*self.tot_rho_mus
def get_trans(self, dist=None):
if dist is None:
dist = self.dists
# trans = np.exp(-dist*self.tot_rho_mus)
trans = np.exp(-self.tot_rhomu_dists)
return trans
def get_tot_rhomu_dist(self):
return self.tot_rhomu_dists
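# Hedged illustration (values made up, not part of the original module): get_trans above is
# plain Beer-Lambert attenuation, exp(-(rho*mu) * path length), broadcast over detectors and
# energy bins exactly as calc_tot_rhomu_dist does with dists[:, np.newaxis] * tot_rho_mus:
#   _rho_mu = np.array([0.2, 0.05])                      # rho*mu per energy bin [1/cm]
#   _dists = np.array([1.0, 2.0, 0.0])                   # path length per detector [cm]
#   _trans = np.exp(-_dists[:, np.newaxis] * _rho_mu)    # shape (ndets, Ne); 1.0 where the ray misses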
class Swift_Structure_Compound(object):
def __init__(self, ParentPolygon, ChildPolygons, Materials, Name=''):
self.Nchild = len(ChildPolygons)
self.Parent_Polygon = ParentPolygon
self.Child_Polygons = ChildPolygons
self.material_list = Materials
self.Name = Name
def set_energy_arr(self, energy):
self.energy = energy
self.Ne = len(energy)
self.tot_rho_mus_list = []
self.comp_rho_mus_list = []
self.photoe_rho_mus_list = []
for material in self.material_list:
self.tot_rho_mus_list.append(material.get_tot_rhomu(self.energy))
self.comp_rho_mus_list.append(material.get_comp_rhomu(self.energy))
self.photoe_rho_mus_list.append(material.get_photoe_rhomu(self.energy))
if hasattr(self, 'parent_dist'):
self.calc_tot_rhomu_dist()
def set_batxyzs(self, batxs, batys, batzs):
self.batxs = batxs
self.batys = batys
self.batzs = batzs
self.ndets = len(batxs)
def set_theta_phi(self, theta, phi):
self.theta = theta
self.phi = phi
self.calc_dists()
if hasattr(self, 'energy'):
self.calc_tot_rhomu_dist()
def calc_dists(self):
tot_dist = self.Parent_Polygon.calc_intersection_dist(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
tot_child_dist = 0.0
child_dists = []
for child_poly in self.Child_Polygons:
dist = child_poly.calc_intersection_dist(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
tot_child_dist += dist
child_dists.append(dist)
self.parent_dist = tot_dist - tot_child_dist
self.child_dists = child_dists
# def get_dists(self, theta=None, phi=None):
# if (np.abs(theta - self.theta) > 1e-3) or (np.abs(phi - self.phi) > 1e-3):
# self.set_theta_phi(theta, phi)
# return self.dists
def get_trans(self):
self.trans = np.exp(-self.tot_rhomu_dists)
return self.trans
def calc_tot_rhomu_dist(self):
self.tot_rhomu_dists = self.parent_dist[:, np.newaxis]*self.tot_rho_mus_list[0]
for i in range(self.Nchild):
    self.tot_rhomu_dists += self.child_dists[i][:, np.newaxis]*self.tot_rho_mus_list[i+1]
def get_tot_rhomu_dist(self):
return self.tot_rhomu_dists
class Swift_Structure_wEmbededPolys(object):
def __init__(self, ParentPolygon, ChildPolygons, Materials, Name=''):
self.Nchild = len(ChildPolygons)
self.Parent_Polygon = ParentPolygon
self.Child_Polygons = ChildPolygons
self.material_list = Materials
self.Name = Name
def set_energy_arr(self, energy):
self.energy = energy
self.Ne = len(energy)
self.tot_rho_mus_list = []
self.comp_rho_mus_list = []
self.photoe_rho_mus_list = []
for material in self.material_list:
self.tot_rho_mus_list.append(material.get_tot_rhomu(self.energy))
self.comp_rho_mus_list.append(material.get_comp_rhomu(self.energy))
self.photoe_rho_mus_list.append(material.get_photoe_rhomu(self.energy))
if hasattr(self, 'parent_dist'):
self.calc_tot_rhomu_dist()
def set_batxyzs(self, batxs, batys, batzs):
self.batxs = batxs
self.batys = batys
self.batzs = batzs
self.ndets = len(batxs)
def set_theta_phi(self, theta, phi):
self.theta = theta
self.phi = phi
self.calc_dists()
if hasattr(self, 'energy'):
self.calc_tot_rhomu_dist()
def calc_dists(self):
tot_dist = self.Parent_Polygon.calc_intersection_dist(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
tot_child_dist = 0.0
child_dists = []
for child_poly in self.Child_Polygons:
dist = child_poly.calc_intersection_dist(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
tot_child_dist += dist
child_dists.append(dist)
self.parent_dist = tot_dist - tot_child_dist
self.child_dists = child_dists
# def get_dists(self, theta=None, phi=None):
# if (np.abs(theta - self.theta) > 1e-3) or (np.abs(phi - self.phi) > 1e-3):
# self.set_theta_phi(theta, phi)
# return self.dists
def get_trans(self):
self.trans = np.exp(-self.tot_rhomu_dists)
return self.trans
def calc_tot_rhomu_dist(self):
self.tot_rhomu_dists = self.parent_dist[:,np.newaxis]*self.tot_rho_mus_list[0]
for i in range(self.Nchild):
self.tot_rhomu_dists += self.child_dists[i][:,np.newaxis]*self.tot_rho_mus_list[i+1]
def get_tot_rhomu_dist(self):
return self.tot_rhomu_dists
class Swift_Structure_Shield(object):
def __init__(self):
self.shield_obj = Shield_Interactions()
self.ds_base = [0.00254*np.array([3, 3, 2, 1]),
0.00254*np.array([8, 7, 6, 2]),
0.00254*np.array([5, 5, 4, 1]),
0.00254*np.array([3, 3, 2, 1])]
self.material_list = [PB, TA, SN, CU]
self.Nmaterials = len(self.material_list)
self.Name = 'Shield'
self.polyIDs2ignore = []
def add_polyID2ignore(self, ID):
self.polyIDs2ignore.append(ID)
def set_energy_arr(self, energy):
self.energy = energy
self.Ne = len(energy)
self.tot_rho_mus_list = []
self.comp_rho_mus_list = []
self.photoe_rho_mus_list = []
for material in self.material_list:
self.tot_rho_mus_list.append(material.get_tot_rhomu(self.energy))
self.comp_rho_mus_list.append(material.get_comp_rhomu(self.energy))
self.photoe_rho_mus_list.append(material.get_photoe_rhomu(self.energy))
if hasattr(self, 'dists'):
self.calc_tot_rhomu_dist()
def set_batxyzs(self, batxs, batys, batzs):
self.batxs = batxs
self.batys = batys
self.batzs = batzs
self.ndets = len(batxs)
def set_theta_phi(self, theta, phi):
self.theta = theta
self.phi = phi
self.angs2norm = self.shield_obj.get_angs2norm(self.theta, self.phi)
self.calc_dists()
if hasattr(self, 'energy'):
self.calc_tot_rhomu_dist()
def calc_dists(self):
self.poly_ids = self.shield_obj.which_poly_it_intersects(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs,\
polyIDs2ignore=self.polyIDs2ignore)
self.poly_ids2use = np.unique(self.poly_ids)
self.dists = [np.zeros(self.ndets) for i in range(self.Nmaterials)]
for polyid in self.poly_ids2use:
poly_bl = (self.poly_ids==polyid)
if polyid < 0:
for i in range(self.Nmaterials):
self.dists[i][poly_bl] = 0.0
continue
poly = self.shield_obj.get_poly(polyid)
layer = self.shield_obj.shield_layer[polyid]
base_dists = self.ds_base[layer]
cos_ang = np.abs(np.cos(self.angs2norm[polyid]))
for i in range(self.Nmaterials):
self.dists[i][poly_bl] = base_dists[i]/cos_ang
def calc_tot_rhomu_dist(self):
self.tot_rhomu_dists = np.zeros((self.ndets,self.Ne))
for i in range(self.Nmaterials):
self.tot_rhomu_dists += self.dists[i][:,np.newaxis]*self.tot_rho_mus_list[i]
def get_trans(self):
self.trans = np.exp(-self.tot_rhomu_dists)
return self.trans
def get_tot_rhomu_dist(self):
return self.tot_rhomu_dists
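# Hedged note (illustrative): calc_dists above turns each shield layer's normal-incidence
# thicknesses (ds_base, one entry per material) into slant-path thicknesses by dividing by
# |cos(angle to the panel normal)|, and calc_tot_rhomu_dist sums the per-material products:
#   d_i = d_base_i / |cos(ang2norm)|
#   tot_rhomu(E) = sum_i d_i * (rho*mu)_i(E)   ->   transmission = exp(-tot_rhomu)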
class Swift_Structure_Sun_Shield(object):
def __init__(self):
self.shield_obj = Sun_Shield_Interactions()
self.ds_base = 0.0145
self.material = AG
self.Name = 'SunShield'
def set_energy_arr(self, energy):
self.energy = energy
self.Ne = len(energy)
self.tot_rho_mus = self.material.get_tot_rhomu(self.energy)
self.comp_rho_mus = self.material.get_comp_rhomu(self.energy)
self.photoe_rho_mus = self.material.get_photoe_rhomu(self.energy)
if hasattr(self, 'dists'):
self.calc_tot_rhomu_dist()
def set_batxyzs(self, batxs, batys, batzs):
self.batxs = batxs
self.batys = batys
self.batzs = batzs
self.ndets = len(batxs)
def set_theta_phi(self, theta, phi):
self.theta = theta
self.phi = phi
self.angs2norm = self.shield_obj.get_angs2norm(self.theta, self.phi)
self.calc_dists()
if hasattr(self, 'energy'):
self.calc_tot_rhomu_dist()
def calc_dists(self):
self.poly_ids = self.shield_obj.which_poly_it_intersects(self.theta, self.phi,\
self.batxs, self.batys,\
self.batzs)
self.poly_ids2use =
|
np.unique(self.poly_ids)
|
numpy.unique
|
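# Hedged sketch (toy values, not part of the dataset row above): the np.unique completion lets
# the shield code visit each distinct polygon id once and update every detector that hits it
# through a boolean mask, e.g.:
import numpy as np

poly_ids = np.array([2, 2, -1, 0, 2, 0])   # polygon intersected by each detector ray (-1 = miss)
base_dist = {0: 0.5, 2: 1.2}               # made-up path length per polygon [cm]
dists = np.zeros(len(poly_ids))

for pid in np.unique(poly_ids):
    mask = (poly_ids == pid)
    dists[mask] = 0.0 if pid < 0 else base_dist[pid]
print(dists)   # [1.2 1.2 0.  0.5 1.2 0.5]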
import collections
import math
import os
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.models as torch_models
import torchvision.transforms as transforms
from numpy import linalg
from PIL import Image
from torch.autograd import Variable

from attacks.geoDict import GeoDict
from attacks.imgDict import ImgDict
# from utils import get_label
# from utils import valid_bounds, clip_image_values
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
def plot_img(img_tensor, file_name):
img = np.array(img_tensor[0].cpu().numpy()).transpose(1, 2, 0) * 255.
img = img.astype(np.uint8)
height, width = img.shape[:2]
from PIL import Image
im = Image.fromarray(img)
im.save("imgs/" + file_name + ".png")
class Evolutionary_Geo_Attack(object):
def binary_infinity(self, x_a, x, x_img, y, k, model, targeted, batch_indices, ver_lst, depth_lst):
'''
linf binary search
:param k: the number of binary search iterations
'''
b = x_a.size(0)
l = torch.zeros(b)
u, _ = (x_a - x).reshape(b, -1).abs().max(1)
for _ in range(k):
mid = (l + u) / 2
adv = self.project_infinity(x_a, x, mid)
x_a_adv = self.geoDict.convert_uv_2_ncc(adv)
x_a_img = self.geoDict.project_adv_perturbation(x_img, ver_lst, depth_lst, x_a_adv)
check = self.is_adversarial(x_a_img, y, targeted, batch_indices)
u[check.nonzero().squeeze(1)] = mid[check.nonzero().squeeze(1)]
check = check < 1
l[check.nonzero().squeeze(1)] = mid[check.nonzero().squeeze(1)]
return self.project_infinity(x_a, x, u)
def project_infinity(self, x_a, x, l):
'''
linf projection
'''
return torch.max(x - l[:, None, None, None], torch.min(x_a, x + l[:, None, None, None]))
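# Hedged note (illustrative): project_infinity clamps the candidate into the l_inf ball of
# radius l around x, elementwise proj = max(x - l, min(x_a, x + l)); binary_infinity then
# bisects that radius while keeping the result adversarial. For a single pixel with x = 0.4,
# x_a = 0.9 and l = 0.2, the projection is 0.6.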
def __init__(self, model, dict_model, use_geo=True, use_dict=True, dimension_reduction=None,
random_background=False, only_one=False):
self.model = model
self.dict_model = dict_model
self.geoDict = GeoDict()
self.imgDict = ImgDict()
self.use_dict = use_dict
self.use_geo = use_geo
self.dimension_reduction = dimension_reduction
self.count = 0
self.decay_factor = 0.99
self.c = 0.001
self.mu = 1e-2
self.sigma = 3e-2
self.num_trial = 200
self.only_one = only_one
self.random_background = random_background
self.visualize = False
def get_predict_label(self, x, batch_indices):
return self.model(x, batch_indices=batch_indices, unnormalization=False).argmax(1)
# 0.0048530106
# 0
# 0
# 8700
# tensor(3.8901)
# tensor(0.1397)
def is_adversarial(self, x, y, targeted=False, batch_indices=0, random_background=False):
'''
check whether the adversarial constraint holds for x
'''
x = x.to(device).contiguous()
if random_background:
if torch.min(self.model.get_num_queries(torch.arange(0, x.size(0)))) % 20 != 0:
if self.use_geo == True:
noised_x = (x + (torch.randn_like(x).to(device) * 0.04) * self.x_back_mask)
else:
noised_x = (x + torch.randn_like(x).to(device) * 0.02)
else:
noised_x = x
if targeted:
return torch.LongTensor((self.get_predict_label(noised_x, batch_indices) == y) + 0)
else:
return torch.LongTensor((self.get_predict_label(noised_x, batch_indices) != y) + 0)
else:
if targeted:
return torch.LongTensor((self.get_predict_label(x, batch_indices) == y) + 0)
else:
return torch.LongTensor((self.get_predict_label(x, batch_indices) != y) + 0)
def clip_and_mask(self, x, uv_mask, original_uv):
x = x * uv_mask
x = torch.min(torch.max(x, -original_uv), 1 - original_uv)
return x
def attack(self, x, y, x_s=None, targeted=False, max_queries=1000, total_search=False):
face_features = self.dict_model.get_features(x.to(device))
b = x.size(0)
# indices for unsuccessful images
indices = torch.ones(b) > 0
num_indices = torch.arange(0, b).long()
background_attack_indices = torch.ones(b) > 0
x_dtype = np.float64  # np.float is removed in recent NumPy; use an explicit dtype
if self.visualize == True: # For visualization
plot_img(x, 'x' + str(self.count))
if self.use_geo == True:
ver_lst, depth_lst = self.geoDict.get_face_alignment(x)
# initialize
myT = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ColorJitter(brightness=0.5,
contrast=0.5,
saturation=0.5,
hue=0.5),
transforms.ToTensor()])
if targeted:
assert x_s is not None
original_ncc_codes, used_original_uv_codes = self.geoDict.get_image_color(x, ver_lst, depth_lst)
original_uv_img = self.geoDict.convert_ncc_2_uv(original_ncc_codes)
target_ver_lst, target_depth_lst = self.geoDict.get_face_alignment(x_s)
target_ncc_codes, used_target_uv_codes = self.geoDict.get_image_color(x_s, target_ver_lst,
target_depth_lst)
# target_ncc_codes[used_original_uv_codes]
# target_ncc_codes[torch.logical_and(used_original_uv_codes,used_target_uv_codes)] =target_ncc_codes[torch.logical_and(used_original_uv_codes,used_target_uv_codes)] -original_ncc_codes[torch.logical_and(used_original_uv_codes,used_target_uv_codes)]
target_ncc_codes[torch.logical_and(used_original_uv_codes, ~used_target_uv_codes)] = original_ncc_codes[
torch.logical_and(used_original_uv_codes, ~used_target_uv_codes)]
# original_ncc_codes[used_original_uv_codes]=target_ncc_codes[used_original_uv_codes]-original_ncc_codes[used_original_uv_codes]
x_a_uv_o = self.geoDict.convert_ncc_2_uv(target_ncc_codes)
# overlap2 = np.array(x_a_uv_o[0].cpu().numpy()).transpose(1, 2, 0) * 255.
# overlap2 = overlap2.astype(np.uint8)
#
# plot_image(overlap2)
x_a_uv = x_a_uv_o - original_uv_img
x_a_ncc = self.geoDict.convert_uv_2_ncc(x_a_uv)
x_a = self.geoDict.project_adv_perturbation(x, ver_lst, depth_lst, x_a_ncc)
# overlap2 = np.array(x_a[0].cpu().numpy()).transpose(1, 2, 0) * 255.
# overlap2 = overlap2.astype(np.uint8)
# plot_image(overlap2)
check = self.is_adversarial(x_a, y, targeted, batch_indices=num_indices,
random_background=self.random_background)
background_attack_indices[check == True] = False
iters = 0
while check.sum() < np.shape(y)[0]:
# Data augmentation
for n in num_indices[background_attack_indices]:
x_a_uv[n] = myT(x_a_uv_o[n]) - original_uv_img[n]
x_a_ncc[background_attack_indices] = self.geoDict.convert_uv_2_ncc(
x_a_uv[background_attack_indices])
x_a[background_attack_indices] = self.geoDict.project_adv_perturbation(x[background_attack_indices],
ver_lst[
background_attack_indices],
depth_lst[
background_attack_indices],
x_a_ncc[
background_attack_indices])
check[background_attack_indices] = self.is_adversarial(x_a[background_attack_indices],
y[background_attack_indices], targeted,
num_indices[background_attack_indices],
random_background=self.random_background)
background_attack_indices[check == True] = False
iters += 1
if iters > self.num_trial:
# overlap2 = np.array(x_a[0].cpu().numpy()).transpose(1, 2, 0) * 255.
# overlap2 = overlap2.astype(np.uint8)
# plot_image(overlap2)
print('Initialization Failed!')
print('Turn to combination mode')
background_attack_indices[check == True] = False
x_a[background_attack_indices] = x_s[background_attack_indices]
check = self.is_adversarial(x_a, y, targeted, num_indices,
random_background=self.random_background)
# self.count+=1
# print(self.count, ' Error')
break
if check.sum() < y.size(0):
print('Some initial images do not belong to the target class!')
return x, torch.zeros(b)
check = self.is_adversarial(x, y, targeted, num_indices)
if check.sum() > 0:
print('Some original images already belong to the target class!')
return x, torch.zeros(b)
else: # Untargeted Attack
check = self.is_adversarial(x, y, True, num_indices)
if check.sum() < y.size(0):
print('Some original images do not belong to the original class!')
return x, torch.zeros(b)
original_ncc_codes = self.geoDict.get_image_color(x, ver_lst)
original_uv_img = self.geoDict.convert_ncc_2_uv(original_ncc_codes)
if total_search:
x_a_uv = self.geoDict.init_item(face_features, original_uv_images=original_uv_img)
x_a_uv = torch.min(torch.max(x_a_uv, -original_uv_img), 1 - original_uv_img)
# x_a_uv=x_a_uv_o.clone()
x_a_ncc = self.geoDict.convert_uv_2_ncc(x_a_uv)
x_a = self.geoDict.project_adv_perturbation(x, ver_lst, depth_lst, x_a_ncc)
min_norm = torch.norm(x_a.reshape(b, -1) - x.reshape(b, -1), dim=1)
l = self.geoDict.get_len_dict()
for i in range(l):
x_a_uv_temp = torch.unsqueeze(self.geoDict.uv_dict[i], 0)
x_a_uv_temp = torch.min(torch.max(x_a_uv_temp, -original_uv_img), 1 - original_uv_img)
x_a_ncc_temp = self.geoDict.convert_uv_2_ncc(x_a_uv_temp)
x_a_temp = self.geoDict.project_adv_perturbation(x, ver_lst, depth_lst, x_a_ncc_temp)
check = self.is_adversarial(x_a_temp, y, targeted, num_indices)
perturbation_norm = torch.norm(x_a_temp.reshape(b, -1) - x.reshape(b, -1), dim=1)
# elementwise logical AND; Python's 'and' is ambiguous for multi-element tensors
improved = (perturbation_norm < min_norm) & check.bool()
x_a_uv[improved] = x_a_uv_temp[improved]
min_norm[improved] = perturbation_norm[improved]
else:
x_a_uv = self.geoDict.init_item(face_features, original_uv_images=original_uv_img)
x_a_uv = torch.min(torch.max(x_a_uv, -original_uv_img), 1 - original_uv_img)
# x_a_uv=x_a_uv_o.clone()
x_a_ncc = self.geoDict.convert_uv_2_ncc(x_a_uv)
x_a = self.geoDict.project_adv_perturbation(x, ver_lst, depth_lst, x_a_ncc)
if self.visualize == True: # For visualization
from _3DDFA_V2.utils.io import _load, _dump
import os.path as osp
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
ncc_code = _load(make_abs_path('../_3DDFA_V2/configs/ncc_code.npy')).T * 0.6 - \
original_ncc_codes[0].numpy() * 0.2
def _to_ctype(arr):
if not arr.flags.c_contiguous:
return arr.copy(order='C')
return arr
ncc_code = _to_ctype(ncc_code)
x_a_3d = self.geoDict.project_adv_perturbation(x, ver_lst,
depth_lst,
np.expand_dims(ncc_code, 0))
plot_img(x_a_3d, 'x_a_face_img_' + str(self.count))
plot_img(x_a, 'x_a_img_' + str(self.count))
plot_img(x_a_uv * 50 + 0.5,
'x_a_' + str(self.count))
self.x_back_mask = 1 - self.geoDict.project_adv_perturbation(x, ver_lst, depth_lst,
torch.ones_like(x_a_ncc))
self.x_back_mask = self.x_back_mask.to(device)
iters = 0
# print(torch.min(x_a_uv), torch.max(x_a_uv))
check = self.is_adversarial(x_a, y, targeted, num_indices)
while check.sum() < np.shape(y)[0]:
x_a_uv[check == False] = x_a_uv[check == False] * 1.05
x_a_uv[check == False] = torch.min(
torch.max(x_a_uv[check == False], -original_uv_img[check == False]+0.2),
1 - original_uv_img[check == False]-0.2)
x_a_ncc[check == False] = self.geoDict.convert_uv_2_ncc(x_a_uv[check == False])
x_a[check == False] = self.geoDict.project_adv_perturbation(x[check == False],
ver_lst[check == False],
depth_lst[check == False],
x_a_ncc[check == False])
check = self.is_adversarial(x_a, y, targeted, num_indices, random_background=self.random_background)
iters += 1
if iters > self.num_trial:
print('Initialization failed for some images!')
break
background_attack_indices[check == True] = False
if check.sum() < np.shape(y)[0]:
iters = 0
x_a[check == False] = self.imgDict.init_item(face_features[check == False],
original_images=x[check == False])
check = self.is_adversarial(x_a, y, targeted, num_indices)
while check.sum() < np.shape(y)[0]:
x_a[check == False] = x_a[check == False] + torch.randn_like(
x_a[check == False]) * iters / self.num_trial
# x_a[check == False] = x[check == False] + (x_a[check == False] - x[check == False]) * 1.05
x_a[check == False] = x_a[check == False].clamp(0, 1)
check = self.is_adversarial(x_a, y, targeted, num_indices,
random_background=self.random_background)
iters += 1
background_attack_indices[check == True] = False
if iters > self.num_trial:
print('Initialization failed!')
return x, torch.zeros(b)
else: # Without GeoDict
# initialize
if targeted:
x_a = x_s
check = self.is_adversarial(x_a, y, targeted, num_indices)
if check.sum() < y.size(0):
print('Some initial images do not belong to the target class!')
return x, torch.zeros(b)
check = self.is_adversarial(x, y, targeted, num_indices)
if check.sum() > 0:
print('Some original images already belong to the target class!')
return x, torch.zeros(b)
else: # Untargeted Attack
check = self.is_adversarial(x, y, True, num_indices)
if check.sum() < y.size(0):
print('Some original images do not belong to the original class!')
return x, torch.zeros(b)
background_attack_indices = indices
iters = 0
x_a = self.imgDict.init_item(face_features, original_images=x)
check = self.is_adversarial(x_a, y, targeted, num_indices)
if self.visualize == True: # For visualization
plot_img(x_a, 'x_a_img_' + str(self.count))
while check.sum() < np.shape(y)[0]:
# x_a[check == False] = x_a[check == False] + torch.randn_like(
# x_a[check == False]) * iters / self.num_trial
x_a[check == False] = x[check == False] + (x_a[check == False] - x[check == False]) * 1.05
x_a[check == False] = x_a[check == False].clamp(0, 1)
check = self.is_adversarial(x_a, y, targeted, num_indices)
iters += 1
if iters > self.num_trial:
print('Initialization failed!')
return x, torch.zeros(b)
background_attack_batch_size = background_attack_indices.sum()
if background_attack_batch_size > 0:
x_shape = x.size()
pert_shape_img = (x_shape[1], *self.dimension_reduction)
N_img = np.prod(pert_shape_img)
K_img = int(N_img / 20)
evolution_path_img = torch.zeros((b, *pert_shape_img))
diagonal_covariances_img = np.ones((b, *pert_shape_img), dtype=x_dtype)
mu_img = torch.ones(b) * self.mu
stats_adversarial_img = [collections.deque(maxlen=30) for i in range(b)]
x_a_img = x_a.clone()
if b - background_attack_batch_size > 0:
# x_a_uv = self.binary_infinity(x_a_uv,torch.zeros_like(x_a_uv), x,y, 10, self.model, targeted, num_indices,ver_lst,depth_lst)
x_uv_shape = x_a_uv.size()
pert_shape = (x_uv_shape[1],
*self.dimension_reduction) # int(x_uv_shape[2]/resize_factor),int(x_uv_shape[3]/resize_factor))
N = np.prod(pert_shape)
K = int(N / 20)
evolution_path = torch.zeros((b, *pert_shape))
x_a_uv_c = x_a_uv.clone()
diagonal_covariances = np.ones((b, *pert_shape), dtype=x_dtype)
x_a_uv_mask = self.geoDict.convert_ncc_2_uv(torch.ones_like(original_ncc_codes))
x_a_uv_mask[:, :, :20, :] = 1
x_a_uv_mask[:, :, 36:76, 36:76] = 1
x_a_uv = x_a_uv * x_a_uv_mask
diagonal_covariances = diagonal_covariances * torch.nn.functional.upsample_bilinear(x_a_uv_mask,
self.dimension_reduction).numpy()
stats_adversarial = [collections.deque(maxlen=30) for i in range(b)]
mu = torch.ones(b) * self.mu
perturbation = torch.zeros((b, x.size(1), *self.dimension_reduction))
# q_num: current queries
step = 0
# x_advs=torch.zeros_like(x)
while torch.min(self.model.get_num_queries(num_indices)) < max_queries:
# print(torch.norm(x_a - x_s), torch.norm(x_a - x))
# if torch.min(self.model.get_num_queries(num_indices)) %100==0:
# print(torch.min(x_a_uv),torch.max(x_a_uv))
Q = self.model.get_num_queries(num_indices)
for b_c in torch.arange(0, b)[~background_attack_indices].long():
if self.visualize == True: # For visualization
if Q[b_c] % 1000 == 0:
if self.use_geo:
x_a = self.geoDict.project_adv_perturbation(x, ver_lst,
depth_lst,
self.geoDict.convert_uv_2_ncc(
x_a_uv_mask))
plot_img(x_a, 'x_a_' + str(self.count) + '_' + str(Q[b_c].item()))
plot_img(x_a_uv * 50 + 0.5,
'x_a_uv_' + str(self.count) + '_' + str(Q[b_c].item()))
if Q[b_c] < max_queries:
unnormalized_source_direction = -x_a_uv[b_c]
source_norm = torch.norm(unnormalized_source_direction)
selection_probability = diagonal_covariances[b_c].reshape(-1) / np.sum(diagonal_covariances[b_c])
selected_indices = np.random.choice(N, K, replace=False, p=selection_probability)
perturbation[b_c] = torch.randn(pert_shape)
factor = torch.zeros(N)
factor[selected_indices] = 1
perturbation[b_c] *= factor.reshape(pert_shape) * np.sqrt(diagonal_covariances[b_c])
if self.dimension_reduction:
perturbation_large = torch.nn.functional.upsample_bilinear(perturbation[b_c].unsqueeze(0),
x_uv_shape[2:]).squeeze()
else:
perturbation_large = perturbation[b_c]
biased = x_a_uv[b_c] + mu[b_c] * unnormalized_source_direction
x_a_uv_c[b_c] = biased + self.sigma * source_norm * perturbation_large / torch.norm(
perturbation_large)
x_a_uv_c[b_c] = (x_a_uv_c[b_c]) / torch.norm(x_a_uv_c[b_c]) * torch.norm(biased)
x_a_uv_c[b_c] = torch.min(torch.max(x_a_uv_c[b_c], -original_uv_img[b_c]), 1 - original_uv_img[b_c])
x_a[b_c] = self.geoDict.project_adv_perturbation(x[b_c].unsqueeze(0), ver_lst[b_c].unsqueeze(0),
depth_lst[b_c].unsqueeze(0),
self.geoDict.convert_uv_2_ncc(
x_a_uv_c[b_c].unsqueeze(0)))
for b_c in torch.arange(0, b)[background_attack_indices].long():
if self.visualize == True: # For visualization
if Q[b_c] % 1000 == 0:
if self.use_geo == False:
plot_img(x_a_img, 'x_a_img_final_' + str(self.count))
plot_img((x_a_img[background_attack_indices] - x[background_attack_indices]) * 50 + 0.5,
'x_a_' + str(self.count) + '_' + str(Q[b_c]))
if Q[b_c] < max_queries:
unnormalized_source_direction = x[b_c] - x_a_img[b_c]
source_norm = torch.norm(unnormalized_source_direction)
selection_probability = diagonal_covariances_img[b_c].reshape(-1) / np.sum(
diagonal_covariances_img[b_c])
selected_indices = np.random.choice(N_img, K_img, replace=False, p=selection_probability)
perturbation[b_c] = torch.randn(pert_shape_img)
factor = torch.zeros(N_img)
factor[selected_indices] = 1
perturbation[b_c] *= factor.reshape(pert_shape_img) *
|
np.sqrt(diagonal_covariances_img[b_c])
|
numpy.sqrt
|
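# Hedged sketch (toy shapes, not part of the dataset row above): the np.sqrt completion scales a
# sparse Gaussian perturbation by the per-coordinate standard deviation of the evolved diagonal
# covariance, mirroring the evolution-strategy step used in the attack loop above.
import numpy as np

N, K = 64, 3                                  # total coordinates, coordinates mutated per step
diagonal_covariance = np.ones(N)              # evolved per-coordinate variances
selection_probability = diagonal_covariance / np.sum(diagonal_covariance)
selected = np.random.choice(N, K, replace=False, p=selection_probability)

perturbation = np.random.randn(N)
factor = np.zeros(N)
factor[selected] = 1.0
perturbation *= factor * np.sqrt(diagonal_covariance)   # only K coordinates move, scaled by std
print(np.count_nonzero(perturbation))                   # at most K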
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 20 11:23:59 2017
@author: mariosm
"""
import pandas as pd
from nltk.corpus import stopwords
from collections import Counter
import numpy as np
import sys
import random
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from scipy.sparse import csr_matrix,hstack
#stops = set(stopwords.words("english"))
stops = set(["http","www","img","border","home","body","a","about","above","after","again","against","all","am","an",
"and","any","are","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can't",
"cannot","could","couldn't","did","didn't","do","does","doesn't","doing","don't","down","during","each","few","for","from",
"further","had","hadn't","has","hasn't","have","haven't","having","he","he'd","he'll","he's","her","here","here's","hers",
"herself","him","himself","his","how","how's","i","i'd","i'll","i'm","i've","if","in","into","is","isn't","it","it's","its",
"itself","let's","me","more","most","mustn't","my","myself","no","nor","not","of","off","on","once","only","or","other","ought",
"our","ours","ourselves","out","over","own","same","shan't","she","she'd","she'll","she's","should","shouldn't","so","some","such",
"than","that","that's","the","their","theirs","them","themselves","then","there","there's","these","they","they'd","they'll","they're",
"they've","this","those","through","to","too","under","until","up","very","was","wasn't","we","we'd","we'll","we're","we've","were",
"weren't","what","what's","when","when's""where","where's","which","while","who","who's","whom","why","why's","with","won't","would",
"wouldn't","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves" ])
weights={}
def fromsparsetofile(filename, array, deli1=" ", deli2=":",ytarget=None):
zsparse=csr_matrix(array)
indptr = zsparse.indptr
indices = zsparse.indices
data = zsparse.data
print(" data lenth %d" % (len(data)))
print(" indices lenth %d" % (len(indices)))
print(" indptr lenth %d" % (len(indptr)))
f=open(filename,"w")
counter_row=0
for b in range(0,len(indptr)-1):
# if there is a target, print it; otherwise print nothing
if ytarget is not None:
f.write(str(ytarget[b]) + deli1)
for k in range(indptr[b],indptr[b+1]):
if (k==indptr[b]):
if np.isnan(data[k]):
f.write("%d%s%f" % (indices[k],deli2,-1))
else :
f.write("%d%s%f" % (indices[k],deli2,data[k]))
else :
if np.isnan(data[k]):
f.write("%s%d%s%f" % (deli1,indices[k],deli2,-1))
else :
f.write("%s%d%s%f" % (deli1,indices[k],deli2,data[k]))
f.write("\n")
counter_row+=1
if counter_row%10000==0:
print(" row : %d " % (counter_row))
f.close()
# If a word appears only once, we ignore it completely (likely a typo)
# Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller
def get_weight(count, eps=5000.0, min_count=2.0):
if count < min_count:
return 0.0
else:
return 1.0 / (count + eps)
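# Hedged usage sketch (toy corpus, illustrative only): turning raw word counts into the
# smoothed inverse-frequency weights consumed by word_shares below.
#   from collections import Counter
#   toy_counts = Counter("how do i learn python how do i start".split())
#   toy_weights = {w: get_weight(c) for w, c in toy_counts.items()}
#   # words seen once get weight 0.0; 'how', 'do', 'i' (count 2) get 1.0 / (2 + 5000)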
def word_shares(row,wei,stop):
q1 = set(str(row['question1']).lower().split())
q1words = q1.difference(stop)
if len(q1words) == 0:
return '0:0:0:0:0'
q2 = set(str(row['question2']).lower().split())
q2words = q2.difference(stop)
if len(q2words) == 0:
return '0:0:0:0:0'
q1stops = q1.intersection(stop)
q2stops = q2.intersection(stop)
shared_words = q1words.intersection(q2words)
#print(len(shared_words))
shared_weights = [wei.get(w, 0) for w in shared_words]
total_weights = [wei.get(w, 0) for w in q1words] + [wei.get(w, 0) for w in q2words]
R1 = np.sum(shared_weights) / np.sum(total_weights) #tfidf share
R2 = float(len(shared_words)) / (float(len(q1words)) + float(len(q2words))) #count share
R31 = float(len(q1stops)) / float(len(q1words)) #stops in q1
R32 = float(len(q2stops)) / float(len(q2words)) #stops in q2
return '{}:{}:{}:{}:{}'.format(R1, R2, float(len(shared_words)), R31, R32)
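# Hedged sketch (toy row, illustrative only): word_shares takes a row exposing
# 'question1'/'question2', the weight dict and the stop set, and returns five colon-separated
# values: tfidf share, count share, shared word count, and the two stopword ratios.
#   _row = {'question1': 'How do I learn Python', 'question2': 'What is the best way to learn Python'}
#   word_shares(_row, toy_weights, stops)   # -> 'R1:R2:n_shared:stops1_ratio:stops2_ratio'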
def main():
input_folder="input/" # set your input folder here
df_train = pd.read_csv(input_folder + 'train.csv')
df_test = pd.read_csv(input_folder + 'test.csv')
print("Original data: X_train: {}, X_test: {}".format(df_train.shape, df_test.shape))
train_mix = (df_train['question1']+ " " + df_train['question2']).astype(str).values
test_mix = (df_test['question1']+ " " + df_test['question2'] ).astype(str).values
print("Features processing, be patient...")
train_qs = pd.Series(df_train['question1'].tolist() + df_train['question2'].tolist()).astype(str)
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in counts.items()}
#stops = set(stopwords.words("english"))
X = pd.DataFrame()
X_test = pd.DataFrame()
# word_shares indexes the row by column name, so rows must be passed as Series (no raw=True)
df_train['word_shares'] = df_train.apply(word_shares, args=(weights, stops), axis=1)
df_test['word_shares'] = df_test.apply(word_shares, args=(weights, stops), axis=1)
X['word_match'] = df_train['word_shares'].apply(lambda x: float(x.split(':')[0]))
X['tfidf_word_match'] = df_train['word_shares'].apply(lambda x: float(x.split(':')[1]))
X['shared_count'] = df_train['word_shares'].apply(lambda x: float(x.split(':')[2]))
X['stops1_ratio'] = df_train['word_shares'].apply(lambda x: float(x.split(':')[3]))
X['stops2_ratio'] = df_train['word_shares'].apply(lambda x: float(x.split(':')[4]))
X['diff_stops_r'] = X['stops1_ratio'] - X['stops2_ratio']
X['len_q1'] = df_train['question1'].apply(lambda x: len(str(x)))
X['len_q2'] = df_train['question2'].apply(lambda x: len(str(x)))
X['diff_len'] = X['len_q1'] - X['len_q2']
X['len_char_q1'] = df_train['question1'].apply(lambda x: len(str(x).replace(' ', '')))
X['len_char_q2'] = df_train['question2'].apply(lambda x: len(str(x).replace(' ', '')))
X['diff_len_char'] = X['len_char_q1'] - X['len_char_q2']
X['len_word_q1'] = df_train['question1'].apply(lambda x: len(str(x).split()))
X['len_word_q2'] = df_train['question2'].apply(lambda x: len(str(x).split()))
X['diff_len_word'] = X['len_word_q1'] - X['len_word_q2']
X['avg_world_len1'] = X['len_char_q1'] / X['len_word_q1']
X['avg_world_len2'] = X['len_char_q2'] / X['len_word_q2']
X['diff_avg_word'] = X['avg_world_len1'] - X['avg_world_len2']
X['exactly_same'] = (df_train['question1'] == df_train['question2']).astype(int)
X_test['word_match'] = df_test['word_shares'].apply(lambda x: float(x.split(':')[0]))
X_test['tfidf_word_match'] = df_test['word_shares'].apply(lambda x: float(x.split(':')[1]))
X_test['shared_count'] = df_test['word_shares'].apply(lambda x: float(x.split(':')[2]))
X_test['stops1_ratio'] = df_test['word_shares'].apply(lambda x: float(x.split(':')[3]))
X_test['stops2_ratio'] = df_test['word_shares'].apply(lambda x: float(x.split(':')[4]))
X_test['diff_stops_r'] = X_test['stops1_ratio'] - X_test['stops2_ratio']
X_test['len_q1'] = df_test['question1'].apply(lambda x: len(str(x)))
X_test['len_q2'] = df_test['question2'].apply(lambda x: len(str(x)))
X_test['diff_len'] = X_test['len_q1'] - X_test['len_q2']
X_test['len_char_q1'] = df_test['question1'].apply(lambda x: len(str(x).replace(' ', '')))
X_test['len_char_q2'] = df_test['question2'].apply(lambda x: len(str(x).replace(' ', '')))
X_test['diff_len_char'] = X_test['len_char_q1'] - X_test['len_char_q2']
X_test['len_word_q1'] = df_test['question1'].apply(lambda x: len(str(x).split()))
X_test['len_word_q2'] = df_test['question2'].apply(lambda x: len(str(x).split()))
X_test['diff_len_word'] = X_test['len_word_q1'] - X_test['len_word_q2']
X_test['avg_world_len1'] = X_test['len_char_q1'] / X_test['len_word_q1']
X_test['avg_world_len2'] = X_test['len_char_q2'] / X_test['len_word_q2']
X_test['diff_avg_word'] = X_test['avg_world_len1'] - X_test['avg_world_len2']
X_test['exactly_same'] = (df_test['question1'] == df_test['question2']).astype(int)
print (
|
np.mean(X['word_match'])
|
numpy.mean
|
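# Hedged sketch (toy numbers, not part of the dataset row above): the np.mean completion
# summarizes a feature column; the word_match feature itself is the np.sum ratio computed
# in word_shares.
import numpy as np

shared_weights = np.array([0.002, 0.001])                 # weights of words common to q1 and q2
total_weights = np.array([0.002, 0.001, 0.004, 0.003])    # weights of all words in q1 and q2
word_match = np.sum(shared_weights) / np.sum(total_weights)

word_match_column = np.array([word_match, 0.10, 0.55])    # one value per question pair
print(np.mean(word_match_column))                         # dataset-level average of the feature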
"""
Structure contains the description of a set of atoms in space; for periodic structures it also carries a lattice.
"""
try:
import itertools.izip as zip
except ImportError:
pass
import json
import os
import struct
import sys
import numpy as np
from collections.abc import MutableSequence
from itertools import combinations, repeat
from math import sin, cos
from multiprocessing import Pool
from pychemia import pcm_log
from pychemia.crystal.lattice import Lattice
from pychemia.core.composition import Composition
from pychemia.core.delaunay import get_reduced_bases
from pychemia.utils.computing import deep_unicode
from pychemia.utils.periodic import mass, atomic_number, covalent_radius, valence, atomic_symbols
import scipy.spatial
class Structure(MutableSequence):
"""
Define an object that contains information about atomic positions,
cell parameters and periodicity and provides methods to manipulate
those elements
A Structure is basically a set of sites, optionally with a lattice;
each site can hold one or more species with occupancies equal to
or lower than one.
A Structure can represent a molecule, cluster, wire, slab,
crystal structure or alloy with defined sites.
The positions of the atoms and their atomic symbols are declared
in 'positions' and 'symbols' respectively.
For periodic structures, the 'periodicity' can be declared
and the cell parameters given in 'cell'.
Magnetic moments can be associated in the array vector_info['mag_moments'].
"""
def __init__(self, natom=None, symbols=None, periodicity=False, cell=None, positions=None, reduced=None,
mag_moments=None, occupancies=None, sites=None, name=None, comment=None, vector_info=None):
""" Structure is a container for geometric structure and composition for both periodic and non periodic
atomic structures
:param natom: Number of atoms
:param symbols: List of atomic symbols
:param periodicity: True if the structure is periodic, False for finite structures, or a list of booleans for
structures that are periodic only along specific directions.
:param cell: The cell parameters; can be a scalar for a cubic cell, a list of 3 numbers for an orthogonal cell,
or a complete numpy array or list of lists. Each row is considered a cell vector.
:param positions: Array of rows with 3 elements for the positions of atoms in cartesian coordinates.
:param reduced: Positions of atoms as reduced coordinates relative to the cell vectors ie cell-scaled in range
[0,1]
:param mag_moments: Magnetic moments for atoms in the structure
:param occupancies: Atomic occupancies. 1 by default, lower values for vacancies and non-perfect crystals.
:param sites: Atomic sites
>>> a = Structure()
>>> print(a)
Empty structure
>>> a = Structure(symbols=['Xe'])
>>> print(a.natom)
1
>>> d = 1.104
>>> a = Structure(symbols=['N', 'N'], positions=[[0, 0, 0], [0, 0, d]], periodicity=False)
>>> print(a.natom)
2
>>> a = 4.05
>>> b = a/2
>>> fcc = Structure(symbols=['Au'], cell=[[0, b, b], [b, 0, b], [b, b, 0]], periodicity=True)
>>> print(fcc.natom)
1
"""
self.vector_info = {}
self.name = None
self.comment = None
self.natom = None
self.symbols = None
self.positions = None
self.reduced = None
self.cell = None
self.periodicity = None
self.vector_info['mag_moments'] = None
self.sites = None
self.occupancies = None
self._lattice = None
self._composition = None
self.vector_info = None
# By default the number of atoms will be the value given or zero except if other information overrules
# that value
if natom is not None:
self.natom = int(natom)
else:
self.natom = 0
if symbols is not None:
if symbols in atomic_symbols:
self.symbols = [symbols]
else:
for iatom in list(symbols):
assert(iatom in atomic_symbols)
self.symbols = list(symbols)
self.natom = len(self.symbols)
else:
if self.natom != 0:
raise ValueError('List of atomic symbols not provided for structure with %d atoms' % self.natom)
# No periodicity will be assumed except if cell or reduced coordinates are provided
if periodicity is None:
periodicity = 3*[False]
if isinstance(periodicity, bool):
periodicity = 3*[periodicity]
self.set_periodicity(periodicity)
if cell is not None:
cell = np.array(cell)
self.set_cell(cell)
self.periodicity = 3*[True]
if positions is not None:
positions = np.array(positions)
self.set_positions(positions)
if reduced is not None:
reduced = np.array(reduced)
self.set_reduced(reduced)
self.periodicity = 3*[True]
if mag_moments is not None:
self.set_mag_moments(np.array(mag_moments))
if occupancies is not None:
self.occupancies = list(occupancies)
if sites is not None:
self.sites = sites
if vector_info is None:
self.vector_info = {'mag_moments': None}
else:
self.vector_info = vector_info
self.name = name
self.comment = comment
# This routine completes the missing values and makes all the values coherent.
self._autocomplete()
if not self._check():
raise ValueError('Arguments non consistent')
def __len__(self):
""" Number of sites in structure.
For perfect crystals it will match the number of atoms, as each atomic site holds a single atom.
:return: Number of sites in structure
:rtype: int
>>> st = Structure(symbols=['H', 'O'], positions= [[0,0,0], [0,0,1]])
>>> len(st)
2
"""
return self.nsites
def __str__(self):
"""String representation of Structure
:return: Human readable text for Structure
>>> st = Structure(symbols=['Xe'])
>>> print(st)
1
<BLANKLINE>
Symb ( Positions )
Xe ( 0.0000 0.0000 0.0000 )
<BLANKLINE>
Non-periodic structure
>>> a = 4.05
>>> b = a/2
>>> fcc = Structure(symbols=['Au'], cell=[[0, b, b], [b, 0, b], [b, b, 0]], periodicity=True)
>>> print(fcc)
1
<BLANKLINE>
Symb ( Positions ) [ Cell-reduced coordinates ]
Au ( 0.0000 0.0000 0.0000 ) [ 0.0000 0.0000 0.0000 ]
<BLANKLINE>
Periodicity: X Y Z
<BLANKLINE>
Lattice vectors:
0.0000 2.0250 2.0250
2.0250 0.0000 2.0250
2.0250 2.0250 0.0000
<BLANKLINE>
"""
if self.natom == 0:
xyz = 'Empty structure'
else:
xyz = str(self.natom) + '\n\n'
if self.is_crystal:
xyz += 'Symb ( Positions ) [ Cell-reduced coordinates ]\n'
else:
xyz += 'Symb ( Positions )\n'
for i in range(self.natom):
if self.is_crystal:
xyz += ("%4s ( %10.4f %10.4f %10.4f ) [ %10.4f %10.4f %10.4f ]\n"
% (self.symbols[i],
self.positions[i, 0],
self.positions[i, 1],
self.positions[i, 2],
self.reduced[i, 0],
self.reduced[i, 1],
self.reduced[i, 2]))
else:
xyz += ("%4s ( %10.4f %10.4f %10.4f )\n"
% (self.symbols[i],
self.positions[i, 0],
self.positions[i, 1],
self.positions[i, 2]))
if self.periodicity[0] or self.periodicity[1] or self.periodicity[2]:
xyz += '\nPeriodicity: '
if self.periodicity[0]:
xyz += ' X'
if self.periodicity[1]:
xyz += ' Y'
if self.periodicity[2]:
xyz += ' Z'
xyz += '\n\nLattice vectors:\n'
for i in range(3):
xyz += (" %10.4f %10.4f %10.4f\n"
% (self.cell[i, 0], self.cell[i, 1], self.cell[i, 2]))
else:
xyz += '\nNon-periodic structure'
return xyz
def __repr__(self):
"""
Evaluatable representation of Structure
:return: String representation of the structure
:rtype: str
>>> st1 = Structure(symbols=['H'])
>>> st2 = eval(repr(st1))
>>> st1 == st2
True
>>> st = Structure(symbols='He', cell=[2,2,2])
>>> st
Structure(symbols=['He'], cell=2, reduced=[[0.0, 0.0, 0.0]], periodicity=True)
"""
ret = 'Structure(symbols=' + str(self.symbols)
if self.is_periodic:
if np.all(np.diag(self.cell.diagonal()) == self.cell):
if np.max(self.cell.diagonal()) == np.min(self.cell.diagonal()):
ret += ', cell=' + str(self.cell[0, 0])
else:
ret += ', cell=' + str(self.cell.diagonal().tolist())
else:
ret += ', cell=' + str(self.cell.tolist())
ret += ', reduced=' + str(self.reduced.tolist())
else:
ret += ', positions=' + str(self.positions.tolist())
if all([self.periodicity[0] == item for item in self.periodicity]):
ret += ', periodicity=' + str(self.periodicity[0])
else:
ret += ', periodicity=' + str(self.periodicity)
ret += ')'
return ret
def __delitem__(self, key):
self.del_atom(key)
def __setitem__(self, key, value):
self.add_atom(value['symbols'], value['positions'])
def __getitem__(self, item):
return SiteSet(self)[item]
def __iter__(self):
return iter(SiteSet(self))
def insert(self, index, value):
self.add_atom(value['symbols'], value['positions'])
def _autocomplete(self):
if self.natom is None:
if self.positions is not None:
self.natom = len(self.positions)
elif self.reduced is not None:
self.natom = len(self.reduced)
elif self.symbols is not None:
self.natom = len(self.symbols)
else:
self.natom = 0
if self.symbols is None and self.natom == 0:
self.symbols = []
if self.periodicity is None:
self.set_periodicity(True)
if self.cell is None and self.is_periodic:
self.set_cell(1)
if self.positions is None:
if self.reduced is not None:
self.reduced2positions()
else:
if self.natom == 0:
self.positions = np.array([])
elif self.natom == 1:
self.positions = np.array([[0.0, 0.0, 0.0]])
else:
raise ValueError('Positions must be present for more than 1 atom')
if self.reduced is None and self.is_crystal:
if self.positions is not None and self.natom > 0:
self.positions2reduced()
else:
self.reduced = np.array([])
if self.sites is None:
self.sites = range(self.natom)
if self.occupancies is None:
self.occupancies = self.natom * [1.0]
def _check(self):
check = True
if len(self.symbols) != self.natom:
print('Error: Bad symbols')
check = False
if len(self.positions) != self.natom:
print('Error: Bad positions')
check = False
if self.is_crystal and len(self.reduced) != self.natom:
print('Error: Bad reduced')
check = False
if self.vector_info['mag_moments'] is not None and len(self.vector_info['mag_moments']) != self.natom:
print('Error: Bad mag_moments')
check = False
return check
def add_atom(self, name, coordinates, option='cartesian'):
"""
Add an atom with a given 'name' and cartesian or reduced 'position'
The atom will be added at the end of the list of atoms in the Structure
:param name: (str)
:param coordinates: (list, numpy.array)
:param option: (str)
"""
assert (name in atomic_symbols)
assert (option in ['cartesian', 'reduced'])
self.symbols.append(name)
self.natom += 1
self._composition = None
if option == 'cartesian':
if self.natom == 0:
self.positions = np.array(coordinates).reshape([-1, 3])
else:
self.positions = np.append(self.positions, coordinates).reshape([-1, 3])
self.positions2reduced()
elif option == 'reduced':
if self.natom == 0:
self.reduced = np.array(coordinates).reshape([-1, 3])
else:
self.reduced = np.append(self.reduced, coordinates).reshape([-1, 3])
self.reduced2positions()
def del_atom(self, index):
"""
Removes the atom with the given index
:param index:
:return:
"""
assert (abs(index) < self.natom)
self.symbols.pop(index)
# np.delete returns a new array, so assign the result back
self.positions = np.delete(self.positions, index, 0)
if self.is_periodic:
    self.reduced = np.delete(self.reduced, index, 0)
self.natom -= 1
self._composition = None
def center_mass(self, list_of_atoms=None):
"""
Computes the center of mass (CM) of the XYZ object or
a partial list of atoms. The default is to compute the
CM of all the atoms in the object; if a list
is entered, only those atoms will be included in the CM.
Return the CM as a numpy array
"""
if list_of_atoms is None:
list_of_atoms = range(self.natom)
total_mass = 0.0
center_of_mass = np.zeros(3)
if self.natom == 0:
return center_of_mass
atomicnumber = atomic_number(list(self.symbols))
for i in range(self.natom):
if i in list_of_atoms:
total_mass += mass(atomicnumber[i])
center_of_mass += mass(atomicnumber[i]) * self.positions[i]
return center_of_mass / total_mass
def rotation(self, tx, ty, tz):
"""
Rotate the molecule in the three directions
"""
rotationx = np.array([[1, 0, 0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
rotationy = np.array([[cos(ty), 0, sin(ty)], [0, 1, 0], [-sin(ty), 0, cos(ty)]])
rotationz = np.array([[cos(tz), -sin(tz), 0], [sin(tz), cos(tz), 0], [0, 0, 1]])
rotation = np.dot(np.dot(rotationx, rotationy), rotationz)
for i in range(self.natom):
self.positions[i] = np.dot(rotation, self.positions[i])
def get_cell(self):
if self._lattice is None:
self._lattice = Lattice(self.cell)
return self._lattice
@property
def lattice(self):
return self.get_cell()
def get_composition(self, gcd=True):
"""
Computes the composition of the Structure
as the count of each species in the cell
If gcd is True the values are divided by the
greatest common divisor
:param gcd: bool
:rtype : Composition
"""
if self._composition is None:
species = {}
for atom in self.symbols:
if atom in species:
species[atom] += 1
else:
species[atom] = 1
self._composition = Composition(species)
return self._composition
def positions2reduced(self):
"""
Computes the cell-reduced coordinates from the
cartesian dimensional coordinates
"""
self.reduced = np.linalg.solve(self.cell.T, self.positions.T).T
for i in range(3):
if self.periodicity[i]:
self.reduced[:, i] %= 1.0
def reduced2positions(self):
"""
Computes the dimensional cartesian coordinates
from the adimensional cell-reduced coordinates
"""
self.positions = np.dot(self.reduced, self.cell)
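# Hedged worked example (illustrative): for a cubic cell of side 2 (cell = 2*I), a cartesian
# position [1, 1, 1] maps to reduced coordinates solve(cell.T, positions.T).T = [0.5, 0.5, 0.5],
# and reduced2positions inverts this with positions = reduced . cell.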
def relocate_to_cm(self, list_of_atoms=None):
"""
Relocates the system of atoms to the center of mass.
A partial list of atoms can be used to compute
the center, but all the atoms are moved to the
computed center.
:param list_of_atoms: (list) List of atoms that will be considered for computing the center of mass
by default all atoms are included
"""
cm = self.center_mass(list_of_atoms)
self.positions += -cm
def get_distance(self, iatom, jatom, with_periodicity=True, tolerance=1e-5):
"""
Calculates the distance between 2 atom, identified by index
iatom and jatom
:param iatom: (int) index of first atom
:param jatom: (int) index of second atom
:param with_periodicity: (bool) if the periodic images should be considered to compute the shortest distance
:param tolerance: (float) Tolerance for the bases reduction
:rtype : (float) distance between iatom and jatom
"""
if with_periodicity:
reduced_bases = get_reduced_bases(self.cell, tolerance)
scaled_pos = np.dot(self.positions, np.linalg.inv(reduced_bases))
# move scaled atomic positions into -0.5 < r <= 0.5
for pos in scaled_pos:
pos -= pos.round()
# Look for the shortest one in surrounded 3x3x3 cells
distances_list = []
for i in (-1, 0, 1):
for j in (-1, 0, 1):
for k in (-1, 0, 1):
distances_list.append(np.linalg.norm(
np.dot(scaled_pos[iatom] - scaled_pos[jatom] +
np.array([i, j, k]), reduced_bases)))
ret = min(distances_list)
else:
posi = self.positions[iatom]
posj = self.positions[jatom]
ret = np.linalg.norm(posi - posj)
return ret
@staticmethod
def random_cell(composition, method='stretching', stabilization_number=20, nparal=5, periodic=True,
factor_optimal_volume=8):
"""
Generate a random cell
There are two algorithms implemented:
scaling: Generate a random cell and random distribution of atoms and
scale the lattice to separate the atoms.
stretching: Generating a random cell and random distribution of atoms
and stretching their bonds until the distance between any
two atoms is always greater than the sum of covalent radius.
:param composition: (pychemia.Composition)
:param method: (str)
:param stabilization_number: (int)
:param nparal: (int)
:param periodic: (bool)
:param factor_optimal_volume: (float)
:return:
>>> import os
>>> st = Structure.random_cell('LiAlCl4', stabilization_number=3)
>>> st.natom
6
>>> st.save_json('test.json')
>>> st2 = Structure.load_json('test.json')
>>> st == st2
True
>>> os.remove('test.json')
"""
comp = Composition(composition)
pcm_log.debug('Generating a random structure with composition: ' + str(comp.composition))
natom = comp.natom
symbols = comp.symbols
best_volume = float('inf')
best_structure = None
optimal_volume = comp.covalent_volume('cubes')
stabilization_history = 0
pool = Pool(processes=nparal)
trial = 0
while stabilization_history < stabilization_number:
args = list(best_volume * np.ones(10))
ret = pool.map(worker_star, zip(repeat(method), repeat(composition), repeat(periodic), args))
ngood = 0
for structure in ret:
if structure is not None:
# print('SH:%d Vol:%10.3f Factor:%10.3f' % (stabilization_history,
# structure.volume,
# structure.volume / optimal_volume))
ngood += 1
if best_structure is not None:
if structure.volume < best_structure.volume:
best_structure = structure
else:
best_structure = structure
# log.debug('Good structures: %d/10 Best volume: %7.3f' % (ngood, best_structure.volume))
if best_structure is not None and best_volume > best_structure.volume:
best_volume = best_structure.volume
stabilization_history = 0
else:
stabilization_history += 1
if best_volume < factor_optimal_volume * optimal_volume:
break
trial += 1
# log.debug('Trial: %4d Volume: %7.2f Optimal Volume: %7.2f Ratio: %5.2f' %
# (trial, best_volume, optimal_volume, best_volume/optimal_volume))
pool.close()
if best_structure is not None and periodic:
# Analysis of the quality for the best structure
rpos = best_structure.reduced
for i, j in combinations(range(natom), 2):
distance = best_structure.lattice.minimal_distance(rpos[i], rpos[j])
covalent_distance = sum(covalent_radius([symbols[i], symbols[j]]))
if distance < covalent_distance:
pcm_log.debug('Covalent distance: %7.4f Minimal distance: %7.4f Difference: %7.3e' %
(covalent_distance, distance, covalent_distance - distance))
best_structure.canonical_form()
return best_structure
@staticmethod
def random_cluster(composition, method='stretching', stabilization_number=20, nparal=5):
st = Structure.random_cell(composition=composition, method=method, stabilization_number=stabilization_number,
nparal=nparal, periodic=False)
return Structure(symbols=st.symbols, positions=st.positions, periodicity=False)
def adjust_reduced(self):
for i in range(self.natom):
for j in range(3):
for value in [0.5, 0.25, 0.75, 0.125]:
if abs(value - self.reduced[i, j]) < 1E-4:
self.reduced[i, j] = value
self.reduced2positions()
def set_cell(self, cell):
"""
Set the vectors defining the cell
:param cell: A matrix with the 3 unit cell
vectors
:return:
"""
npcell = np.array(cell)
if npcell.shape == () or npcell.shape == (1,):
self.cell = npcell * np.eye(3)
elif npcell.shape == (3,):
self.cell = np.diag(npcell)
else:
self.cell = np.array(cell).reshape((3, 3))
self._lattice = None
def set_mag_moments(self, mag_moments):
"""
Set the magnetic moments with one vector on each
atom
Args:
mag_moments: List or numpy array with one
vector for each atom.
The values will be converted into a numpy array
"""
self.vector_info['mag_moments'] = np.array(mag_moments).reshape([-1, 3])
def set_periodicity(self, periodicity):
"""
Set periodicity of the structure
Args:
periodicity: (Boolean) a single value means that the structure has that
periodicity all along the 3 directions. Otherwise a list
of 3 booleans is required
"""
if isinstance(periodicity, bool):
self.periodicity = 3 * [periodicity]
elif isinstance(periodicity, list) and len(periodicity) == 1:
self.periodicity = 3 * periodicity
else:
self.periodicity = list(periodicity)
def set_positions(self, positions):
"""
Set the positions of the atoms
This contains dimensional values
in cartesian coordinates
Args:
positions: A array of 3 vectors
with dimensional coordinates
"""
self.positions = np.array(positions).reshape([-1, 3])
def set_reduced(self, reduced):
"""
Set the reduced positions of the atoms
This contains adimensional values
relative to cell vectors
:param reduced:
:return:
"""
self.reduced = np.array(reduced).reshape([-1, 3])
def sort_sites_using_list(self, sorted_indices):
sorted_indices = np.array([int(x) for x in sorted_indices])
self.symbols = list(np.array(self.symbols)[sorted_indices])
self.positions = self.positions[sorted_indices]
if self.is_periodic:
self.reduced = self.reduced[sorted_indices]
if self.vector_info is not None:
for vi in self.vector_info:
if self.vector_info[vi] is not None:
self.vector_info[vi] = self.vector_info[vi][sorted_indices]
def sort_sites(self):
# First: Sort sites using the distance to the origin
sorted_indices = np.array([np.linalg.norm(self.positions[i]) for i in range(self.nsites)]).argsort()
# print sorted_indices
self.sort_sites_using_list(sorted_indices)
# Second: Sort again using the atomic number
if len(self.species) > 1:
sorted_indices = np.array([atomic_number(x) for x in self.symbols]).argsort()
self.sort_sites_using_list(sorted_indices)
def sort_axes(self):
"""
Sort the lattice vectors in decreasing order of their length:
'a' will be the longest lattice vector,
'c' the shortest.
"""
sorted_indices = self.lattice.lengths.argsort()[::-1]
self.set_cell(self.cell[sorted_indices])
self.reduced = self.reduced[:, sorted_indices]
def align_with_axis(self, axis=0, round_decimals=14):
lattice = self.lattice
lattice.align_with_axis(axis=axis, round_decimals=round_decimals)
self.set_cell(lattice.cell)
self.reduced2positions()
def align_with_plane(self, axis=2, round_decimals=14):
lattice = self.lattice
lattice.align_with_plane(axis=axis, round_decimals=round_decimals)
self.set_cell(lattice.cell)
self.reduced2positions()
def align_inertia_momenta(self):
ii = self.inertia_matrix()
eigval, eigvec = np.linalg.eig(ii)
eigvec = eigvec.T[eigval.argsort()[::-1]].T
inveigvec = np.linalg.inv(eigvec)
self.positions = np.dot(inveigvec, self.positions.T).T
def canonical_form(self):
if not self.is_periodic:
self.relocate_to_cm()
self.align_inertia_momenta()
self.sort_sites()
if self.is_periodic:
self.sort_axes()
self.align_with_axis()
self.align_with_plane()
self.atoms_in_box()
self.sort_sites()
def supercell(self, size):
"""
Creates a supercell, replicating the positions
of atoms in the x,y,z directions a number of
size=(nx,ny,nz) times
"""
new_natom = np.prod(size) * self.natom
new_symbols = []
new_positions = np.zeros((new_natom, 3))
size = np.array(size).astype(int)
index = 0
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
for n in range(self.natom):
new_symbols.append(self.symbols[n])
new_positions[index] = self.positions[n] + (
i * self.cell[0] + j * self.cell[1] + k * self.cell[2])
index += 1
new_cell = np.zeros((3, 3))
new_cell[0] = size[0] * self.cell[0]
new_cell[1] = size[1] * self.cell[1]
new_cell[2] = size[2] * self.cell[2]
return Structure(symbols=new_symbols, positions=new_positions, cell=new_cell)
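    # Example (illustrative): st.supercell((2, 2, 2)) returns a new Structure with
    # eight times the atoms and each lattice vector doubled; the original object
    # is left unchanged.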
def copy(self):
"""
Get a copy of the object
"""
copy_struct = Structure(name=self.name, comment=self.comment, natom=self.natom, symbols=self.symbols,
periodicity=self.periodicity, cell=self.cell, positions=self.positions,
reduced=self.reduced, vector_info=self.vector_info, sites=self.sites,
occupancies=self.occupancies)
return copy_struct
@property
def to_dict(self):
ret = {'natom': self.natom,
'symbols': self.symbols,
'periodicity': self.periodicity,
'positions': self.positions.tolist(),
'nspecies': len(self.species),
'formula': self.formula}
if self.is_periodic:
ret['cell'] = self.cell.tolist()
ret['reduced'] = self.reduced.tolist()
ret['density'] = self.density
if self.name is not None:
ret['name'] = self.name
if self.comment is not None:
ret['comment'] = self.comment
        if list(self.sites) != list(range(self.natom)):
ret['sites'] = list(self.sites)
if self.occupancies != self.natom * [1.0]:
ret['occupancies'] = self.occupancies
# if len(self.vector_info) != 1 or self.vector_info['mag_moments'] is not None:
# ret['vector_info'] = self.vector_info
return ret
def round(self, decimals=6, pos='reduced'):
self.set_cell(np.around(self.cell, decimals))
if pos == 'reduced':
self.set_reduced(np.around(self.reduced, decimals))
self.reduced2positions()
else:
self.set_positions(np.around(self.positions, decimals))
self.positions2reduced()
@staticmethod
def from_dict(structdict):
natom = structdict['natom']
symbols = deep_unicode(structdict['symbols'])
periodicity = structdict['periodicity']
positions = np.array(structdict['positions'])
if 'name' in structdict:
name = structdict['name']
else:
name = None
if 'comment' in structdict:
comment = structdict['comment']
else:
comment = None
if 'cell' in structdict:
cell = np.array(structdict['cell'])
else:
cell = None
if 'reduced' in structdict:
reduced = np.array(structdict['reduced'])
else:
reduced = None
if 'vector_info' in structdict:
vector_info = structdict['vector_info']
else:
vector_info = None
if 'sites' in structdict:
sites = structdict['sites']
else:
sites = range(natom)
if 'occupancies' in structdict:
occupancies = structdict['occupancies']
else:
occupancies = list(np.ones(natom))
return Structure(name=name, comment=comment, natom=natom, symbols=symbols, periodicity=periodicity, cell=cell,
positions=positions, reduced=reduced, vector_info=vector_info, sites=sites,
occupancies=occupancies)
    def save_json(self, filename):
        with open(filename, 'w') as filep:
            json.dump(self.to_dict, filep, sort_keys=True, indent=4, separators=(',', ': '))
    @staticmethod
    def load_json(filename):
        with open(filename, 'r') as filep:
            structdict = deep_unicode(json.load(filep))
        return Structure.from_dict(structdict)
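    # Round-trip sketch (illustrative): st.save_json('st.json') serializes the
    # to_dict representation, and Structure.load_json('st.json') rebuilds an
    # equivalent Structure through from_dict.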
def distance2(self, atom1, atom2):
assert (isinstance(atom1, int))
assert (isinstance(atom2, int))
assert (atom1 < self.natom)
assert (atom2 < self.natom)
if self.is_periodic:
return self.lattice.distance2(self.reduced[atom1], self.reduced[atom2])
else:
dm = scipy.spatial.distance_matrix(self.positions, self.positions)
return dm[atom1, atom2]
def distance_matrix(self):
if self.is_periodic:
dm = np.zeros((self.nsites, self.nsites))
for i in range(self.nsites - 1):
for j in range(i + 1, self.nsites):
d = self.lattice.distance2(self.reduced[i], self.reduced[j], radius=1E10, limits=[1, 1, 1])
# print("%d %d - %d" % (i,j, len(d)))
dm[i, j] = min([d[x]['distance'] for x in d])
dm[j, i] = dm[i, j]
else:
dm = scipy.spatial.distance_matrix(self.positions, self.positions)
return dm
def valence_electrons(self):
ret = 0
for key, value in self.composition.items():
ret += value * valence(key)
return ret
def __eq__(self, other):
if self.natom != other.natom:
ret = False
elif not np.array_equal(self.positions, other.positions):
ret = False
        elif not np.array_equal(self.periodicity, other.periodicity):
            ret = False
        else:
            ret = True
        return ret
import os
import pytest
import numpy as np
from stwcs import distortion
from ..resources import BaseUnit
import drizzlepac.adrizzle as adrizzle
import drizzlepac.ablot as ablot
class TestDriz(BaseUnit):
def test_square_with_point(self):
"""
Test do_driz square kernel with point
"""
input = os.path.basename(self.get_input_file('input','j8bt06nyq_unit.fits'))
output = 'output_square_point.fits'
output_difference = 'difference_square_point.txt'
output_template = os.path.basename(self.get_data('truth',
'reference_square_point.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_point_image(insci, (500, 200), 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl, wcslin_pscale=wcslin.pscale)
output_bounds = self.bound_image(outsci)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
template_bounds = self.bound_image(template_data)
(min_diff, med_diff, max_diff) = self.centroid_statistics("square with point", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_square_with_grid(self):
"""
Test do_driz square kernel with grid
"""
input = os.path.basename(self.get_input_file('input','j8bt06nyq_unit.fits'))
output = 'output_square_grid.fits'
output_difference = 'difference_square_grid.txt'
output_template = os.path.basename(self.get_data('truth',
'reference_square_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl, wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("square with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_turbo_with_grid(self):
"""
Test do_driz turbo kernel with grid
"""
input = os.path.basename(self.get_input_file('input','j8bt06nyq_unit.fits'))
output = 'output_turbo_grid.fits'
output_difference = os.path.basename(self.get_data('truth',
'difference_turbo_grid.txt'))
output_template = os.path.basename(self.get_data('truth',
'reference_turbo_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
kernel='turbo', wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("turbo with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_gaussian_with_grid(self):
"""
Test do_driz gaussian kernel with grid
"""
input = os.path.basename(self.get_input_file('input','j8bt06nyq_unit.fits'))
output = 'output_gaussian_grid.fits'
output_difference = os.path.basename(self.get_data('truth',
'difference_gaussian_grid.txt'))
output_template = os.path.basename(self.get_data('truth',
'reference_gaussian_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
kernel='gaussian', wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("gaussian with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 2.0e-5)
def test_lanczos_with_grid(self):
"""
Test do_driz lanczos kernel with grid
"""
input = os.path.basename(self.get_input_file('input', 'j8bt06nyq_unit.fits'))
output = 'output_lanczos_grid.fits'
output_difference = os.path.basename(self.get_data('truth',
'difference_lanczos_grid.txt'))
output_template = os.path.basename(self.get_data('truth',
'reference_lanczos_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
kernel='lanczos3', wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("lanczos with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_tophat_with_grid(self):
"""
Test do_driz tophat kernel with grid
"""
input = os.path.basename(self.get_input_file('input', 'j8bt06nyq_unit.fits'))
output = 'output_tophat_grid.fits'
output_difference = os.path.basename(self.get_data('truth',
'difference_tophat_grid.txt'))
output_template = os.path.basename(self.get_data('truth',
'reference_tophat_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
kernel='tophat', wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("tophat with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_point_with_grid(self):
"""
Test do_driz point kernel with grid
"""
input = os.path.basename(self.get_input_file('input', 'j8bt06nyq_unit.fits'))
output = 'output_point_grid.fits'
output_difference = os.path.basename(self.get_data('truth',
'difference_point_grid.txt'))
output_template = os.path.basename(self.get_data('truth',
'reference_point_grid.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
insci = self.make_grid_image(insci, 64, 100.0)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
kernel='point', wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
(min_diff, med_diff, max_diff) = self.centroid_statistics("point with grid", output_difference,
outsci, template_data, 20.0, 8)
assert(med_diff < 1.0e-6)
assert(max_diff < 1.0e-5)
def test_square_with_image(self):
"""
Test do_driz square kernel
"""
input = os.path.basename(self.get_input_file('input', 'j8bt06nyq_unit.fits'))
output = 'output_square_image.fits'
output_template = os.path.basename(self.get_data('truth',
'reference_square_image.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
expin = 1.0
wt_scl = expin
in_units = 'cps'
wcslin = distortion.utils.output_wcs([input_wcs],undistort=False)
adrizzle.do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl, wcslin_pscale=wcslin.pscale)
self.write_image(output, output_wcs, outsci, outwht, outcon[0])
template_data = self.read_image(output_template)
self.ignore_keywords += ['rootname']
self.compare_outputs([(output, output_template)])
def test_turbo_with_image(self):
"""
Test do_driz turbo kernel
"""
input = os.path.basename(self.get_input_file('input', 'j8bt06nyq_unit.fits'))
output = 'output_turbo_image.fits'
output_template = os.path.basename(self.get_data('truth',
'reference_turbo_image.fits'))
insci = self.read_image(input)
input_wcs = self.read_wcs(input)
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_wcs = self.read_wcs(output_template)
naxis1, naxis2 = output_wcs.pixel_shape
outsci = np.zeros((naxis2, naxis1), dtype='float32')
outwht = np.zeros((naxis2, naxis1), dtype='float32')
        outcon = np.zeros((1, naxis2, naxis1), dtype='i4')
        expin = 1.0
        wt_scl = expin
        in_units = 'cps'
        wcslin = distortion.utils.output_wcs([input_wcs], undistort=False)
        adrizzle.do_driz(insci, input_wcs, inwht,
                         output_wcs, outsci, outwht, outcon,
                         expin, in_units, wt_scl,
                         kernel='turbo', wcslin_pscale=wcslin.pscale)
        self.write_image(output, output_wcs, outsci, outwht, outcon[0])
        self.ignore_keywords += ['rootname']
        self.compare_outputs([(output, output_template)])
# LatticeBoltzmannDemo.py: a two-dimensional lattice-Boltzmann "wind tunnel" simulation
# Uses numpy to speed up all array handling.
# Uses matplotlib to plot and animate the curl of the macroscopic velocity field.
# Copyright 2013, <NAME> (Weber State University)
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated data and documentation (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Except as contained in this notice, the name of the author shall not be used in
# advertising or otherwise to promote the sale, use or other dealings in this
# Software without prior written authorization.
# Credits:
# The "wind tunnel" entry/exit conditions are inspired by <NAME>'s code
# (http://www.many-core.group.cam.ac.uk/projects/LBdemo.shtml). Additional inspiration from
# Thomas Pohl's applet (http://thomas-pohl.info/work/lba.html). Other portions of code are based
# on Wagner (http://www.ndsu.edu/physics/people/faculty/wagner/lattice_boltzmann_codes/) and
# Gonsalves (http://www.physics.buffalo.edu/phy411-506-2004/index.html; code adapted from Succi,
# http://global.oup.com/academic/product/the-lattice-boltzmann-equation-9780199679249).
# For related materials see http://physics.weber.edu/schroeder/fluids
import numpy, time, matplotlib.pyplot, matplotlib.animation
# Define constants:
height = 80 # lattice dimensions
width = 200
viscosity = 0.005 # fluid viscosity
omega = 1 / (3*viscosity + 0.5) # "relaxation" parameter
u0 = 0.1 # initial and in-flow speed
four9ths = 4.0/9.0 # abbreviations for lattice-Boltzmann weight factors
one9th = 1.0/9.0
one36th = 1.0/36.0
performanceData = True # set to True if performance data is desired
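# Worked value (illustrative): with viscosity = 0.005 the relaxation parameter is
# omega = 1 / (3*0.005 + 0.5) = 1 / 0.515 ~ 1.94, just below the BGK stability
# limit of omega < 2.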
# Initialize all the arrays to steady rightward flow:
n0 = four9ths * (numpy.ones((height,width)) - 1.5*u0**2) # particle densities along 9 directions
nN = one9th * (numpy.ones((height,width)) - 1.5*u0**2)
nS = one9th * (numpy.ones((height,width)) - 1.5*u0**2)
nE = one9th * (numpy.ones((height,width)) + 3*u0 + 4.5*u0**2 - 1.5*u0**2)
nW = one9th * (numpy.ones((height,width)) - 3*u0 + 4.5*u0**2 - 1.5*u0**2)
import numpy as np
from pose import Pose
from sensor import ProximitySensor
from robot import Robot
from math import ceil, exp, sin, cos, tan, pi
from helpers import Struct
class Khepera3_IRSensor(ProximitySensor):
"""Inherits from the proximity sensor class. Performs calculations specific to the khepera3 for its characterized proximity sensors"""
def __init__(self,pose,robot):
# values copied from SimIAm
ProximitySensor.__init__(self, pose, robot, (0.02, 0.2, np.radians(20)))
def distance_to_value(self,dst):
"""Returns the distance calculation from the distance readings of the proximity sensors"""
if dst < self.rmin :
return 3960;
else:
return (3960*exp(-30*(dst-self.rmin)));
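    # Worked values (illustrative): at dst = rmin the reading saturates at 3960;
    # 0.1 m beyond rmin it drops to 3960*exp(-3) ~ 197, mimicking the steep
    # falloff of the Khepera III infrared sensors.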
class Khepera3(Robot):
"""Inherts for the simobject--->robot class for behavior specific to the Khepera3"""
def __init__(self, pose, color = 0xFFFFFF):
Robot.__init__(self, pose, color)
# create shape
self._p1 = np.array([[-0.031, 0.043, 1],
[-0.031, -0.043, 1],
[ 0.033, -0.043, 1],
[ 0.052, -0.021, 1],
[ 0.057, 0 , 1],
[ 0.052, 0.021, 1],
[ 0.033, 0.043, 1]])
self._p2 = np.array([[-0.024, 0.064, 1],
[ 0.033, 0.064, 1],
[ 0.057, 0.043, 1],
[ 0.074, 0.010, 1],
[ 0.074, -0.010, 1],
[ 0.057, -0.043, 1],
[ 0.033, -0.064, 1],
[-0.025, -0.064, 1],
[-0.042, -0.043, 1],
[-0.048, -0.010, 1],
[-0.048, 0.010, 1],
[-0.042, 0.043, 1]])
# create IR sensors
self.ir_sensors = []
ir_sensor_poses = [
Pose(-0.038, 0.048, np.radians(128)),
Pose( 0.019, 0.064, np.radians(75)),
Pose( 0.050, 0.050, np.radians(42)),
Pose( 0.070, 0.017, np.radians(13)),
Pose( 0.070, -0.017, np.radians(-13)),
Pose( 0.050, -0.050, np.radians(-42)),
Pose( 0.019, -0.064, np.radians(-75)),
                           Pose(-0.038, -0.048, np.radians(-128)),
                           ]
        for pose in ir_sensor_poses:
            self.ir_sensors.append(Khepera3_IRSensor(pose, self))