| prompt (string, lengths 15-655k) | completion (string, lengths 3-32.4k) | api (string, lengths 8-52) |
|---|---|---|
import numpy as np
from pandas import read_csv
from astropy import constants
from pkg_resources import resource_filename
from .likelihoods import *
from .priors import *
from .funcs import stellar_relations, flux_relation
np.seterr(divide='ignore')
Msun = constants.M_sun.cgs.value
Rsun = constants.R_sun.cgs.value
Rearth = constants.R_earth.cgs.value
G = constants.G.cgs.value
au = constants.au.cgs.value
pi = np.pi
ln2pi = np.log(2*pi)
# load TESS limb darkening coefficients
LDC_FILE = resource_filename('triceratops', 'data/ldc_tess.csv')
ldc_T = read_csv(LDC_FILE)
ldc_T_Zs = np.array(ldc_T.Z, dtype=float)
ldc_T_Teffs = np.array(ldc_T.Teff, dtype=int)
ldc_T_loggs = np.array(ldc_T.logg, dtype=float)
ldc_T_u1s = np.array(ldc_T.aLSM, dtype=float)
ldc_T_u2s = np.array(ldc_T.bLSM, dtype=float)
# load Kepler limb darkening coefficients
LDC_FILE = resource_filename('triceratops', 'data/ldc_kepler.csv')
ldc_K = read_csv(LDC_FILE)
ldc_K_Zs = np.array(ldc_K.Z, dtype=float)
ldc_K_Teffs = np.array(ldc_K.Teff, dtype=int)
ldc_K_loggs = np.array(ldc_K.logg, dtype=float)
ldc_K_u1s = np.array(ldc_K.a, dtype=float)
ldc_K_u2s = np.array(ldc_K.b, dtype=float)
def lnZ_TTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the TTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
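    # semimajor axis from Kepler's third law, a = (G*M_s*P_orb^2 / (4*pi^2))^(1/3),
    # with all quantities in cgs (P_orb is converted from days to seconds)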
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
eccs = sample_ecc(np.random.rand(N), planet=True, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate transit probability for each instance
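    # geometric transit probability for an eccentric orbit:
    # Ptra = (R_p + R_s)/a * (1 + e*sin(argp)) / (1 - e^2)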
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
Ptra = (rps*Rearth + R_s*Rsun)/a * e_corr
# find instances with collisions
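    # a "collision" means the periastron distance a*(1 - e) is smaller than the
    # sum of the radii, so such draws are discarded below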
coll = ((rps*Rearth + R_s*Rsun) > a*(1-eccs))
lnL = np.full(N, -np.inf)
if parallel:
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False)
# calculate lnL for transiting systems
a_arr = np.full(N, a)
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
companion_fluxratio = np.zeros(N)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_TP_p(
time, flux, sigma, rps[mask],
P_orb, incs[mask], a_arr[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=companion_fluxratio[mask]
)
else:
for i in range(N):
if Ptra[i] <= 1.:
inc_min = np.arccos(Ptra[i]) * 180./pi
else:
continue
if (incs[i] >= inc_min) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2,
eccs[i], argps[i]
)
idx = lnL.argmax()
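    # Monte Carlo estimate of the marginal likelihood: average exp(lnL + ln priors)
    # over the prior draws; the constant +600 offset guards against underflow in
    # exp() and is applied identically in every scenario, so relative comparisons
    # of the resulting lnZ values are unaffected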
Z = np.mean(np.nan_to_num(
np.exp(lnL + lnprior_Mstar + lnprior_Porb + 600)
))
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': 0, 'lnZ': lnZ
}
return res
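# A minimal, hypothetical usage sketch for lnZ_TTP (the light-curve arrays and the
# stellar parameters below are placeholders, not values supplied by this module):
#
#     res = lnZ_TTP(time, flux, sigma, P_orb=3.5, M_s=1.0, R_s=1.0,
#                   Teff=5800.0, Z=0.0, N=100000, parallel=True, mission="TESS")
#     print(res["lnZ"], res["R_p"])
#
# The other lnZ_* scenario functions below follow the same calling pattern.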
def lnZ_TEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the TEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N), M_s)
eccs = sample_ecc(np.random.rand(N), planet=False, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
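    # fraction of the total flux contributed by the EB, F_EB / (F_EB + F_target),
    # with both fluxes taken from the mass-flux relation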
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a * e_corr
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin * e_corr
# find instances with collisions
coll = ((radii*Rsun + R_s*Rsun) > a*(1-eccs))
coll_twin = ((2*R_s*Rsun) > a_twin*(1-eccs))
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
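    # the "twin" branch models near-equal-mass binaries (q >= 0.95) at twice the
    # orbital period, since their similar-depth primary and secondary eclipses can
    # mimic a single transit signal at P_orb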
if parallel:
# q < 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False) & (qs < 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
companion_fluxratio = np.zeros(N)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_EB_p(
time, flux, sigma, radii[mask], fluxratios[mask],
P_orb, incs[mask], a[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=companion_fluxratio[mask]
)
# q >= 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra_twin <= 1.] = np.arccos(
Ptra_twin[Ptra_twin <= 1.]
) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll_twin == False) & (qs >= 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
companion_fluxratio = np.zeros(N)
lnL_twin[mask] = -0.5*ln2pi - lnsigma - lnL_EB_twin_p(
time, flux, sigma, radii[mask], fluxratios[mask],
2*P_orb, incs[mask], a_twin[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=companion_fluxratio[mask]
)
else:
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2,
eccs[i], argps[i]
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (qs[i] >= 0.95)
& (coll_twin[i] == False)):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2,
eccs[i], argps[i]
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(np.nan_to_num(
np.exp(lnL + lnprior_Mstar + lnprior_Porb + 600)
))
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': 0, 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(np.nan_to_num(
np.exp(lnL_twin + lnprior_Mstar + lnprior_Porb + 600)
))
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': 0, 'lnZ': lnZ
}
return res, res_twin
def lnZ_PTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the PTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from q prior distributions
qs_comp = sample_q_companion(np.random.rand(N), M_s)
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
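        # convert companion flux fraction f into a magnitude difference,
        # delta_mag = 2.5*log10(f/(1-f)) = 2.5*log10(F_comp/F_target); this is
        # negative for companions fainter than the target, and brighter companions
        # (delta_mag > 0) are excluded below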
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
lnprior_companion = lnprior_bound_TP(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
# use flux ratio of contrast curve filter
fluxratios_comp_cc = (
flux_relation(masses_comp, filt)
/ (flux_relation(masses_comp, filt)
+ flux_relation(np.array([M_s]), filt))
)
delta_mags = 2.5*np.log10(fluxratios_comp_cc/(1-fluxratios_comp_cc))
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound_TP(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
eccs = sample_ecc(np.random.rand(N), planet=True, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
Ptra = (rps*Rearth + R_s*Rsun)/a * e_corr
# find instances with collisions
coll = ((rps*Rearth + R_s*Rsun) > a*(1-eccs))
lnL = np.full(N, -np.inf)
if parallel:
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False)
# calculate lnL for transiting systems
a_arr = np.full(N, a)
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_TP_p(
time, flux, sigma, rps[mask],
P_orb, incs[mask], a_arr[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=False
)
else:
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res
def lnZ_PEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the PEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N), M_s)
qs_comp = sample_q_companion(np.random.rand(N), M_s)
eccs = sample_ecc(np.random.rand(N), planet=False, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
lnprior_companion = lnprior_bound_EB(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
# use flux ratio of contrast curve filter
fluxratios_comp_cc = (
flux_relation(masses_comp, filt)
/ (flux_relation(masses_comp, filt)
+ flux_relation(np.array([M_s]), filt))
)
delta_mags = 2.5*np.log10(fluxratios_comp_cc/(1-fluxratios_comp_cc))
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound_EB(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a * e_corr
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin * e_corr
# find instances with collisions
coll = ((radii*Rsun + R_s*Rsun) > a*(1-eccs))
coll_twin = ((2*R_s*Rsun) > a_twin*(1-eccs))
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
if parallel:
# q < 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False) & (qs < 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_EB_p(
time, flux, sigma, radii[mask], fluxratios[mask],
P_orb, incs[mask], a[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=False
)
# q >= 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra_twin <= 1.] = np.arccos(
Ptra_twin[Ptra_twin <= 1.]
) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll_twin == False) & (qs >= 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL_twin[mask] = -0.5*ln2pi - lnsigma - lnL_EB_twin_p(
time, flux, sigma, radii[mask], fluxratios[mask],
2*P_orb, incs[mask], a_twin[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=False
)
else:
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] >= 0.95) & (coll_twin[i] == False):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb+600)
)
)
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res, res_twin
def lnZ_STP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float, Z: float,
plx: float, contrast_curve_file: str = None,
filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the STP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
        contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# sample from q prior distribution
qs_comp = sample_q_companion(np.random.rand(N), M_s)
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
loggs_comp = np.log10(G*(masses_comp*Msun)/(radii_comp*Rsun)**2)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
    # calculate limb darkening coefficients for companions
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
ldc_at_Z = ldc_T[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.aLSM, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.bLSM, dtype=float)
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
ldc_at_Z = ldc_K[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.a, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.b, dtype=float)
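    # snap each companion's logg and Teff to the limb-darkening grid resolution
    # (0.5 dex in logg, 250 K in Teff) and clip to the range covered by the table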
rounded_loggs_comp = np.round(loggs_comp/0.5) * 0.5
rounded_loggs_comp[rounded_loggs_comp < 3.5] = 3.5
rounded_loggs_comp[rounded_loggs_comp > 5.0] = 5.0
rounded_Teffs_comp = np.round(Teffs_comp/250) * 250
rounded_Teffs_comp[rounded_Teffs_comp < 3500] = 3500
rounded_Teffs_comp[rounded_Teffs_comp > 10000] = 10000
u1s_comp, u2s_comp = np.zeros(N), np.zeros(N)
for i, (comp_Teff, comp_logg) in enumerate(
zip(rounded_Teffs_comp, rounded_loggs_comp)
):
mask = (Teffs_at_Z == comp_Teff) & (loggs_at_Z == comp_logg)
u1s_comp[i], u2s_comp[i] = u1s_at_Z[mask], u2s_at_Z[mask]
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
lnprior_companion = lnprior_bound_TP(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
# use flux ratio of contrast curve filter
fluxratios_comp_cc = (
flux_relation(masses_comp, filt)
/ (flux_relation(masses_comp, filt)
+ flux_relation(np.array([M_s]), filt))
)
delta_mags = 2.5*np.log10(fluxratios_comp_cc/(1-fluxratios_comp_cc))
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound_TP(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for stars
# with masses masses_comp
lnprior_Mstar = lnprior_Mstar_planet(masses_comp)
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from prior distributions
rps = sample_rp(np.random.rand(N), masses_comp)
incs = sample_inc(np.random.rand(N))
eccs = sample_ecc(np.random.rand(N), planet=True, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
a = ((G*masses_comp*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (rps*Rearth + radii_comp*Rsun)/a * e_corr
# find instances with collisions
coll = ((rps*Rearth + radii_comp*Rsun) > a*(1-eccs))
lnL = np.full(N, -np.inf)
if parallel:
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False)
# calculate lnL for transiting systems
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_TP_p(
time, flux, sigma, rps[mask],
P_orb, incs[mask], a[mask], radii_comp[mask],
u1s_comp[mask], u2s_comp[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=True
)
else:
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res
def lnZ_SEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the SEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# sample from prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N), M_s)
qs_comp = sample_q_companion(np.random.rand(N), M_s)
eccs = sample_ecc(np.random.rand(N), planet=False, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
loggs_comp = np.log10(G*(masses_comp*Msun)/(radii_comp*Rsun)**2)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
    # calculate limb darkening coefficients for companions
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
ldc_at_Z = ldc_T[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.aLSM, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.bLSM, dtype=float)
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
ldc_at_Z = ldc_K[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.a, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.b, dtype=float)
rounded_loggs_comp = np.round(loggs_comp/0.5) * 0.5
rounded_loggs_comp[rounded_loggs_comp < 3.5] = 3.5
rounded_loggs_comp[rounded_loggs_comp > 5.0] = 5.0
rounded_Teffs_comp = np.round(Teffs_comp/250) * 250
rounded_Teffs_comp[rounded_Teffs_comp < 3500] = 3500
rounded_Teffs_comp[rounded_Teffs_comp > 13000] = 13000
u1s_comp, u2s_comp = np.zeros(N), np.zeros(N)
for i, (comp_Teff, comp_logg) in enumerate(
zip(rounded_Teffs_comp, rounded_loggs_comp)
):
mask = (Teffs_at_Z == comp_Teff) & (loggs_at_Z == comp_logg)
u1s_comp[i], u2s_comp[i] = u1s_at_Z[mask], u2s_at_Z[mask]
# calculate properties of the drawn EBs
masses = qs*masses_comp
radii, Teffs = stellar_relations(masses, radii_comp, Teffs_comp)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
delta_mags = 2.5*np.log10(
(fluxratios_comp/(1-fluxratios_comp))
+ (fluxratios/(1-fluxratios))
)
lnprior_companion = lnprior_bound_EB(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
# use flux ratio of contrast curve filter
fluxratios_cc = (
flux_relation(masses, filt)
/ (flux_relation(masses, filt)
+ flux_relation(np.array([M_s]), filt))
)
fluxratios_comp_cc = (
flux_relation(masses_comp, filt)
/ (flux_relation(masses_comp, filt)
+ flux_relation(np.array([M_s]), filt))
)
delta_mags = 2.5*np.log10(
(fluxratios_comp_cc/(1-fluxratios_comp_cc))
+ (fluxratios_cc/(1-fluxratios_cc))
)
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound_EB(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for stars
# with masses masses_comp
lnprior_Mstar = lnprior_Mstar_binary(masses_comp)
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
a = (
(G*(masses_comp+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2
)**(1/3)
Ptra = (radii*Rsun + radii_comp*Rsun)/a * e_corr
a_twin = (
(G*(masses_comp+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2
)**(1/3)
Ptra_twin = (radii*Rsun + radii_comp*Rsun)/a_twin * e_corr
# find instances with collisions
coll = ((radii*Rsun + radii_comp*Rsun) > a*(1-eccs))
coll_twin = ((2*radii_comp*Rsun) > a_twin*(1-eccs))
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
if parallel:
# q < 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False) & (qs < 0.95)
# calculate lnL for transiting systems
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_EB_p(
time, flux, sigma, radii[mask], fluxratios[mask],
P_orb, incs[mask], a[mask], radii_comp[mask],
u1s_comp[mask], u2s_comp[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=True
)
# q >= 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra_twin <= 1.] = np.arccos(
Ptra_twin[Ptra_twin <= 1.]
) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll_twin == False) & (qs >= 0.95)
# calculate lnL for transiting systems
lnL_twin[mask] = -0.5*ln2pi - lnsigma - lnL_EB_twin_p(
time, flux, sigma, radii[mask], fluxratios[mask],
2*P_orb, incs[mask], a_twin[mask], radii_comp[mask],
u1s_comp[mask], u2s_comp[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[mask],
companion_is_host=True
)
else:
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (qs[i] >= 0.95)
& (coll_twin[i] == False)):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb+600)
)
)
lnZ = np.log(Z)
res_twin = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res, res_twin
def lnZ_DTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, Tmag: float, Jmag: float, Hmag: float,
Kmag: float, output_url: str,
contrast_curve_file: str = None, filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the DTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
Tmag (float): Target star TESS magnitude.
Jmag (float): Target star J magnitude.
Hmag (float): Target star H magnitude.
Kmag (float): Target star K magnitude.
output_url (string): Link to trilegal query results.
        contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# determine background star population properties
(Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp,
Jmags_comp, Hmags_comp, Kmags_comp) = (
trilegal_results(output_url, Tmag)
)
delta_mags = Tmag - Tmags_comp
delta_Jmags = Jmag - Jmags_comp
delta_Hmags = Hmag - Hmags_comp
delta_Kmags = Kmag - Kmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
N_comp = Tmags_comp.shape[0]
# draw random sample of background stars
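    # indices into the TRILEGAL draw; note that np.random.randint's upper bound is
    # exclusive, so index N_comp-1 is never selected here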
idxs = np.random.randint(0, N_comp-1, N)
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
delta_mags = 2.5*np.log10(
fluxratios_comp[idxs]/(1-fluxratios_comp[idxs])
)
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
if filt == "J":
delta_mags = delta_Jmags[idxs]
elif filt == "H":
delta_mags = delta_Hmags[idxs]
elif filt == "K":
delta_mags = delta_Kmags[idxs]
else:
delta_mags = delta_mags[idxs]
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from R_p and inc prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
eccs = sample_ecc(np.random.rand(N), planet=True, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
Ptra = (rps*Rearth + R_s*Rsun)/a * e_corr
# find instances with collisions
coll = ((rps*Rearth + R_s*Rsun) > a*(1-eccs))
lnL = np.full(N, -np.inf)
if parallel:
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False)
# calculate lnL for transiting systems
a_arr = np.full(N, a)
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_TP_p(
time, flux, sigma, rps[mask],
P_orb, incs[mask], a_arr[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[idxs[mask]],
companion_is_host=False
)
else:
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res
def lnZ_DEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, Tmag: float, Jmag: float, Hmag: float,
Kmag: float, output_url: str,
contrast_curve_file: str = None, filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the DEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
Tmag (float): Target star TESS magnitude.
Jmag (float): Target star J magnitude.
Hmag (float): Target star H magnitude.
Kmag (float): Target star K magnitude.
output_url (string): Link to trilegal query results.
contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
if mission == "TESS":
ldc_Zs = ldc_T_Zs
ldc_Teffs = ldc_T_Teffs
ldc_loggs = ldc_T_loggs
ldc_u1s = ldc_T_u1s
ldc_u2s = ldc_T_u2s
else:
ldc_Zs = ldc_K_Zs
ldc_Teffs = ldc_K_Teffs
ldc_loggs = ldc_K_loggs
ldc_u1s = ldc_K_u1s
ldc_u2s = ldc_K_u2s
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N), M_s)
eccs = sample_ecc(np.random.rand(N), planet=False, P_orb=P_orb)
argps = sample_w(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# determine background star population properties
(Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp,
Jmags_comp, Hmags_comp, Kmags_comp) = (
trilegal_results(output_url, Tmag)
)
delta_mags = Tmag - Tmags_comp
delta_Jmags = Jmag - Jmags_comp
delta_Hmags = Hmag - Hmags_comp
delta_Kmags = Kmag - Kmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
N_comp = Tmags_comp.shape[0]
# draw random sample of background stars
idxs = np.random.randint(0, N_comp-1, N)
# calculate priors for companions
if contrast_curve_file is None:
# use TESS/Vis band flux ratios
delta_mags = 2.5*np.log10(
fluxratios_comp[idxs]/(1-fluxratios_comp[idxs])
)
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
if filt == "J":
delta_mags = delta_Jmags[idxs]
elif filt == "H":
delta_mags = delta_Hmags[idxs]
elif filt == "K":
delta_mags = delta_Kmags[idxs]
else:
delta_mags = delta_mags[idxs]
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
e_corr = (1+eccs*np.sin(argps*pi/180))/(1-eccs**2)
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a * e_corr
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin * e_corr
# find instances with collisions
coll = ((radii*Rsun + R_s*Rsun) > a*(1-eccs))
coll_twin = ((2*R_s*Rsun) > a_twin*(1-eccs))
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
if parallel:
# q < 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra <= 1.] = np.arccos(Ptra[Ptra <= 1.]) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll == False) & (qs < 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL[mask] = -0.5*ln2pi - lnsigma - lnL_EB_p(
time, flux, sigma, radii[mask], fluxratios[mask],
P_orb, incs[mask], a[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[idxs[mask]],
companion_is_host=False
)
# q >= 0.95
# find minimum inclination each planet can have while transiting
inc_min = np.full(N, 90.)
inc_min[Ptra_twin <= 1.] = np.arccos(
Ptra_twin[Ptra_twin <= 1.]
) * 180./pi
# filter out systems that do not transit or have a collision
mask = (incs >= inc_min) & (coll_twin == False) & (qs >= 0.95)
# calculate lnL for transiting systems
R_s_arr = np.full(N, R_s)
u1_arr = np.full(N, u1)
u2_arr = np.full(N, u2)
lnL_twin[mask] = -0.5*ln2pi - lnsigma - lnL_EB_twin_p(
time, flux, sigma, radii[mask], fluxratios[mask],
2*P_orb, incs[mask], a_twin[mask], R_s_arr[mask],
u1_arr[mask], u2_arr[mask],
eccs[mask], argps[mask],
companion_fluxratio=fluxratios_comp[idxs[mask]],
companion_is_host=False
)
else:
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95) & (coll[i] == False):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (qs[i] >= 0.95)
& (coll_twin[i] == False)):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2,
eccs[i], argps[i],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb + 600)
)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.nan_to_num(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb+600)
)
)
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'ecc': eccs[idx], 'argp': argps[idx],
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res, res_twin
def lnZ_BTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Tmag: float, Jmag: float, Hmag: float, Kmag: float,
output_url: str,
contrast_curve_file: str = None, filt: str = "TESS",
N: int = 1000000, parallel: bool = False,
mission: str = "TESS"):
"""
Calculates the marginal likelihood of the BTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Tmag (float): Target star TESS magnitude.
Jmag (float): Target star J magnitude.
Hmag (float): Target star H magnitude.
Kmag (float): Target star K magnitude.
output_url (string): Link to trilegal query results.
contrast_curve_file (string): Path to contrast curve file.
filt (string): Photometric filter of contrast curve. Options
are TESS, Vis, J, H, and K.
N (int): Number of draws for MC.
parallel (bool): Whether or not to simulate light curves
in parallel.
mission (str): TESS, Kepler, or K2.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma =
| np.log(sigma) | numpy.log |
# coding: utf-8
########### a plotting function given by the professor #########
import pdb
import pylab as pl
import numpy as np
def plotDecisionBoundary(X, Y, scoreFn, values, title = ""):
    # Plot the decision boundary. For that, we will assign a score to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = max((x_max-x_min)/200., (y_max-y_min)/200.)
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
zz = np.array([scoreFn(x) for x in np.c_[xx.ravel(), yy.ravel()]])
zz = zz.reshape(xx.shape)
pl.figure()
CS = pl.contour(xx, yy, zz, values, colors = 'green', linestyles = 'solid', linewidths = 2)
pl.clabel(CS, fontsize=9, inline=1)
# Plot the training points
pl.scatter(X[:, 0], X[:, 1], c=(1.-Y), s=50, cmap = pl.cm.cool)
pl.title(title)
pl.axis('tight')
########### 1. LOGISTIC REGRESSION ###########
from scipy.optimize import fmin_bfgs # the optimizer we tried in PA1
def get_path_pair(name):
return 'data/data_{}_train.csv'.format(name), 'data/data_{}_validate.csv'.format(name)
dataset_names = ('ls', 'nls', 'nonlin')
def get_xy(path):
# given data format being x1, x2, y
D = np.loadtxt(path)
return D[:,0:2], D[:,2:3]
class logistic_classifier():
def Wx(self, W, x):
return W[0] + np.dot(W[1:],x)
def set_init_W(self, x_length):
self.W = np.ones(x_length + 1)
def train(self, X, Y, lmbda):
Y = Y.reshape(len(X))
        # uniform initial guess; the exact value does not matter much
self.set_init_W(X.shape[1])
        # negative log-likelihood with an L2 penalty as our loss
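        # NLL(W) = sum_i log(1 + exp(-y_i * (W[0] + W[1:].x_i))) + lmbda * ||W[1:]||^2,
        # assuming labels y_i in {-1, +1}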
NLL = lambda W: sum(np.log(1 + np.exp(- y * self.Wx(W,x))) for x,y in zip(X,Y)) + lmbda * np.linalg.norm(W[1:])**2
# optimize NLL and save the weights
self.W = fmin_bfgs(NLL, self.W, disp=0)
def score(self, x):
return 1/(1 + np.exp(- self.Wx(self.W, x)))
    def predict(self, x):
        return 1 if self.score(x) > 0.5 else -1
def train(model, dataset, norm_penalty):
X, Y = get_xy(dataset)
clf = model()
clf.train(X, Y, norm_penalty)
return clf
def validate(clf, dataset_v):
X, Y = get_xy(dataset_v)
    Y_estimate = np.array([clf.predict(x) for x in X])
    accuracy = sum(Y_estimate == Y.reshape(len(Y))) / len(Y)
    return accuracy
def get_accuracy(model, dataset_name, norm_penalty):
    t, v = get_path_pair(dataset_name)
    clf = train(model, t, norm_penalty)
    accuracy_t = validate(clf, t)
    accuracy_v = validate(clf, v)
    print('{}: accuracy {}:{} percent (train:validate) under norm penalty {}.'.format(dataset_name, accuracy_t*100, accuracy_v*100, norm_penalty))
    return accuracy_t, accuracy_v
def plot_logistic_regression_boundaries(model, dataset_name, norm_penalty, show=1):
t, v = get_path_pair(dataset_name)
clf = model()
X_t, Y_t = get_xy(t)
clf.train(X_t, Y_t, norm_penalty)
title = '{} λ={} Train'.format(dataset_name.upper(),norm_penalty)
plotDecisionBoundary(X_t, Y_t, clf.score, [0.5], title=title)
if show:
pl.show()
# else:
# pl.savefig(title)
X_v, Y_v = get_xy(v)
title = '{} λ={} Validate'.format(dataset_name.upper(),norm_penalty)
plotDecisionBoundary(X_v, Y_v, clf.score, [0.5], title=title)
if show:
pl.show()
# else:
# pl.savefig(title)
############## 1.3 Polynomial feature expansion #############
# since we already have the non-expanded classifier,
# we can simply expand X before training and expand the input before predicting
from itertools import combinations as C
class logistic_classifier_expanded(logistic_classifier):
def expand(self, x):
expanded = np.fromiter(map(lambda x:x[0]*x[1], C(x, 2)), float)
return np.concatenate([[1], x, expanded])
def set_init_W(self, x_length):
self.W = self.expand(np.ones(x_length))
def Wx(self, W, x):
return np.dot(W, self.expand(x))
# for n in dataset_names:
# for p in [0,.1,.2,.5,.9,1,3,10,100]:
# get_accuracy(logistic_classifier_expanded, n, p)
# plot_logistic_regression_boundaries(logistic_classifier_expanded,n, p,0)
def tune(model, dataset_name):
lambda_range = np.arange(0, 15, .1)
    acc_range = [get_accuracy(model, dataset_name, p) for p in lambda_range]
acc_range_t = [a[0] for a in acc_range]
acc_range_v = [a[1] for a in acc_range]
pl.plot(lambda_range,acc_range_t)
pl.plot(lambda_range,acc_range_v)
pl.xlabel('λ')
pl.ylabel('Accuracy')
pl.legend(['train', 'validate'], loc='upper right')
pl.savefig(dataset_name)
pl.show()
# Code for generating graphs
# for n in dataset_names:
# tune(logistic_classifier, n)
# for n in dataset_names:
# tune(logistic_classifier_expanded, n)
################# 2. SVM ##################
class svm_primal:
def train(self, X, Y, K, C):
assert (len(X) == len(Y))
n, m = X.shape
        # no kernel here, so just ignore the parameter K
        # the parameter theta being optimized is concat(b, w, slack).T
        # format the constraints accordingly
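        # a minimal sketch of the intended quadratic program (soft-margin primal SVM):
        #   minimize    (1/2)*||w||^2 + C * sum(slack)
        #   subject to  y_i*(w.x_i + b) >= 1 - slack_i,  slack_i >= 0
        # so the quadratic term P would typically be identity on the w block and
        # zero for b and the slack variables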
P =
| np.zeros((1+n+m, 1+n+m)) | numpy.zeros |
import abc
from inspect import isclass
from math import acos, asin, atan2, cos, degrees, pi, radians, sin, sqrt
import numpy as np
from matplotlib import patheffects
from matplotlib.collections import LineCollection
from matplotlib.mlab import griddata
from matplotlib.patches import Circle
# http://treyhunner.com/2016/02/how-to-merge-dictionaries-in-python/
try:
from collections import ChainMap
except ImportError:
from itertools import chain
def ChainMap(*args):
return dict(chain(*map(lambda d: d.items(), reversed(args))))
def dcos_line(trend_plunge):
tr, pl = np.transpose(np.radians(trend_plunge)) # trend, plunge
return np.array(
(np.cos(pl) * np.sin(tr), np.cos(pl) * np.cos(tr), -np.sin(pl))
).T
def sphere_line(dcos_data):
x, y, z = np.transpose(dcos_data)
sign_z = np.where(z > 0, -1, 1)
z = np.clip(z, -1.0, 1.0)
return np.array(
(
np.degrees(np.arctan2(sign_z * x, sign_z * y)) % 360,
np.degrees(np.arcsin(np.abs(z))),
)
).T
def normalized_cross(a, b):
c = np.cross(a, b)
length = sqrt(c.dot(c))
return c / length if length > 0 else c
def build_rotation_matrix(azim, plng, rake):
azim, plng, rake = radians(azim), radians(plng), radians(rake)
R1 = np.array(
(
(cos(rake), 0.0, sin(rake)),
(0.0, 1.0, 0.0),
(-sin(rake), 0.0, cos(rake)),
)
)
R2 = np.array(
(
(1.0, 0.0, 0.0),
(0.0, cos(plng), sin(plng)),
(0.0, -sin(plng), cos(plng)),
)
)
R3 = np.array(
(
(cos(azim), sin(azim), 0.0),
(-sin(azim), cos(azim), 0.0),
(0.0, 0.0, 1.0),
)
)
return R3.dot(R2).dot(R1)
def fit_girdle(data):
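    # The eigenvector of the orientation tensor with the smallest eigenvalue
    # is the pole of the best-fit great circle (the girdle axis).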
direction_tensor = np.dot(np.transpose(data), data)
eigenvalues, eigenvectors = np.linalg.eigh(direction_tensor)
axis = Vector(eigenvectors[:, eigenvalues.argmin()])
return axis/axis.length
def fit_small_circle(data):
eigenvalues, eigenvectors = np.linalg.eigh(np.cov(data, rowvar=False))
axis = Vector(eigenvectors[:, eigenvalues.argmin()])
return axis/axis.length
class Vector(np.ndarray):
def __new__(cls, dcos_data):
return np.asarray(dcos_data).view(cls)
def angle_with(self, other, precise=False):
if not precise:
self_length = self.length
other_length = sqrt(other.dot(other))
return acos(np.clip(self.dot(other) / (self_length * other_length), -1, 1))
else:
return atan2(self.cross_with(other), self.dot(other))
def cross_with(self, other):
return Vector(np.cross(self, other))
def normalized_cross_with(self, other):
return Vector(normalized_cross(self, other))
@property
def attitude(self):
x, y, z = self / self.length
if z > 0:
x, y = -x, -y
return degrees(atan2(x, y)) % 360, degrees(asin(abs(z)))
@property # this should be cached
def length(self):
return sqrt(self.dot(self))
@property
def direction_vector(self):
if abs(self[2]) == 1.0:
return Vector((1.0, 0.0, 0.0))
direction = Vector((self[1], -self[0], 0.0))
return direction / direction.length
@property
def dip_vector(self):
return Vector(np.cross(self / self.length, self.direction_vector))
@property
def projection_matrix(self):
return np.outer(self, self)
@property
def rejection_matrix(self):
return np.eye(3) - self.projection_matrix
@property
def cross_product_matrix(self):
return np.array(
(
(0.0, -self[2], self[1]),
(self[2], 0.0, -self[0]),
(-self[1], self[0], 0.0),
)
)
def get_rotation_matrix(self, theta):
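        # Rodrigues' rotation formula about this (assumed unit-length) vector:
        # R = cos(theta)*I + sin(theta)*[v]_x + (1 - cos(theta))*v v^T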
return (
cos(theta) * np.eye(3)
+ sin(theta) * self.cross_product_matrix
+ (1 - cos(theta)) * self.projection_matrix
)
def get_great_circle(self, step=radians(1.0), offset=0.0):
theta_range = np.arange(offset, 2 * pi + offset, step) % (2 * pi)
sin_range = np.sin(theta_range)
cos_range = np.cos(theta_range)
return (
(
self.direction_vector[:, None] * cos_range
+ self.dip_vector[:, None] * sin_range
).T,
)
def get_small_circle(self, alpha, A=0, B=0, step=radians(1.0), offset=0.0):
if A == 0 and B == 0:
sc = self.get_great_circle(step, offset)[0].T * sin(alpha) + self[
:, None
] * cos(alpha)
else:
theta_range = np.arange(0, 2 * pi, step)
alpha_ = (
alpha
+ A * np.cos(2 * theta_range)
+ B * np.sin(2 * theta_range)
)
sc = self.get_great_circle(step)[0].T * np.sin(alpha_) + self[
:, None
] * np.cos(alpha_)
return sc.T, -sc.T
def arc_to(self, other, step=radians(1.0)):
normal = self.rejection_matrix.dot(other)
normal /= sqrt(normal.dot(normal))
theta_range = np.arange(0, self.angle_with(other), step)
sin_range = np.sin(theta_range)
cos_range = np.cos(theta_range)
return ((self * cos_range[:, None] + normal * sin_range[:, None]),)
@staticmethod
def from_attitude(trend, plunge):
return Vector(dcos_line((trend, plunge)))
class VectorSet(np.ndarray):
"""Class that represents a set (collection) of Vectors.
Parameters:
dcos_data: Is an array of direction cosines.
"""
item_class = Vector
def __new__(cls, dcos_data):
obj = np.asarray(dcos_data).view(cls)
return obj
    def __array_finalize__(self, obj):
if obj is None:
return
def __getitem__(self, x):
item = super(VectorSet, self).__getitem__(x)
if np.atleast_2d(item).shape == (1, 3):
return item.view(self.item_class)
else:
return item
# @property
# def stats(self):
# """Contains spherical statistics object for the data
# set.
# """
# return SphericalStatistics(self)
@property
def attitude(self):
"""Converts this data from direction cosines to attitudes."""
return sphere_line(self)
# def count_fisher(self, k=None, grid=None):
# """Performs grid counting of the data by Fisher smoothing.
# Parameters:
# k: von Mises-Fisher k parameter, see
# stats.SphericalGrid.count_fisher.
# grid: A stats.Spherical grid object to count on. If None
# the default grid defined on stats.DEFAULT_GRID will be
# used.
# """
# if grid is None:
# grid = DEFAULT_GRID
# return grid.count_fisher(self, k)
# def count_kamb(self, theta=None, grid=None):
# """Performs grid counting of the data by small circles of
# aperture theta.
# Parameters:
# theta: <NAME> (1986) based on Kamb (1956) theta
# parameter, see stats.SphericalGrid.count_kamb.
# grid: A stats.Spherical grid object to count on. If None
# the default grid defined on stats.DEFAULT_GRID will be
# used.
# """
# if grid is None:
# grid = DEFAULT_GRID
# return grid.count_kamb(self, theta)
def normalized_cross_with(self, other):
"""Returns a VectorSet object containing the normalized cross
product of all possible pairs between this VectorSet and an
(n, 3) array-like
Parameter:
other: A VectorSet like object.
"""
vectors = np.zeros((len(self) * len(other), 3))
i = 0
for self_vector in self:
for other_vector in other:
cross = normalized_cross(self_vector, other_vector)
vectors[i] = cross if cross[2] < 0 else -cross
i += 1
return VectorSet(vectors)
def angle_with(self, other, precise=False):
"""Returns the angles matrix between this Spherical Data and an
(n, 3) array-like.
Parameter:
other: A VectorSet like object.
precise: whether to use arccosine or arctangent (defaults False)
"""
angles = np.zeros((len(self), len(other)))
for i, self_vector in enumerate(self):
for j, other_vector in enumerate(other):
angles[i, j] = self_vector.angle_with(other_vector, precise)
return angles
def get_great_circle(self, step=radians(1.0)):
"""Returns a generator to the list of great circles of
this VectorSet vectors.
Parameters:
step: Angular step in radians to generate points around great
circle.
"""
for vector in self:
yield vector.get_great_circle(step)[0] # because of plot_circles
class ProjectionBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def _dtr(self, x, y, z):
        raise NotImplementedError
@abc.abstractmethod
def _itr(self, X, Y):
        raise NotImplementedError
def __init__(self, rotation=None):
        # Maybe check whether rotation is already a rotation matrix and, if
        # so, set it directly. Then a matrix generated by a Vector could be
        # used here.
self.rotation = rotation
if rotation is not None:
self.R = build_rotation_matrix(*rotation)
self.I = np.linalg.inv(self.R)
else:
self.R = self.I = np.eye(3)
def _pre_direct(self, data, invert_positive, rotate):
if rotate and self.rotation is not None:
x, y, z = self.R.dot(np.transpose(data))
else:
x, y, z = np.transpose(data)
d = 1.0 / np.sqrt(x * x + y * y + z * z)
if invert_positive:
c = np.where(z > 0, -1, 1) * d
return c * x, c * y, c * z
else:
return d * x, d * y, d * z
def _post_inverse(self, data, rotate):
if rotate and self.rotation is not None:
return np.transpose(self.I.dot(data))
else:
return np.transpose(data)
def direct(self, data, invert_positive=True, rotate=True):
return self._dtr(*self._pre_direct(data, invert_positive, rotate))
def inverse(self, data, rotate=True):
return self._post_inverse(self._itr(*np.transpose(data)), rotate)
class EqualAngle(ProjectionBase):
def _dtr(self, x, y, z):
"""equal-angle (stereographic) projection.
Projects a point from the unit sphere to a plane using
stereographic projection"""
return x / (1 - z), y / (1 - z)
def _itr(self, X, Y):
"""inverse equal-angle (stereographic) projection.
Inverts the projection of a point from the unit sphere
to a plane using stereographic projection"""
x = 2.0 * X / (1.0 + X * X + Y * Y)
y = 2.0 * Y / (1.0 + X * X + Y * Y)
z = (-1.0 + X * X + Y * Y) / (1.0 + X * X + Y * Y)
return x, y, z
class Orthographic(ProjectionBase):
def _dtr(self, x, y, z):
"""orthographic projection on z=0 plane."""
return x, y
def _itr(self, X, Y):
"""Inverse orthographic projection from z=0 plane to unit sphere."""
x, y = X, Y
z = np.sqrt(1 - x * x - y * y)
return x, y, z
class EqualArea(ProjectionBase):
def _dtr(self, x, y, z):
"""equal-area (schmidt-lambert) projection.
Projects a point from the unit sphere to a plane using
lambert equal-area projection, though shrinking the projected
sphere radius to 1 from sqrt(2)."""
return x * np.sqrt(1 / (1 - z)), y * np.sqrt(1 / (1 - z))
def _itr(self, X, Y):
"""inverse equal-area (schmidt-lambert) projection.
Inverts the projection of a point from the unit sphere
        to a plane using lambert equal-area projection, considering
that the projected radius of the sphere was shrunk to 1 from
sqrt(2)."""
X, Y = X * sqrt(2), Y * sqrt(2)
x = np.sqrt(1 - (X * X + Y * Y) / 4.0) * X
y = np.sqrt(1 - (X * X + Y * Y) / 4.0) * Y
z = -1.0 + (X * X + Y * Y) / 2
return x, y, z
class ProjectionPlot(object):
point_defaults = {"marker": "o", "c": "#000000", "ms": 3.0}
line_defaults = {"linewidths": 0.8, "colors": "#4D4D4D", "linestyles": "-"}
polygon_defaults = {
"linewidths": 0.8,
"edgecolors": "#4D4D4D",
"facecolors": "#FF8000",
}
contour_defaults = {"cmap": "Reds", "linestyles": "-", "antialiased": True}
arrow_defaults = {"lw": 1.0, "ls": "-"}
net_gc_defaults = {
"linewidths": 0.25,
"colors": "#808080",
"linestyles": "-",
}
net_sc_defaults = {
"linewidths": 0.25,
"colors": "#808080",
"linestyles": "-",
}
text_defaults = {
"family": "sans-serif",
"size": "x-small",
"horizontalalignment": "center",
}
@staticmethod
def _clip_lines(data, z_tol=0.1):
"""segment point pairs between inside and outside of primitive, for
avoiding spurious lines when plotting circles."""
z = np.transpose(data)[2]
inside = z < z_tol
results = []
current = []
for i, is_inside in enumerate(inside):
if is_inside:
current.append(data[i])
elif current:
results.append(current)
current = []
if current:
results.append(current)
return results
@staticmethod
def _join_segments(segments, c_tol=radians(1.0)):
"""segment point pairs between inside and outside of primitive, for
avoiding spurious lines when plotting circles."""
all_joined = False
while not all_joined and len(segments) > 1:
all_joined = True
segment = segments.pop(0)
if abs(segment[-1].angle_with(segments[0][0])) < c_tol:
segment.extend(segments.pop(0))
all_joined = False
elif abs(segment[0].angle_with(segments[0][-1])) < c_tol:
segment_b = segments.pop(0)
segment_b.extend(segment)
segment = segment_b
all_joined = False
elif abs(segment[-1].angle_with(segments[0][-1])) < c_tol:
segment.extend(reversed(segments.pop(0)))
all_joined = False
elif abs(segment[0].angle_with(segments[0][0])) < c_tol:
segment_b = segments.pop(0)
segment_b.extend(reversed(segment))
segment = segment_b
all_joined = False
segments.append(segment)
return segments
# @staticmethod
# def _close_polygon(projected_polygon):
# print(projected_polygon.shape)
# first = projected_polygon[0]
# last = projected_polygon[-1]
# mid = (first + last) / 2
# mid = mid / np.linalg.norm(mid)
# if np.dot(first, last) == 0.0:
# mid = np.array([first[1], -first[0]])
# if np.linalg.norm(first) > 1.0 and np.linalg.norm(last) > 1.0:
# return np.vstack(
# [projected_polygon, [2 * last, 3 * mid, 2 * first]]
# )
# return projected_polygon
@staticmethod
def _net_grid(gc_spacing=10.0, sc_spacing=10.0, n=360, clean_caps=True):
theta = np.linspace(0.0, 2 * pi, n)
gc_spacing, sc_spacing = radians(gc_spacing), radians(sc_spacing)
if clean_caps:
theta_gc = np.linspace(0.0 + sc_spacing, pi - sc_spacing, n)
else:
theta_gc = np.linspace(0.0, pi, n)
gc_range = np.arange(0.0, pi + gc_spacing, gc_spacing)
gc_range = np.hstack((gc_range, -gc_range))
sc_range = np.arange(0.0, pi + sc_spacing, sc_spacing)
i, j, k = np.eye(3)
ik_circle = i[:, None] * np.sin(theta) + k[:, None] * np.cos(theta)
great_circles = [
(
np.array((cos(alpha), 0.0, -sin(alpha)))[:, None]
* np.sin(theta_gc)
+ j[:, None] * np.cos(theta_gc)
).T
for alpha in gc_range
]
small_circles = [
(ik_circle * sin(alpha) + j[:, None] * cos(alpha)).T
for alpha in sc_range
]
if clean_caps:
for cap in (0, pi):
theta_gc = np.linspace(cap - sc_spacing, cap + sc_spacing, n)
great_circles += [
(
np.array((cos(alpha), 0.0, -sin(alpha)))[:, None]
*
|
np.sin(theta_gc)
|
numpy.sin
|
"""
The goal of this script is to train a TD3 RL algorithm on the random deformation
task and compare the cumulative rewards to the ones gathered by alternative
discretization strategies.
For this, do the following
1. Definitions and imports
2. Train with stable baselines
3. Apply alternative methods
4. Summarize and plot results
"""
"""
1. Definitions and imports
"""
# i) Import basics and custom environment
import numpy as np
import time
from scipy.optimize import basinhopping
import class_random_def_2D_env as def_2D
# ii) Import stable baselines
from stable_baselines3 import TD3
from stable_baselines3.common.env_checker import check_env
# iii) Initialize and check
np.random.seed(0)
def_2D_env=def_2D.Env()
def_2D_env.reset()
check_env(def_2D_env)
"""
2. Train with stable baselines
"""
# i) Train a TD3 Model
# start_time=time.time()
# model = TD3("MlpPolicy", def_2D_env,verbose=1, seed=0)
# model.learn(total_timesteps=100000)
# end_time=time.time()
# model.save('./Saved_models/trained_benchmark_random_def_2D')
model=TD3.load('./Saved_models/trained_benchmark_random_def_2D')
"""
3. Apply alternative methods
"""
# Note: All actions lie in the symmetric box space [-1,1]x[-1,1]; the
# environment maps them to [0,1]x[0,1] and then rounds them to grid indices.
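# e.g. an action of (-1, -1) corresponds to the (0, 0) corner and (1, 1) to
# (1, 1); loss_fun below applies the same rescaling x*0.5 + 0.5 before
# rounding to grid indices.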
# i) Grid based sampling
def grid_based_sampling(environment):
grid_x1=np.kron(np.array([-1/3, 1/3, 1]),np.array([1, 1, 1]))
grid_x2=np.kron(np.array([1, 1, 1]), np.array([-1/3, 1/3, 1]))
grid=np.vstack((grid_x1, grid_x2))
action=grid[:,environment.epoch]
return action
# ii) Pseudo random sampling
def pseudo_random_sampling(environment):
    Halton_sequence=np.array([[1/2, 1/4, 3/4, 1/8, 5/8, 3/8, 7/8, 1/16, 9/16,
        3/16],[1/3, 2/3, 1/9, 4/9, 7/9, 2/9, 5/9, 8/9, 1/27, 10/27]])*2-np.ones([2,10])
action=Halton_sequence[:, environment.epoch]
return action
# iii) Random sampling
def random_sampling(environment):
action=np.random.uniform(-1,1,[2])
return action
# iv) Numerical integration
def quadrature_sampling(environment):
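    # +/-0.77 and 0 approximate the 3-point Gauss-Legendre nodes 0 and
    # +/-sqrt(3/5) ~= +/-0.7746 on [-1, 1]; the kron calls below build the
    # tensor-product grid of these nodes in both coordinates.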
Gauss_points_x1=np.kron(np.array([-0.77, 0, 0.77]),np.array([1, 1, 1]))
Gauss_points_x2=np.kron(np.array([1, 1, 1]),np.array([-0.77, 0, 0.77]))
Gauss_points=np.vstack((Gauss_points_x1, Gauss_points_x2))
action=Gauss_points[:, environment.epoch]
return action
# v) Experiment design based sampling
n_average=1000
fun_table=np.zeros([n_average,(def_2D_env.x_max[0]*def_2D_env.x_max[1]).astype(int).item()])
for k in range(n_average):
def_2D_env.reset()
fun_table[k,:]=def_2D_env.fun.flatten()
def loss_fun(x_vec):
x_vec=np.reshape(x_vec, [2,9])
index_vec=np.zeros([2,def_2D_env.n_meas])
lin_ind_vec=np.zeros(def_2D_env.n_meas)
for k in range(def_2D_env.n_meas):
index_vec[:,k]=def_2D_env.round_to_index(x_vec[:,k]*0.5+0.5)
lin_ind_vec[k]=np.ravel_multi_index((index_vec[0,k].astype(int), index_vec[1,k].astype(int)),
[def_2D_env.x_max[0].item(),def_2D_env.x_max[1].item()])
f_max=np.max(fun_table,axis=1)
f_obs_mat=fun_table[:,lin_ind_vec.astype(int)]
f_obs_max=np.max(f_obs_mat,axis=1)
loss_vec=np.abs(f_obs_max-f_max)
loss_val=
|
np.mean(loss_vec)
|
numpy.mean
|
"""
Provides unit tests for sampler diagnostics.
"""
# License: MIT
from __future__ import absolute_import, division
import arviz as az
import numpy as np
import pytest
import scipy.optimize as so
import scipy.sparse as sa
import scipy.special as sp
import scipy.stats as ss
import reanalysis_dbns.models as rdm
from reanalysis_dbns.models.sampler_diagnostics import (
_count_model_transitions, _estimate_dirichlet_shape_parameters,
_get_chisq_class_lookup, _invert_digamma,
_relabel_unobserved_markov_chain_states,
_sample_stationary_distributions)
def test_rjmcmc_rhat_single_model():
"""Test calculation of convergence diagnostics for RJMCMC."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 4
n_iter = 10
k = np.zeros((n_chains, n_iter))
theta = rng.uniform(size=(n_chains, n_iter))
rhat_result = rdm.rjmcmc_rhat(k, theta, split=True)
expected_rhat = az.rhat(
az.convert_to_dataset(theta[:, :, np.newaxis]),
method='split')['x'].data
assert rhat_result['PSRF1'].shape == expected_rhat.shape
assert np.allclose(rhat_result['PSRF1'], expected_rhat)
model_indicators = np.array([2])
with pytest.raises(ValueError):
rhat_result = rdm.rjmcmc_rhat(
k, theta, model_indicators=model_indicators, split=True)
n_parameters = 3
theta = rng.uniform(size=(n_chains, n_iter, n_parameters))
rhat_result = rdm.rjmcmc_rhat(k, theta, split=True)
expected_rhat = az.rhat(
az.convert_to_dataset(theta), method='split')['x'].data
assert rhat_result['PSRF1'].shape == expected_rhat.shape
assert np.allclose(rhat_result['PSRF1'], expected_rhat)
def test_rjmcmc_rhat_single_parameter_no_split(): # noqa: C901
"""Test RJMCMC with a single parameter."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 4
n_iter = 10
k = np.zeros((n_chains, n_iter))
k[:, 5:] = 1
theta = rng.uniform(size=(n_chains, n_iter))
rhat_result = rdm.rjmcmc_rhat(k, theta, split=False)
present_models = np.unique(k)
n_models = np.size(present_models)
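    # Recompute the diagnostic ingredients by brute force: Vhat is the total
    # variance about the grand mean, Wc the within-chain variance, Wm the
    # within-model variance and WmWc the within-chain-within-model variance;
    # PSRF1 = Vhat/Wc and PSRF2 = Wm/WmWc, as asserted below.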
theta_bar_cm = np.zeros((n_chains, n_models))
theta_bar_c = np.zeros((n_chains,))
theta_bar_m = np.zeros((n_models,))
theta_bar = np.atleast_1d(np.mean(theta))
for c in range(n_chains):
theta_bar_c[c] = np.mean(theta[c])
for m in range(n_models):
mask = k[c] == present_models[m]
theta_bar_m[m] += np.sum(theta[c, mask])
theta_bar_cm[c, m] = np.mean(theta[c, mask])
for m in range(n_models):
Rm = np.sum(k == present_models[m])
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((1,))
Wc = np.zeros((1,))
Wm = np.zeros((1,))
WmWc = np.zeros((1,))
for c in range(n_chains):
for m in range(n_models):
mask = k[c] == present_models[m]
Vhat[0] += np.sum((theta[c, mask] - theta_bar)**2)
Wc[0] += np.sum((theta[c, mask] - theta_bar_c[c])**2)
Wm[0] += np.sum((theta[c, mask] - theta_bar_m[m])**2)
WmWc[0] += np.sum((theta[c, mask] - theta_bar_cm[c, m])**2)
Vhat /= (n_chains * k.shape[1] - 1.0)
Wc /= (n_chains * (k.shape[1] - 1.0))
Wm /= (n_chains * k.shape[1] - n_models)
WmWc /= (n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
k = np.zeros((n_chains, n_iter))
k[:, ::2] = 1
theta = rng.uniform(size=(n_chains, n_iter))
rhat_result = rdm.rjmcmc_rhat(k, theta, split=False)
n_models = np.size(np.unique(k))
theta_bar_cm = np.zeros((n_chains, n_models))
theta_bar_c = np.zeros((n_chains,))
theta_bar_m = np.zeros((n_models,))
theta_bar = np.atleast_1d(np.mean(theta))
for c in range(n_chains):
theta_bar_c[c] = np.mean(theta[c])
for m in range(n_models):
mask = k[c] == m
theta_bar_m[m] += np.sum(theta[c, mask])
theta_bar_cm[c, m] = np.mean(theta[c, mask])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((1,))
Wc = np.zeros((1,))
Wm = np.zeros((1,))
WmWc = np.zeros((1,))
for c in range(n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[0] += np.sum((theta[c, mask] - theta_bar)**2)
Wc[0] += np.sum((theta[c, mask] - theta_bar_c[c])**2)
Wm[0] += np.sum((theta[c, mask] - theta_bar_m[m])**2)
WmWc[0] += np.sum((theta[c, mask] - theta_bar_cm[c, m])**2)
Vhat /= (n_chains * k.shape[1] - 1.0)
Wc /= (n_chains * (k.shape[1] - 1.0))
Wm /= (n_chains * k.shape[1] - n_models)
WmWc /= (n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
def test_rjmcmc_rhat_single_parameter_split(): # noqa: C901
"""Test RJMCMC with a single parameter."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 4
n_iter = 20
initial_k = np.zeros((n_chains, n_iter))
initial_k[:, ::2] = 1
initial_k[:, ::3] = 2
initial_theta = rng.uniform(size=(n_chains, n_iter))
rhat_result = rdm.rjmcmc_rhat(initial_k, initial_theta, split=True)
n_models = np.size(np.unique(initial_k))
k = np.zeros((2 * n_chains, n_iter // 2))
theta = np.zeros((2 * n_chains, n_iter // 2))
for i in range(n_chains):
k[2 * i] = initial_k[i, :n_iter // 2]
k[2 * i + 1] = initial_k[i, n_iter // 2:]
theta[2 * i] = initial_theta[i, :n_iter // 2]
theta[2 * i + 1] = initial_theta[i, n_iter // 2:]
theta_bar_cm = np.zeros((2 * n_chains, n_models))
theta_bar_c = np.zeros((2 * n_chains,))
theta_bar_m = np.zeros((n_models,))
theta_bar = np.atleast_1d(np.mean(theta))
for c in range(2 * n_chains):
theta_bar_c[c] = np.mean(theta[c])
for m in range(n_models):
mask = k[c] == m
theta_bar_m[m] += np.sum(theta[c, mask])
theta_bar_cm[c, m] = np.mean(theta[c, mask])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((1,))
Wc = np.zeros((1,))
Wm = np.zeros((1,))
WmWc = np.zeros((1,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[0] += np.sum((theta[c, mask] - theta_bar)**2)
Wc[0] += np.sum((theta[c, mask] - theta_bar_c[c])**2)
Wm[0] += np.sum((theta[c, mask] - theta_bar_m[m])**2)
WmWc[0] += np.sum((theta[c, mask] - theta_bar_cm[c, m])**2)
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
n_chains = 3
n_iter = 21
initial_k = np.zeros((n_chains, n_iter))
initial_k[:, ::2] = 1
initial_k[:, ::3] = 2
initial_theta = rng.uniform(size=(n_chains, n_iter))
rhat_result = rdm.rjmcmc_rhat(initial_k, initial_theta, split=True)
n_models = np.size(np.unique(initial_k))
k = np.zeros((2 * n_chains, (n_iter - 1) // 2))
theta = np.zeros((2 * n_chains, (n_iter - 1) // 2))
for i in range(n_chains):
k[2 * i] = initial_k[i, 1:(n_iter - 1) // 2 + 1]
k[2 * i + 1] = initial_k[i, 1 + (n_iter - 1) // 2:]
theta[2 * i] = initial_theta[i, 1:(n_iter - 1) // 2 + 1]
theta[2 * i + 1] = initial_theta[i, 1 + (n_iter - 1) // 2:]
theta_bar_cm = np.zeros((2 * n_chains, n_models))
theta_bar_c = np.zeros((2 * n_chains,))
theta_bar_m = np.zeros((n_models,))
theta_bar = np.atleast_1d(np.mean(theta))
for c in range(2 * n_chains):
theta_bar_c[c] = np.mean(theta[c])
for m in range(n_models):
mask = k[c] == m
theta_bar_m[m] += np.sum(theta[c, mask])
theta_bar_cm[c, m] = np.mean(theta[c, mask])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((1,))
Wc = np.zeros((1,))
Wm = np.zeros((1,))
WmWc = np.zeros((1,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[0] += np.sum((theta[c, mask] - theta_bar)**2)
Wc[0] += np.sum((theta[c, mask] - theta_bar_c[c])**2)
Wm[0] += np.sum((theta[c, mask] - theta_bar_m[m])**2)
WmWc[0] += np.sum((theta[c, mask] - theta_bar_cm[c, m])**2)
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
def test_rjmcmc_rhat_multiple_parameter_no_split(): # noqa: C901
"""Test RJMCMC with multiple parameters."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 7
n_iter = 21
n_parameters = 4
initial_k = np.zeros((n_chains, n_iter))
initial_k[:, ::2] = 1
initial_theta = rng.uniform(size=(n_chains, n_iter, n_parameters))
rhat_result = rdm.rjmcmc_rhat(initial_k, initial_theta, split=False)
n_models = np.size(np.unique(initial_k))
k = initial_k
theta = initial_theta
theta_bar_cm = np.zeros((n_chains, n_models, n_parameters))
theta_bar_c = np.zeros((n_chains, n_parameters))
theta_bar_m = np.zeros((n_models, n_parameters))
theta_bar = np.zeros((n_parameters,))
for c in range(n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
theta_bar_c[c, i] = np.mean(theta[c, :, i])
theta_bar[i] += np.sum(theta[c, mask, i])
theta_bar_m[m, i] += np.sum(theta[c, mask, i])
theta_bar_cm[c, m, i] = np.mean(theta[c, mask, i])
theta_bar /= (theta.shape[0] * theta.shape[1])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((n_parameters,))
Wc = np.zeros((n_parameters,))
Wm = np.zeros((n_parameters,))
WmWc = np.zeros((n_parameters,))
for c in range(n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
Vhat[i] += np.sum((theta[c, mask, i] - theta_bar[i])**2)
Wc[i] += np.sum((theta[c, mask, i] - theta_bar_c[c, i])**2)
Wm[i] += np.sum((theta[c, mask, i] - theta_bar_m[m, i])**2)
WmWc[i] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i])**2)
Vhat /= (n_chains * k.shape[1] - 1.0)
Wc /= (n_chains * (k.shape[1] - 1.0))
Wm /= (n_chains * k.shape[1] - n_models)
WmWc /= (n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(np.diag(rhat_result['Vhat']), Vhat)
assert np.allclose(np.diag(rhat_result['Wc']), Wc)
assert np.allclose(np.diag(rhat_result['Wm']), Wm)
assert np.allclose(np.diag(rhat_result['WmWc']), WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
Vhat = np.zeros((n_parameters, n_parameters))
Wc = np.zeros((n_parameters, n_parameters))
Wm = np.zeros((n_parameters, n_parameters))
WmWc = np.zeros((n_parameters, n_parameters))
for i in range(n_parameters):
for j in range(n_parameters):
for c in range(n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[i, j] += np.sum(
(theta[c, mask, i] - theta_bar[i]) *
(theta[c, mask, j] - theta_bar[j]))
Wc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_c[c, i]) *
(theta[c, mask, j] - theta_bar_c[c, j]))
Wm[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_m[m, i]) *
                        (theta[c, mask, j] - theta_bar_m[m, j]))
WmWc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i]) *
(theta[c, mask, j] - theta_bar_cm[c, m, j]))
Vhat /= (n_chains * k.shape[1] - 1.0)
Wc /= (n_chains * (k.shape[1] - 1.0))
Wm /= (n_chains * k.shape[1] - n_models)
WmWc /= (n_chains * (k.shape[1] - n_models))
expected_mpsrf1 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(Wc), Vhat)))
expected_mpsrf2 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(WmWc), Wm)))
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['MPSRF1'], expected_mpsrf1)
assert np.allclose(rhat_result['MPSRF2'], expected_mpsrf2)
def test_rjmcmc_rhat_multiple_parameter_split(): # noqa: C901
"""Test RJMCMC with multiple parameters."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 5
n_iter = 20
n_parameters = 5
initial_k = np.zeros((n_chains, n_iter))
initial_k[:, ::2] = 1
initial_k[:, ::3] = 2
initial_theta = rng.uniform(size=(n_chains, n_iter, n_parameters))
rhat_result = rdm.rjmcmc_rhat(initial_k, initial_theta, split=True)
n_models = np.size(np.unique(initial_k))
k = np.zeros((2 * n_chains, n_iter // 2))
theta = np.zeros((2 * n_chains, n_iter // 2, n_parameters))
for i in range(n_chains):
k[2 * i] = initial_k[i, :n_iter // 2]
k[2 * i + 1] = initial_k[i, n_iter // 2:]
theta[2 * i] = initial_theta[i, :n_iter // 2, :]
theta[2 * i + 1] = initial_theta[i, n_iter // 2:, :]
theta_bar_cm = np.zeros((2 * n_chains, n_models, n_parameters))
theta_bar_c = np.zeros((2 * n_chains, n_parameters))
theta_bar_m = np.zeros((n_models, n_parameters))
theta_bar = np.zeros((n_parameters,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
theta_bar_c[c, i] = np.mean(theta[c, :, i])
theta_bar[i] += np.sum(theta[c, mask, i])
theta_bar_m[m, i] += np.sum(theta[c, mask, i])
theta_bar_cm[c, m, i] = np.mean(theta[c, mask, i])
theta_bar /= (theta.shape[0] * theta.shape[1])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((n_parameters,))
Wc = np.zeros((n_parameters,))
Wm = np.zeros((n_parameters,))
WmWc = np.zeros((n_parameters,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
Vhat[i] += np.sum((theta[c, mask, i] - theta_bar[i])**2)
Wc[i] += np.sum((theta[c, mask, i] - theta_bar_c[c, i])**2)
Wm[i] += np.sum((theta[c, mask, i] - theta_bar_m[m, i])**2)
WmWc[i] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i])**2)
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(np.diag(rhat_result['Vhat']), Vhat)
assert np.allclose(np.diag(rhat_result['Wc']), Wc)
assert np.allclose(np.diag(rhat_result['Wm']), Wm)
assert np.allclose(np.diag(rhat_result['WmWc']), WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
Vhat = np.zeros((n_parameters, n_parameters))
Wc = np.zeros((n_parameters, n_parameters))
Wm = np.zeros((n_parameters, n_parameters))
WmWc = np.zeros((n_parameters, n_parameters))
for i in range(n_parameters):
for j in range(n_parameters):
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[i, j] += np.sum(
(theta[c, mask, i] - theta_bar[i]) *
(theta[c, mask, j] - theta_bar[j]))
Wc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_c[c, i]) *
(theta[c, mask, j] - theta_bar_c[c, j]))
Wm[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_m[m, i]) *
                        (theta[c, mask, j] - theta_bar_m[m, j]))
WmWc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i]) *
(theta[c, mask, j] - theta_bar_cm[c, m, j]))
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_mpsrf1 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(Wc), Vhat)))
expected_mpsrf2 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(WmWc), Wm)))
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['MPSRF1'], expected_mpsrf1)
assert np.allclose(rhat_result['MPSRF2'], expected_mpsrf2)
n_chains = 5
n_iter = 37
n_parameters = 5
initial_k = np.zeros((n_chains, n_iter))
initial_k[:, ::2] = 1
initial_k[:, ::3] = 2
initial_k[:, ::5] = 3
initial_theta = rng.uniform(size=(n_chains, n_iter, n_parameters))
rhat_result = rdm.rjmcmc_rhat(initial_k, initial_theta, split=True)
n_models = np.size(np.unique(initial_k))
k = np.zeros((2 * n_chains, (n_iter - 1) // 2))
theta = np.zeros((2 * n_chains, (n_iter - 1) // 2, n_parameters))
for i in range(n_chains):
k[2 * i] = initial_k[i, 1:(n_iter - 1) // 2 + 1]
k[2 * i + 1] = initial_k[i, (n_iter - 1) // 2 + 1:]
theta[2 * i] = initial_theta[i, 1:(n_iter - 1) // 2 + 1, :]
theta[2 * i + 1] = initial_theta[i, 1 + (n_iter - 1) // 2:, :]
theta_bar_cm = np.zeros((2 * n_chains, n_models, n_parameters))
theta_bar_c = np.zeros((2 * n_chains, n_parameters))
theta_bar_m = np.zeros((n_models, n_parameters))
theta_bar = np.zeros((n_parameters,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
theta_bar_c[c, i] = np.mean(theta[c, :, i])
theta_bar[i] += np.sum(theta[c, mask, i])
theta_bar_m[m, i] += np.sum(theta[c, mask, i])
theta_bar_cm[c, m, i] = np.mean(theta[c, mask, i])
theta_bar /= (theta.shape[0] * theta.shape[1])
for m in range(n_models):
Rm = np.sum(k == m)
theta_bar_m[m] = theta_bar_m[m] / Rm
Vhat = np.zeros((n_parameters,))
Wc = np.zeros((n_parameters,))
Wm = np.zeros((n_parameters,))
WmWc = np.zeros((n_parameters,))
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
for i in range(n_parameters):
Vhat[i] += np.sum((theta[c, mask, i] - theta_bar[i])**2)
Wc[i] += np.sum((theta[c, mask, i] - theta_bar_c[c, i])**2)
Wm[i] += np.sum((theta[c, mask, i] - theta_bar_m[m, i])**2)
WmWc[i] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i])**2)
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_psrf1 = Vhat / Wc
expected_psrf2 = Wm / WmWc
assert np.allclose(np.diag(rhat_result['Vhat']), Vhat)
assert np.allclose(np.diag(rhat_result['Wc']), Wc)
assert np.allclose(np.diag(rhat_result['Wm']), Wm)
assert np.allclose(np.diag(rhat_result['WmWc']), WmWc)
assert np.allclose(rhat_result['PSRF1'], expected_psrf1)
assert np.allclose(rhat_result['PSRF2'], expected_psrf2)
Vhat = np.zeros((n_parameters, n_parameters))
Wc = np.zeros((n_parameters, n_parameters))
Wm = np.zeros((n_parameters, n_parameters))
WmWc = np.zeros((n_parameters, n_parameters))
for i in range(n_parameters):
for j in range(n_parameters):
for c in range(2 * n_chains):
for m in range(n_models):
mask = k[c] == m
Vhat[i, j] += np.sum(
(theta[c, mask, i] - theta_bar[i]) *
(theta[c, mask, j] - theta_bar[j]))
Wc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_c[c, i]) *
(theta[c, mask, j] - theta_bar_c[c, j]))
Wm[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_m[m, i]) *
                        (theta[c, mask, j] - theta_bar_m[m, j]))
WmWc[i, j] += np.sum(
(theta[c, mask, i] - theta_bar_cm[c, m, i]) *
(theta[c, mask, j] - theta_bar_cm[c, m, j]))
Vhat /= (2 * n_chains * k.shape[1] - 1.0)
Wc /= (2 * n_chains * (k.shape[1] - 1.0))
Wm /= (2 * n_chains * k.shape[1] - n_models)
WmWc /= (2 * n_chains * (k.shape[1] - n_models))
expected_mpsrf1 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(Wc), Vhat)))
expected_mpsrf2 = np.max(
np.linalg.eigvals(np.dot(np.linalg.inv(WmWc), Wm)))
assert np.allclose(rhat_result['Vhat'], Vhat)
assert np.allclose(rhat_result['Wc'], Wc)
assert np.allclose(rhat_result['Wm'], Wm)
assert np.allclose(rhat_result['WmWc'], WmWc)
assert np.allclose(rhat_result['MPSRF1'], expected_mpsrf1)
assert np.allclose(rhat_result['MPSRF2'], expected_mpsrf2)
def test_invert_digamma():
"""Test inversion of digamma function."""
tolerance = 1e-12
x = 1.0
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.abs(x - x_est) < tolerance
x = 4.3531
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.abs(x - x_est) < tolerance
x = 0.2
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.abs(x - x_est) < tolerance
x = np.ones(())
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.ones(3)
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.random.uniform(0.6, 10.0, size=(20,))
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.random.uniform(0.0001, 0.6, size=(30,))
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.ones((5, 5))
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.random.uniform(0.6, 10.0, size=(20, 5, 11))
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
x = np.random.uniform(0.0001, 0.6, size=(30, 20, 4, 8))
y = sp.digamma(x)
x_est = _invert_digamma(y, tolerance=tolerance)
assert np.all(np.abs(x - x_est) < tolerance)
def test_estimate_dirichlet_shape_parameters():
"""Test maximum likelihood fit of Dirichlet distribution."""
random_seed = 0
random_state = np.random.default_rng(random_seed)
def dirichlet_log_likelihood(alpha, p):
return np.sum([ss.dirichlet.logpdf(pi, alpha) for pi in p])
n_features = 3
n_samples = 500
alpha = np.ones(n_features)
p = ss.dirichlet.rvs(alpha, size=(n_samples,), random_state=random_state)
alpha_hat = _estimate_dirichlet_shape_parameters(p)
alpha_0 = random_state.uniform(0.0, 2.0, size=(n_features,))
def _objective_one(x):
return -dirichlet_log_likelihood(x, p)
tol = 1e-6
bounds = so.Bounds(0, np.inf)
sol = so.minimize(_objective_one, alpha_0, bounds=bounds,
method='trust-constr', options={'xtol': tol})
expected_alpha_hat = sol.x
assert np.allclose(alpha_hat, expected_alpha_hat, atol=tol)
n_features = 6
n_samples = 250
alpha = random_state.uniform(0.5, 4.0, size=(n_features,))
p = ss.dirichlet.rvs(alpha, size=(n_samples,), random_state=random_state)
alpha_hat = _estimate_dirichlet_shape_parameters(p)
alpha_0 = random_state.uniform(0.0, 2.0, size=(n_features,))
def _objective_two(x):
return -dirichlet_log_likelihood(x, p)
tol = 1e-6
bounds = so.Bounds(0, np.inf)
sol = so.minimize(_objective_two, alpha_0, bounds=bounds,
method='trust-constr', options={'xtol': tol})
expected_alpha_hat = sol.x
assert np.allclose(alpha_hat, expected_alpha_hat, atol=tol)
def test_count_model_transitions():
"""Test counting model transitions."""
k = np.array([[0, 0, 1, 2, 1, 1, 0]], dtype='i8')
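    # Within the single chain [0, 0, 1, 2, 1, 1, 0] the successive transitions
    # are 0->0, 0->1, 1->2, 2->1, 1->1 and 1->0, giving one count in each of
    # the corresponding entries of the expected matrix below.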
n = _count_model_transitions(k)
expected_n = np.array([[[1, 1, 0], [1, 1, 1], [0, 1, 0]]])
assert np.all(n == expected_n)
n = _count_model_transitions(k, sparse=True)
expected_n = np.array([[[1, 1, 0], [1, 1, 1], [0, 1, 0]]])
assert np.all(n.toarray() == expected_n)
model_indicators = np.array([0, 1, 2, 3])
n = _count_model_transitions(k, model_indicators=model_indicators)
expected_n = np.array([[[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]]])
assert np.all(n == expected_n)
n = _count_model_transitions(k, model_indicators=model_indicators,
sparse=True)
expected_n = np.array([[[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]]])
assert np.all(n.toarray() == expected_n)
def test_sample_stationary_distributions():
"""Test sampling of stationary distributions."""
random_seed = 0
rng = np.random.default_rng(random_seed)
P = np.array([[0.2, 0.8], [0.5, 0.5]])
exact_pi = np.array([5.0 / 13.0, 8.0 / 13.0])
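    # For a two-state chain the stationary distribution is proportional to
    # (P[1, 0], P[0, 1]) = (0.5, 0.8), i.e. (5/13, 8/13) after normalisation.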
assert np.allclose(np.dot(exact_pi, P), exact_pi)
assert np.abs(np.sum(exact_pi) - 1.0) < 1e-10
n_chains = 4
n_iter = 500
k = np.empty((n_chains, n_iter), dtype=np.uint64)
for i in range(n_chains):
ki = np.empty((2 * n_iter), dtype=np.uint64)
ki[0] = rng.choice(2)
for t in range(1, 2 * n_iter):
p = P[ki[t - 1]]
ki[t] = rng.choice(2, p=p)
k[i] = ki[-n_iter:]
n_samples = 1000
samples, epsilon = _sample_stationary_distributions(
k, epsilon=0.0, n_samples=n_samples)
assert samples.shape == (n_samples, 2)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([0., 0.]))
assert np.allclose(np.mean(samples, axis=0), exact_pi, atol=5e-2)
model_indicators = np.array([0, 1, 2])
samples, epsilon = _sample_stationary_distributions(
k, epsilon=1e-6, n_samples=n_samples,
model_indicators=model_indicators)
assert samples.shape == (n_samples, 3)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([1e-6, 1e-6, 1e-6]))
assert np.allclose(np.mean(samples[:, :2], axis=0), exact_pi, atol=5e-2)
k2 = k.copy()
for i in range(n_chains):
k2[i, k2[i] == 1] = 2
model_indicators = np.array([0, 1, 2])
samples, epsilon = _sample_stationary_distributions(
k2, epsilon=1e-6, n_samples=n_samples,
model_indicators=model_indicators)
assert samples.shape == (n_samples, 3)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([1e-6, 1e-6, 1e-6]))
assert np.allclose(np.mean(samples, axis=0)[[0, 2]], exact_pi, atol=5e-2)
samples, epsilon = _sample_stationary_distributions(
k, n_samples=n_samples)
assert samples.shape == (n_samples, 2)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([0.5, 0.5]))
assert np.allclose(np.mean(samples, axis=0), exact_pi, atol=5e-2)
model_indicators = np.array([0, 1, 2])
samples, epsilon = _sample_stationary_distributions(
k, n_samples=n_samples, model_indicators=model_indicators)
assert samples.shape == (n_samples, 3)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([0.5, 0.5, 1e-10]))
assert np.allclose(np.mean(samples[:, :2], axis=0), exact_pi, atol=5e-2)
samples, epsilon = _sample_stationary_distributions(
k2, n_samples=n_samples, model_indicators=model_indicators)
assert samples.shape == (n_samples, 3)
assert np.allclose(np.sum(samples, axis=1), 1.0)
assert np.allclose(epsilon, np.array([0.5, 1e-10, 0.5]))
assert np.allclose(np.mean(samples, axis=0)[[0, 2]], exact_pi, atol=5e-2)
def test_estimate_stationary_distributions():
"""Test estimation of stationary distributions and ESS calculation."""
random_seed = 0
rng = np.random.default_rng(random_seed)
n_chains = 4
n_iter = 1000
n_models = 3
k = np.empty((n_chains, n_iter), dtype=np.uint64)
for i in range(n_chains):
for t in range(n_iter):
k[i, t] = rng.choice(n_models)
n_samples = 10000
result = rdm.estimate_stationary_distribution(
k, tolerance=1e-3, model_indicators=np.arange(3),
min_epsilon=1e-10, n_samples=n_samples, random_state=random_seed)
assert result['pi'].shape == (n_samples, 3)
row_sums = np.sum(result['pi'], axis=1)
mask = np.isfinite(row_sums)
assert np.allclose(row_sums[mask], 1.0)
assert np.abs(result['ess'] - n_chains * n_iter) < 200
assert result['n_models'] == 3
assert result['n_observed_models'] == 3
n_samples = 10000
result = rdm.estimate_stationary_distribution(
k, tolerance=1e-3, model_indicators=np.arange(4),
min_epsilon=1e-10, n_samples=n_samples, random_state=random_seed)
assert result['pi'].shape == (n_samples, 4)
row_sums = np.sum(result['pi'], axis=1)
mask = np.isfinite(row_sums)
assert np.allclose(row_sums[mask], 1.0)
assert np.abs(result['ess'] - n_chains * n_iter) < 200
assert result['n_models'] == 4
assert result['n_observed_models'] == 3
P = np.array([[0.2, 0.8], [0.5, 0.5]])
exact_pi = np.array([5.0 / 13.0, 8.0 / 13.0])
assert np.allclose(np.dot(exact_pi, P), exact_pi)
assert np.abs(np.sum(exact_pi) - 1.0) < 1e-10
n_chains = 4
n_iter = 1000
k = np.empty((n_chains, n_iter), dtype=np.uint64)
for i in range(n_chains):
ki = np.empty((2 * n_iter), dtype=np.uint64)
ki[0] = rng.choice(2)
for t in range(1, 2 * n_iter):
p = P[ki[t - 1]]
ki[t] = rng.choice(2, p=p)
k[i] = ki[-n_iter:]
n_samples = 10000
result = rdm.estimate_stationary_distribution(
k, n_samples=n_samples, tolerance=1e-3, min_epsilon=1e-10)
assert result['pi'].shape == (n_samples, 2)
row_sums = np.sum(result['pi'], axis=1)
mask = np.isfinite(row_sums)
assert np.allclose(row_sums[mask], 1.0)
# for chains with significant anti-correlation, ESS should be
# larger than IID samples
assert result['ess'] > n_chains * n_iter
assert result['n_models'] == 2
assert result['n_observed_models'] == 2
P = np.array([[0.8, 0.2], [0.7, 0.3]])
exact_pi = np.array([7.0 / 9.0, 2.0 / 9.0])
assert np.allclose(np.dot(exact_pi, P), exact_pi)
assert np.abs(np.sum(exact_pi) - 1.0) < 1e-10
n_chains = 4
n_iter = 1000
k = np.empty((n_chains, n_iter), dtype=np.uint64)
for i in range(n_chains):
ki = np.empty((2 * n_iter), dtype=np.uint64)
ki[0] = rng.choice(2)
for t in range(1, 2 * n_iter):
p = P[ki[t - 1]]
ki[t] = rng.choice(2, p=p)
k[i] = ki[-n_iter:]
n_samples = 10000
result = rdm.estimate_stationary_distribution(
k, n_samples=n_samples, sparse=True, tolerance=1e-3,
min_epsilon=1e-10)
assert result['pi'].shape == (n_samples, 2)
row_sums = np.sum(result['pi'], axis=1)
mask = np.isfinite(row_sums)
assert np.allclose(row_sums[mask], 1.0)
# for chains with significant autocorrelation, ESS should be
# smaller than IID samples
assert result['ess'] < n_chains * n_iter
assert result['n_models'] == 2
assert result['n_observed_models'] == 2
result = rdm.estimate_stationary_distribution(
k, model_indicators=[0, 1, 2],
n_samples=n_samples, sparse=True, tolerance=1e-3,
min_epsilon=1e-10)
assert result['pi'].shape == (n_samples, 3)
row_sums = np.sum(result['pi'], axis=1)
mask = np.isfinite(row_sums)
assert
|
np.allclose(row_sums[mask], 1.0)
|
numpy.allclose
|
from __future__ import print_function
# from network import VGGNet
from dirtorch.utils import common
import dirtorch.nets as nets
import pandas as pd
import faiss
import torch
import torchvision.transforms as transforms
import torch.nn as nn
from six.moves import cPickle
import numpy as np
import imageio
import os
import time
from PIL import Image
from evaluate import evaluate_class
from DB import Database
def load_model(path, iscuda):
checkpoint = common.load_checkpoint(path, iscuda)
net = nets.create_model(pretrained="", **checkpoint['model_options'])
net = common.switch_model_to_cuda(net, iscuda, checkpoint)
net.load_state_dict(checkpoint['state_dict'])
net.preprocess = checkpoint.get('preprocess', net.preprocess)
# if 'pca' in checkpoint:
# net.pca = checkpoint.get('pca')
return net
# use_gpu = torch.cuda.is_available()
# torch.cuda.set_device(2)
use_gpu = False
# cache dir
cache_dir = '..\\cache'
Odic_addr = 'res101_AP_GeM-oxf-dict'
Ovec_addr = 'res101_AP_GeM-oxf-vec'
Oindex_addr = 'res101_AP_GeM-oxf-indexIPQ'
Ddic_addr = 'res101_AP_GeM-database-dict'
Dvec_addr = 'res101_AP_GeM-database-vec'
Dindex_addr = 'res101_AP_GeM-database-indexIPQ'
depth = 10
isOxford = True
# LOAD_MODEL_PATH = None
# LOAD_MODEL_PATH = '../model/imagenet-caffe-vgg16-features-d369c8e.pth'
# LOAD_MODEL_PATH = '../model/imagenet-caffe-resnet101-features-10a101d.pth'
# LOAD_WHITEN_PATH = '../model/retrieval-SfM-120k-resnet101-gem-whiten-22ab0c1.pth'
CHECKPOINT = "../model/Resnet-101-AP-GeM.pt"
IMAGE_NORMALIZER = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
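# the mean/std above are the standard ImageNet normalization statistics used
# by torchvision pretrained models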
REMOVE_FC = False
class ModelFeat(object):
@staticmethod
def make_samples(db, mode, is_Oxford=isOxford, verbose=True):
if is_Oxford:
dic_addr = Odic_addr
            vec_addr = Ovec_addr
index_addr = Oindex_addr
else:
dic_addr = Ddic_addr
            vec_addr = Dvec_addr
index_addr = Dindex_addr
try:
dicbase = cPickle.load(open(os.path.join(cache_dir, dic_addr), "rb", True))
# print(dicbase)
vecbase = cPickle.load(open(os.path.join(cache_dir, vec_addr), "rb", True))
index = faiss.read_index(os.path.join(cache_dir, index_addr))
if verbose:
print("Using cache..., config=%s, depth=%s" % (vec_addr, depth))
except:
if verbose:
print("Counting histogram..., config=%s, depth=%s" % (vec_addr, depth))
# base_model = VGGNet(load_features_path=LOAD_MODEL_PATH, requires_grad=False)
# base_model = Res101(load_features_path=LOAD_MODEL_PATH,
# use_Gem_whiten=True, load_whiten_path=LOAD_WHITEN_PATH)
# base_model =
base_model = load_model(CHECKPOINT, False)
base_model.eval()
print("load successfully!")
if REMOVE_FC:
base_model = nn.Sequential(*list(base_model.children())[:-1])
print("Remove FC")
if use_gpu:
base_model = base_model.cuda()
vecbase = []
dicbase = []
data = db.get_data()
count = 1
for d in data.itertuples():
# if count == 5:
# break
d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
img = imageio.imread(d_img, pilmode="RGB")
img = Image.fromarray(img)
img = IMAGE_NORMALIZER(img)
img = np.array(img)
img = np.expand_dims(img, axis=0)
if use_gpu:
inputs = torch.autograd.Variable(torch.from_numpy(img).cuda().float())
else:
inputs = torch.from_numpy(img)
d_hist = base_model(inputs).view(-1, )
d_hist = d_hist.data.cpu().numpy()
vecbase.append(d_hist)
dicbase.append((d_cls, d_img))
print(count)
count += 1
vecbase =
|
np.array(vecbase)
|
numpy.array
|
from lsnn.guillaume_toolbox.tensorflow_einsums.einsum_re_written import einsum_bij_jk_to_bik, einsum_bi_ijk_to_bjk
# import matplotlib
# matplotlib.use('Agg')
import datetime
import os
import socket
from time import time
import matplotlib.pyplot as plt
from matplotlib import collections as mc, patches
import numpy as np
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox.file_saver_dumper_no_h5py import save_file
from lsnn.guillaume_toolbox.matplotlib_extension import strip_right_top_axis, raster_plot
from tutorial_storerecall_utils import generate_storerecall_data, error_rate, gen_custom_delay_batch
from lsnn.guillaume_toolbox.tensorflow_utils import tf_downsample, tf_roll
from lsnn.spiking_models import tf_cell_to_savable_dict, placeholder_container_for_rnn_state, \
feed_dict_with_placeholder_container, exp_convolve, ALIF, ALIFStateTuple, weight_matrix_with_delay_dimension, LIF
from lsnn.guillaume_toolbox.rewiring_tools import weight_sampler, rewiring_optimizer_wrapper
script_name = os.path.basename(__file__)[:-3]
result_folder = 'results/' + script_name + '/'
FLAGS = tf.app.flags.FLAGS
start_time = datetime.datetime.now()
##
tf.app.flags.DEFINE_string('model', 'ALIF', 'Chosen network model')
tf.app.flags.DEFINE_string('comment', '', 'comment to retrieve the stored results')
##
tf.app.flags.DEFINE_integer('batch_train', 128, 'batch size of the training set')
tf.app.flags.DEFINE_integer('batch_val', 128, 'batch size of the validation set')
tf.app.flags.DEFINE_integer('batch_test', 128, 'batch size of the testing set')
tf.app.flags.DEFINE_integer('n_charac', 2, 'number of characters in the recall task')
tf.app.flags.DEFINE_integer('n_in', 100, 'number of input units.')
tf.app.flags.DEFINE_integer('n_rec', 0, 'number of recurrent units.')
tf.app.flags.DEFINE_integer('n_con', 2, 'number of controller units')
tf.app.flags.DEFINE_integer('f0', 50, 'input firing rate')
tf.app.flags.DEFINE_integer('reg_rate', 10, 'target rate for regularization')
tf.app.flags.DEFINE_integer('reg_max_rate', 100, 'target rate for regularization')
tf.app.flags.DEFINE_integer('n_iter', 3, 'number of iterations')
tf.app.flags.DEFINE_integer('n_delay', 10, 'number of delays')
tf.app.flags.DEFINE_integer('n_ref', 3, 'Number of refractory steps')
tf.app.flags.DEFINE_integer('seq_len', 64, 'Number of character steps')
tf.app.flags.DEFINE_integer('seq_delay', 3, 'Expected delay in character steps. Must be <= seq_len - 2')
tf.app.flags.DEFINE_integer('tau_char', 200, 'Duration of symbols')
tf.app.flags.DEFINE_integer('seed', -1, 'Random seed.')
tf.app.flags.DEFINE_integer('lr_decay_every', 100, 'Decay the learning rate every N iterations')
tf.app.flags.DEFINE_integer('print_every', 1, 'Print progress every N iterations')
##
tf.app.flags.DEFINE_float('stop_crit', -1, 'Stopping criterion. Stops training if error goes below this value')
tf.app.flags.DEFINE_float('beta', 3.0, 'Mikolov adaptive threshold beta scaling parameter')
tf.app.flags.DEFINE_float('tau_a', 1200, 'Mikolov model alpha - threshold decay')
tf.app.flags.DEFINE_float('tau_out', 20, 'tau for PSP decay in LSNN and output neurons')
tf.app.flags.DEFINE_float('tau', 20, 'membrane time constant of the LSNN neurons')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Base learning rate.')
tf.app.flags.DEFINE_float('lr_decay', 0.3, 'Decaying factor')
tf.app.flags.DEFINE_float('reg', 1e-2, 'regularization coefficient')
tf.app.flags.DEFINE_float('rewiring_connectivity', -1, 'possible usage of rewiring with ALIF and LIF (0.1 is default)')
tf.app.flags.DEFINE_float('readout_rewiring_connectivity', -1, '')
tf.app.flags.DEFINE_float('l1', 1e-2, 'l1 regularization that goes with rewiring')
tf.app.flags.DEFINE_float('rewiring_temperature', 0, '')
tf.app.flags.DEFINE_float('dampening_factor', 0.3, '')
tf.app.flags.DEFINE_float('stochastic_factor', -1, '')
tf.app.flags.DEFINE_float('V0', 1, 'unit scaling for LSNN model')
tf.app.flags.DEFINE_float('dt', 1., '(ms) simulation step')
tf.app.flags.DEFINE_float('thr', .05, 'threshold at which the LSNN neurons spike')
##
tf.app.flags.DEFINE_bool('train', False, 'Whether to train the model')
tf.app.flags.DEFINE_bool('tau_a_spread', False, 'Mikolov model spread of alpha - threshold decay')
tf.app.flags.DEFINE_bool('save_data', True, 'Save the data (training, test, network, trajectory for plotting)')
tf.app.flags.DEFINE_bool('do_plot', True, 'Perform plots')
tf.app.flags.DEFINE_bool('monitor_plot', True, 'Perform plots during training')
tf.app.flags.DEFINE_bool('interactive_plot', True, 'Perform plots')
tf.app.flags.DEFINE_bool('rec_to_con', True, 'Hidden units connected to context units in Mikolov')
tf.app.flags.DEFINE_bool('rec_con', False, 'Recurrent context units in Mikolov')
tf.app.flags.DEFINE_bool('device_placement', False, '')
tf.app.flags.DEFINE_bool('verbose', False, '')
tf.app.flags.DEFINE_bool('neuron_sign', True, '')
tf.app.flags.DEFINE_bool('adaptive_reg', False, '')
tf.app.flags.DEFINE_bool('preserve_state', True, '')
FLAGS.thr = FLAGS.thr * FLAGS.V0 # scaling the threshold too!
class ALIFv(ALIF):
def __init__(self, n_in, n_rec, tau=20, thr=0.01,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=5,
tau_adaptation=200., beta=1.6,
rewiring_connectivity=-1, dampening_factor=0.3,
in_neuron_sign=None, rec_neuron_sign=None, injected_noise_current=0.,
V0=1., trainable=True):
LIF.__init__(self, n_in=n_in, n_rec=n_rec, tau=tau, thr=thr, dt=dt, n_refractory=n_refractory,
dtype=dtype, n_delay=n_delay,
rewiring_connectivity=rewiring_connectivity,
dampening_factor=dampening_factor, in_neuron_sign=in_neuron_sign, rec_neuron_sign=rec_neuron_sign,
injected_noise_current=injected_noise_current,
V0=V0)
if tau_adaptation is None: raise ValueError("alpha parameter for adaptive bias must be set")
if beta is None: raise ValueError("beta parameter for adaptive bias must be set")
self.tau_adaptation = tau_adaptation
self.beta = beta
self.decay_b = np.exp(-dt / tau_adaptation)
@property
def output_size(self):
return [self.n_rec, self.n_rec, self.n_rec]
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
# i_future_buffer = tf.ones_like(state.i_future_buffer) * FLAGS.input_current
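        # b is a low-pass filter of the spike train z (decay exp(-dt/tau_adaptation));
        # the effective threshold below is raised by beta * V0 * b, which gives the
        # adaptive threshold of the ALIF neuron.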
new_b = self.decay_b * state.b + (np.ones(self.n_rec) - self.decay_b) * state.z
thr = self.thr + new_b * self.beta * self.V0
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer,
decay=self._decay,
thr=thr)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = ALIFStateTuple(v=new_v,
z=new_z,
b=new_b,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return [new_z, new_b, new_v], new_state
def custom_sequence():
#s = rd.choice([0, 1], size=FLAGS.seq_len)
#s[1] = FLAGS.n_charac # store
#s[7] = FLAGS.n_charac + 1 # recall
s = [0, 1, 0, 1, 0, 1] + [i%2 for i in range(FLAGS.seq_len - 12)] + [1, 0, 1, 0, 1, 0]
s[0] = FLAGS.n_charac # store
s[5] = FLAGS.n_charac + 1 # recall
s[58] = FLAGS.n_charac # store
s[63] = FLAGS.n_charac + 1 # recall
return s
custom_plot = np.stack([custom_sequence() for _ in range(FLAGS.batch_test)], axis=0)
# Run asserts to check seq_delay and seq_len relation is ok
_ = gen_custom_delay_batch(FLAGS.seq_len, FLAGS.seq_delay, 1)
# Fix the random seed if given as an argument
if FLAGS.seed >= 0:
seed = FLAGS.seed
else:
seed = rd.randint(10 ** 6)
rd.seed(seed)
tf.set_random_seed(seed)
# Experiment parameters
dt = 1.
repeat_batch_test = 10
print_every = FLAGS.print_every
# Frequencies
input_f0 = FLAGS.f0 / 1000  # in kHz, consistent with the use of ms for time
regularization_f0 = FLAGS.reg_rate / 1000
regularization_f0_max = FLAGS.reg_max_rate / 1000
# Network parameters
tau_v = FLAGS.tau
thr = FLAGS.thr
decay = np.exp(-dt / FLAGS.tau_out)  # output layer PSP decay; choose a value between 15 and 30 ms, as for tau_v
# Symbol number
n_charac = FLAGS.n_charac # Number of digit symbols
n_input_symbols = n_charac + 2 # Total number of symbols including recall and store
n_output_symbols = n_charac # Number of output symbols
recall_symbol = n_input_symbols - 1 # ID of the recall symbol
store_symbol = n_input_symbols - 2 # ID of the store symbol
# Neuron population sizes
input_neuron_split = np.array_split(np.arange(FLAGS.n_in), n_input_symbols)
# Sign of the neurons
if 0 < FLAGS.rewiring_connectivity and FLAGS.neuron_sign:
n_excitatory_in = int(0.75 * FLAGS.n_in)
n_inhibitory_in = FLAGS.n_in - n_excitatory_in
in_neuron_sign = np.concatenate([-np.ones(n_inhibitory_in), np.ones(n_excitatory_in)])
np.random.shuffle(in_neuron_sign)
n_excitatory = int(0.75 * (FLAGS.n_rec + FLAGS.n_con))
n_inhibitory = FLAGS.n_rec + FLAGS.n_con - n_excitatory
rec_neuron_sign = np.concatenate([-np.ones(n_inhibitory), np.ones(n_excitatory)])
else:
if not FLAGS.neuron_sign: print('WARNING: Neuron sign is set to None without rewiring but sign is requested')
in_neuron_sign = None
rec_neuron_sign = None
# Generate the cell
beta = np.concatenate([np.zeros(FLAGS.n_rec), np.ones(FLAGS.n_con) * FLAGS.beta])
cell = ALIFv(n_in=FLAGS.n_in, n_rec=FLAGS.n_rec + FLAGS.n_con, tau=tau_v, n_delay=FLAGS.n_delay,
n_refractory=FLAGS.n_ref, dt=dt, tau_adaptation=FLAGS.tau_a, beta=beta, thr=thr,
rewiring_connectivity=FLAGS.rewiring_connectivity,
in_neuron_sign=in_neuron_sign, rec_neuron_sign=rec_neuron_sign,
dampening_factor=FLAGS.dampening_factor,
V0=FLAGS.V0, trainable=True
)
cell_name = type(cell).__name__
print('\n -------------- \n' + cell_name + '\n -------------- \n')
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
file_reference = '{}_{}_seqlen{}_seqdelay{}_in{}_R{}_A{}_V0{:.0f}_tauchar{}_comment{}'.format(
time_stamp, cell_name, FLAGS.seq_len, FLAGS.seq_delay, FLAGS.n_in, FLAGS.n_rec, FLAGS.n_con, FLAGS.V0,
FLAGS.tau_char, FLAGS.comment)
if FLAGS.model == 'Mikolov':
file_reference = file_reference + '_taua' + str(FLAGS.tau_a) + '_beta' + str(FLAGS.beta)
print('FILE REFERENCE: ' + file_reference)
# Generate input
input_spikes = tf.placeholder(dtype=tf.float32, shape=(None, None, FLAGS.n_in),
name='InputSpikes') # MAIN input spike placeholder
input_nums = tf.placeholder(dtype=tf.int64, shape=(None, None),
name='InputNums') # Lists of input character for the recall task
target_nums = tf.placeholder(dtype=tf.int64, shape=(None, None),
name='TargetNums') # Lists of target characters of the recall task
recall_mask = tf.placeholder(dtype=tf.bool, shape=(None, None),
name='RecallMask') # Binary tensor that points to the time of presentation of a recall
# Other placeholders that are useful for computing accuracy and debugging
target_sequence = tf.placeholder(dtype=tf.int64, shape=(None, None),
name='TargetSequence') # The target characters with time expansion
batch_size_holder = tf.placeholder(dtype=tf.int32, name='BatchSize') # Int that contains the batch size
init_state_holder = placeholder_container_for_rnn_state(cell.state_size, dtype=tf.float32, batch_size=None)
recall_charac_mask = tf.equal(input_nums, recall_symbol, name='RecallCharacMask')
if FLAGS.model not in ['Mikolov', 'ALIF']:
# The character to be memorized at each time step (with time expansion)
mem_sequence = tf.placeholder(dtype=tf.int64, shape=(None, None), name='MemorySequence')
def debug_error_rate(z, num_Y, num_X, n_character):
# Find the recall index
n_recall_symbol = n_character + 1
shp = tf.shape(num_X)
# Translate the one hot into ints
char_predict = tf.argmax(z, axis=1)
char_true = num_Y
# char_input = num_X
# error rate 1) Wrong characters
char_correct = tf.cast(tf.equal(char_predict, char_true), tf.float32)
character_errors = tf.reduce_mean(1 - char_correct)
# error rate 2) wrong recalls
# recall_mask = tf.equal(char_input, n_recall_symbol)
# recalls_predict = tf.boolean_mask(char_predict, recall_mask)
# recalls_true = tf.boolean_mask(char_true, recall_mask)
recall_correct = tf.equal(char_predict, char_true)
# recall_errors = tf.reduce_mean(tf.cast(tf.logical_not(recall_correct), tf.float32))
recall_errors = tf.reduce_mean(1. - tf.cast(recall_correct, tf.float32))
# Get wrong samples
# sentence_id = tf.tile(tf.expand_dims(tf.range(shp[0]), axis=1), (1, shp[1]))
# recall_sentence_id = tf.boolean_mask(sentence_id, recall_mask)
# false_sentence_id_list = tf.boolean_mask(recall_sentence_id, tf.logical_not(recall_correct))
false_sentence_id_list = tf.where(tf.equal(recall_correct, False))
return character_errors, recall_errors, false_sentence_id_list
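# debug_error_rate above is kept for reference only; its call further down is
# commented out and the validation error is instead computed in numpy by
# add_errors below.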
def add_errors(data):
y = data['target_nums_at_recall']
# y = data['Y']
# y = np.argmax(y, axis=1)
y_pred = data['Y_predict']
# print("y_pred", y_pred.shape)
# print("y_pred raw", y_pred)
y_pred = np.argmax(y_pred, axis=1)
# print("y_pred", y_pred)
# print("y", y)
correct = y == y_pred
acc = np.mean(correct)
err = 1. - acc
failed_batch_idxs = np.where(np.logical_not(correct))[0]
# data['recall_errors'] = err
# data['false_sentence_id_list'] = failed_batch_idxs
return err, failed_batch_idxs
def get_data_dict(batch_size, seq_len=FLAGS.seq_len, batch=None, override_input=None):
p_sr = 1/(1 + FLAGS.seq_delay)
spk_data, is_recall_data, target_seq_data, memory_seq_data, in_data, target_data = generate_storerecall_data(
batch_size=batch_size,
f0=input_f0,
sentence_length=seq_len,
n_character=FLAGS.n_charac,
n_charac_duration=FLAGS.tau_char,
n_neuron=FLAGS.n_in,
prob_signals=p_sr,
with_prob=True,
override_input=override_input,
)
data_dict = {input_spikes: spk_data, input_nums: in_data, target_nums: target_data, recall_mask: is_recall_data,
target_sequence: target_seq_data, batch_size_holder: batch_size}
return data_dict
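# p_sr above is presumably the per-character probability of emitting a store or
# recall symbol, so that roughly one store/recall pair appears every
# (1 + FLAGS.seq_delay) characters (interpretation, not stated in the source).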
# Define the spike-train outputs for the different models
z_stack, final_state = tf.nn.dynamic_rnn(cell, input_spikes, initial_state=init_state_holder, dtype=tf.float32)
z, b_con, v = z_stack
z_con = []
z_all = z
with tf.name_scope('RecallLoss'):
target_nums_at_recall = tf.boolean_mask(target_nums, recall_charac_mask)
Y = tf.one_hot(target_nums_at_recall, depth=n_output_symbols, name='Target')
# MTP models do not use controller (modulator) population for output
out_neurons = z_all if FLAGS.model in ['Mikolov', 'ALIF'] else z
n_neurons = out_neurons.get_shape()[2]
psp = exp_convolve(out_neurons, decay=decay)
if 0 < FLAGS.rewiring_connectivity and 0 < FLAGS.readout_rewiring_connectivity:
w_out, w_out_sign, w_out_var, _ = weight_sampler(FLAGS.n_rec + FLAGS.n_con, n_output_symbols,
FLAGS.readout_rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
w_out = tf.get_variable(name='out_weight', shape=[n_neurons, n_output_symbols])
out = einsum_bij_jk_to_bik(psp, w_out)
out_char_step = tf_downsample(out, new_size=FLAGS.seq_len, axis=1)
# out_char_step = out[:, FLAGS.tau_char//2::FLAGS.tau_char]
Y_predict = tf.boolean_mask(out_char_step, recall_charac_mask, name='Prediction')
# loss_recall = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_predict))
loss_recall = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_nums_at_recall,
logits=Y_predict))
with tf.name_scope('PlotNodes'):
out_plot = tf.nn.softmax(out)
out_plot_char_step = tf_downsample(out_plot, new_size=FLAGS.seq_len, axis=1)
# _, recall_errors, false_sentence_id_list = debug_error_rate(Y_predict, target_nums_at_recall, input_nums, n_charac)
# Target regularization
with tf.name_scope('RegularizationLoss'):
# Firing rate regularization
av = tf.reduce_mean(z_all, axis=(0, 1)) / dt
adaptive_regularization_coeff = tf.Variable(np.ones(n_neurons) * FLAGS.reg, dtype=tf.float32, trainable=False)
loss_reg = tf.reduce_sum(tf.square(av - regularization_f0) * adaptive_regularization_coeff)
do_increase_reg = tf.greater(av,regularization_f0_max)
do_increase_reg = tf.cast(do_increase_reg,dtype=tf.float32)
new_adaptive_coeff = do_increase_reg * adaptive_regularization_coeff * 1.3 \
+ (1-do_increase_reg) * adaptive_regularization_coeff * 0.93
if FLAGS.adaptive_reg:
update_regularization_coeff = tf.assign(adaptive_regularization_coeff,new_adaptive_coeff)
else:
update_regularization_coeff = tf.no_op('SkipAdaptiveRegularization')
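# Recap of the adaptive scheme: a neuron whose average rate exceeds
# regularization_f0_max gets its coefficient scaled up by 1.3, otherwise the
# coefficient relaxes by a factor 0.93; the assignment only runs when
# FLAGS.adaptive_reg is set.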
# Aggregate the losses
with tf.name_scope('OptimizationScheme'):
# scaling loss_recall to match order of magnitude of loss from script_recall.py
# this is needed to keep the same regularization coefficients (reg, regl2) across scripts
global_step = tf.Variable(0, dtype=tf.int32, trainable=False)
learning_rate = tf.Variable(FLAGS.learning_rate, dtype=tf.float32, trainable=False)
decay_learning_rate_op = tf.assign(learning_rate, learning_rate * FLAGS.lr_decay)
loss = loss_reg + loss_recall
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
if 0 < FLAGS.rewiring_connectivity:
rewiring_connectivity_list = [FLAGS.rewiring_connectivity, FLAGS.rewiring_connectivity,
FLAGS.readout_rewiring_connectivity]
train_step = rewiring_optimizer_wrapper(opt, loss, learning_rate, FLAGS.l1, FLAGS.rewiring_temperature,
rewiring_connectivity_list,
global_step=global_step,
all_trained_var_list=tf.trainable_variables())
else:
train_step = opt.minimize(loss=loss, global_step=global_step)
# Real-time plotting
sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.device_placement))
sess.run(tf.global_variables_initializer())
# print("PRE w_rec", sess.run(cell.w_rec_var))
w_rec = np.array([[0, 0], [0, 0]])
# w_rec = np.array([[0, -0.7], [-0.7, 0]])
set_w_rec = tf.assign(cell.w_rec_var, w_rec)
sess.run(set_w_rec)
# With excitatory and inhibitory connections from input
w_in_b0 = np.repeat(np.array([[0.003, -0.1]]), FLAGS.n_in//4, axis=0)
w_in_b1 = np.repeat(np.array([[-0.1, 0.003]]), FLAGS.n_in//4, axis=0)
w_in_s = np.repeat(np.array([[0.1, 0.1]]), FLAGS.n_in//4, axis=0)
w_in_r = np.repeat(np.array([[0.1, 0.1]]), FLAGS.n_in//4, axis=0)
# Only excitatory connections from input
# w_in_b0 = np.repeat(np.array([[0.02, 0]]), FLAGS.n_in//4, axis=0)
# w_in_b1 = np.repeat(np.array([[0, 0.02]]), FLAGS.n_in//4, axis=0)
# w_in_s = np.repeat(np.array([[0.08, 0.08]]), FLAGS.n_in//4, axis=0)
# w_in_r = np.repeat(np.array([[0.1, 0.1]]), FLAGS.n_in//4, axis=0)
w_in = np.vstack((w_in_b0, w_in_b1, w_in_s, w_in_r))
# print("w_in shape", w_in.shape)
set_w_in = tf.assign(cell.w_in_var, w_in)
sess.run(set_w_in)
w_out_v = np.array([[0, 10], [10, 0]])
set_w_out = tf.assign(w_out, w_out_v)
sess.run(set_w_out)
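# Weight initialization recap: the recurrent weights are zeroed, the input
# weights are built from four equally sized input populations
# (value 0, value 1, store, recall), and the readout is fixed to +/-10 so that
# each output unit reads out one of the two recurrent neurons.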
last_final_state_state_training_pointer = [sess.run(cell.zero_state(batch_size=FLAGS.batch_train, dtype=tf.float32))]
last_final_state_state_validation_pointer = [sess.run(cell.zero_state(batch_size=FLAGS.batch_val, dtype=tf.float32))]
last_final_state_state_testing_pointer = [sess.run(cell.zero_state(batch_size=FLAGS.batch_test, dtype=tf.float32))]
# Open an interactive matplotlib window to plot in real time
if FLAGS.do_plot and FLAGS.interactive_plot:
plt.ion()
if FLAGS.do_plot:
fig, ax_list = plt.subplots(5, figsize=(5.9, 6))
# rename the window with the hostname of the machine so it can be matched to the terminal window
fig.canvas.set_window_title(socket.gethostname() + ' - ' + FLAGS.comment)
def update_plot(plot_result_values, batch=0, n_max_neuron_per_raster=20, n_max_synapses=FLAGS.n_con):
"""
This function iterates the matplotlib figure on every call.
It plots the data for a fixed sequence that should be representative of the expected computation
:return:
"""
# Clear the axis to print new plots
for k in range(ax_list.shape[0]):
ax = ax_list[k]
ax.clear()
strip_right_top_axis(ax)
# if len(plot_result_values['false_sentence_id_list']) > 0:
# print(plot_result_values['false_sentence_id_list'])
# batch = plot_result_values['false_sentence_id_list'][0]
# ax_list[0].set_title("Failed batch " + str(batch))
# Plot the data; from top to bottom, the axes show the inputs, the recurrent population, and the controller
for k_data, data, d_name in zip(range(3),
[plot_result_values['input_spikes'], plot_result_values['z'],
plot_result_values['z_con']],
['Input', 'Hidden', 'Memory' if FLAGS.model == 'Mikolov' else 'Controller']):
if FLAGS.model in ['LIF', 'ALIF'] and k_data == 2:
continue
ax = ax_list[k_data]
ax.grid(color='black', alpha=0.15, linewidth=0.4)
if np.size(data) > 0:
data = data[batch]
n_max = min(data.shape[1], n_max_neuron_per_raster)
cell_select = np.linspace(start=0, stop=data.shape[1] - 1, num=n_max, dtype=int)
data = data[:, cell_select] # select a maximum of n_max_neuron_per_raster neurons to plot
raster_plot(ax, data, linewidth=0.3)
ax.set_ylabel(d_name)
ax.set_xticklabels([])
if d_name == 'Input':
ax.set_yticklabels([])
n_channel = data.shape[1] // n_input_symbols
ax.add_patch( # Value 0 row
patches.Rectangle((0, 0), data.shape[0], n_channel, facecolor="red", alpha=0.15))
ax.add_patch( # Value 1 row
patches.Rectangle((0, n_channel), data.shape[0], n_channel, facecolor="blue", alpha=0.15))
ax.add_patch( # Store row
patches.Rectangle((0, 2 * n_channel), data.shape[0], n_channel, facecolor="yellow", alpha=0.15))
ax.add_patch( # Recall row
patches.Rectangle((0, 3 * n_channel), data.shape[0], n_channel, facecolor="green", alpha=0.15))
top_margin = 0.08
left_margin = -0.6
ax.text(left_margin, 1. - top_margin, 'Recall', transform=ax.transAxes, fontsize=7, verticalalignment='top')
ax.text(left_margin, 0.75 - top_margin, 'Store', transform=ax.transAxes, fontsize=7, verticalalignment='top')
ax.text(left_margin, 0.5 - top_margin, 'Value 1', transform=ax.transAxes, fontsize=7, verticalalignment='top')
ax.text(left_margin, 0.25 - top_margin, 'Value 0', transform=ax.transAxes, fontsize=7, verticalalignment='top')
# plot targets
ax = ax_list[3 if FLAGS.model not in ['LIF', 'ALIF'] else 2]
mask = plot_result_values['recall_charac_mask'][batch]
data = plot_result_values['target_nums'][batch]
data[np.invert(mask)] = -1
lines = []
ind_nt = np.argwhere(data != -1)
for idx in ind_nt.tolist():
i = idx[0]
lines.append([(i * FLAGS.tau_char, data[i]), ((i + 1) * FLAGS.tau_char, data[i])])
lc_t = mc.LineCollection(lines, colors='green', linewidths=2, label='Target')
ax.add_collection(lc_t) # plot target segments
# plot output per tau_char
data = plot_result_values['out_plot_char_step'][batch]
data = np.array([(d[1] - d[0] + 1) / 2 for d in data])
data[np.invert(mask)] = -1
lines = []
ind_nt = np.argwhere(data != -1)
for idx in ind_nt.tolist():
i = idx[0]
lines.append([(i * FLAGS.tau_char, data[i]), ((i + 1) * FLAGS.tau_char, data[i])])
lc_o = mc.LineCollection(lines, colors='blue', linewidths=2, label='Output')
ax.add_collection(lc_o)  # plot output segments
# plot softmax of psp-s per dt for more intuitive monitoring
# plotting only the second class since it is more intuitive to follow (the first class is just a mirror)
output2 = plot_result_values['out_plot'][batch, :, 1]
presentation_steps = np.arange(output2.shape[0])
ax.set_yticks([0, 0.5, 1])
ax.grid(color='black', alpha=0.15, linewidth=0.4)
ax.set_ylabel('Output')
line_output2, = ax.plot(presentation_steps, output2, color='purple', label='softmax', alpha=0.7)
ax.axis([0, presentation_steps[-1] + 1, -0.3, 1.1])
ax.legend(handles=[lc_t, lc_o, line_output2], loc='lower center', fontsize=7,
bbox_to_anchor=(0.5, -0.05), ncol=3)
if FLAGS.model != 'LIF':
ax.set_xticklabels([])
# debug plot for psp-s or biases
plot_param = 'b_con' # or 'psp'
ax.set_xticklabels([])
ax = ax_list[-2]
ax.grid(color='black', alpha=0.15, linewidth=0.4)
ax.set_ylabel('PSPs' if plot_param == 'psp' else 'Threshold')
sub_data = plot_result_values[plot_param][batch]
if plot_param == 'b_con':
sub_data = sub_data + thr
vars = np.var(sub_data, axis=0)
# cell_with_max_var = np.argsort(vars)[::-1][:n_max_synapses * 3:3]
cell_with_max_var = np.argsort(vars)[::-1][:n_max_synapses]
presentation_steps = np.arange(sub_data.shape[0])
ax.plot(sub_data[:, cell_with_max_var], color='r', label='Output', alpha=0.4, linewidth=1)
ax.axis([0, presentation_steps[-1], np.min(sub_data[:, cell_with_max_var]),
np.max(sub_data[:, cell_with_max_var])]) # [xmin, xmax, ymin, ymax]
plot_param = 'v'
ax.set_xticklabels([])
ax = ax_list[-1]
ax.grid(color='black', alpha=0.15, linewidth=0.4)
ax.set_ylabel('Membrane potential')
sub_data = plot_result_values[plot_param][batch]
presentation_steps = np.arange(sub_data.shape[0])
ax.plot(sub_data, label='Voltage', alpha=0.4, linewidth=1)
ax.axis([0, presentation_steps[-1], np.min(sub_data[:, cell_with_max_var]),
np.max(sub_data[:, cell_with_max_var])]) # [xmin, xmax, ymin, ymax]
ax.set_xlabel('Time in ms')
# With interactive plotting, wait one second to give matplotlib time to draw the axes
if FLAGS.do_plot:
plt.draw()
plt.pause(1)
test_loss_list = []
test_loss_with_reg_list = []
validation_error_list = []
tau_delay_list = []
training_time_list = []
time_to_ref_list = []
results_tensors = {'loss': loss,
'loss_reg': loss_reg,
'loss_recall': loss_recall,
# 'recall_errors': recall_errors,
'final_state': final_state,
'av': av,
'adaptive_regularization_coeff': adaptive_regularization_coeff,
'target_nums_at_recall': target_nums_at_recall,
'Y': Y,
'Y_predict': Y_predict,
}
if FLAGS.model in ['LIF', 'ALIF']:
results_tensors['w_in_val'] = cell.w_in_val
results_tensors['w_rec_val'] = cell.w_rec_val
results_tensors['w_out'] = w_out
w_in_last = sess.run(cell.w_in_val)
w_rec_last = sess.run(cell.w_rec_val)
w_out_last = sess.run(w_out)
plot_result_tensors = {'input_spikes': input_spikes,
'z': z,
'v': v,
'z_con': z_con,
'input_nums': input_nums,
'target_nums': target_nums,
'out_avg_per_step': out_plot_char_step,
# 'false_sentence_id_list': false_sentence_id_list,
}
t_train = 0
t_ref = time()
for k_iter in range(FLAGS.n_iter):
if k_iter > 0 and np.mod(k_iter, FLAGS.lr_decay_every) == 0:
old_lr = sess.run(learning_rate)
new_lr = sess.run(decay_learning_rate_op)
print('Decaying learning rate: {:.2g} -> {:.2g}'.format(old_lr, new_lr))
# Monitor the training with a validation set
t0 = time()
val_dict = get_data_dict(FLAGS.batch_val)
feed_dict_with_placeholder_container(val_dict, init_state_holder, last_final_state_state_validation_pointer[0])
plot_result_tensors['psp'] = psp
plot_result_tensors['out_plot_char_step'] = out_plot_char_step
plot_result_tensors['out_plot'] = out_plot
plot_result_tensors['recall_charac_mask'] = recall_charac_mask
plot_result_tensors['Y'] = Y
plot_result_tensors['Y_predict'] = Y_predict
if FLAGS.model in ['Mikolov', 'ALIF']:
plot_result_tensors['b_con'] = b_con
results_values, plot_results_values = sess.run([results_tensors, plot_result_tensors], feed_dict=val_dict)
rec_err, wrong_seq = add_errors(results_values)
results_values['recall_errors'] = rec_err
plot_results_values['false_sentence_id_list'] = wrong_seq
if FLAGS.preserve_state:
last_final_state_state_validation_pointer[0] = results_values['final_state']
last_final_state_state_testing_pointer[0] = results_values['final_state']
t_run = time() - t0
# Storage of the results
test_loss_with_reg_list.append(results_values['loss_reg'])
test_loss_list.append(results_values['loss_recall'])
validation_error_list.append(results_values['recall_errors'])
training_time_list.append(t_train)
time_to_ref_list.append(time() - t_ref)
if np.mod(k_iter, print_every) == 0:
print('''Iteration {}, statistics on the validation set average error {:.2g} +- {:.2g} (trial averaged)'''
.format(k_iter, np.mean(validation_error_list[-print_every:]),
np.std(validation_error_list[-print_every:])))
current_w_in = sess.run(cell.w_in_var)
w_in_b0 = np.mean(current_w_in[0:FLAGS.n_in // 4], axis=0)
w_in_b1 = np.mean(current_w_in[FLAGS.n_in // 4:2 * FLAGS.n_in // 4], axis=0)
w_in_s = np.mean(current_w_in[2 * FLAGS.n_in // 4:3 * FLAGS.n_in // 4], axis=0)
w_in_r = np.mean(current_w_in[3 * FLAGS.n_in // 4:4 * FLAGS.n_in // 4], axis=0)
current_w_in = np.vstack((w_in_b0, w_in_b1, w_in_s, w_in_r))
print("POST w_in\n", current_w_in)
print("POST w_rec\n", sess.run(cell.w_rec_var))
print("POST w_out\n", sess.run(w_out))
def get_stats(v):
    if np.size(v) == 0:
        return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
    min_val = np.min(v)
    max_val = np.max(v)
    k_min = np.sum(v == min_val)
    k_max = np.sum(v == max_val)
    return min_val, max_val, np.mean(v), np.std(v), k_min, k_max
firing_rate_stats = get_stats(results_values['av'] * 1000)
reg_coeff_stats = get_stats(results_values['adaptive_regularization_coeff'])
if FLAGS.verbose:
print('''
firing rate (Hz) min {:.0f} ({}) \t max {:.0f} ({}) \t
average {:.0f} +- std {:.0f} (averaged over batches and time)
reg. coeff min {:.2g} \t max {:.2g} \t average {:.2g} +- std {:.2g}
comput. time (s) training {:.2g} \t validation {:.2g}
loss classif. {:.2g} \t reg. loss {:.2g}
'''.format(
firing_rate_stats[0], firing_rate_stats[4], firing_rate_stats[1], firing_rate_stats[5],
firing_rate_stats[2], firing_rate_stats[3],
reg_coeff_stats[0], reg_coeff_stats[1], reg_coeff_stats[2], reg_coeff_stats[3],
t_train, t_run,
results_values['loss_recall'], results_values['loss_reg']
))
if (0 < FLAGS.rewiring_connectivity or FLAGS.l1 > 0) and FLAGS.model in ['LIF', 'ALIF']:
rewired_ref_list = ['w_in_val','w_rec_val','w_out']
non_zeros = [np.sum(results_values[ref] != 0) for ref in rewired_ref_list]
sizes = [np.size(results_values[ref]) for ref in rewired_ref_list]
empirical_connectivity = np.sum(non_zeros) / np.sum(sizes)
if 0 < FLAGS.rewiring_connectivity:
assert empirical_connectivity < FLAGS.rewiring_connectivity * 1.1,\
'Rewiring error: found connectivity {:.3g}'.format(empirical_connectivity)
w_in_new = results_values['w_in_val']
w_rec_new = results_values['w_rec_val']
w_out_new = results_values['w_out']
stay_con_in = np.logical_and(w_in_new != 0, w_in_last != 0)
stay_con_rec = np.logical_and(w_rec_new != 0, w_rec_last != 0)
stay_con_out = np.logical_and(w_out_new != 0, w_out_last != 0)
Dw_in = np.linalg.norm(w_in_new[stay_con_in] - w_in_last[stay_con_in])
Dw_rec = np.linalg.norm(w_rec_new[stay_con_rec] - w_rec_last[stay_con_rec])
Dw_out = np.linalg.norm(w_out_new[stay_con_out] - w_out_last[stay_con_out])
if FLAGS.verbose:
print('''Connectivity {:.3g} \t Non zeros: W_in {}/{} W_rec {}/{} w_out {}/{} \t
New zeros: W_in {} W_rec {} W_out {}'''.format(
empirical_connectivity,
non_zeros[0], sizes[0],
non_zeros[1], sizes[1],
non_zeros[2], sizes[2],
np.sum(np.logical_and(w_in_new == 0, w_in_last != 0)),
np.sum(
|
np.logical_and(w_rec_new == 0, w_rec_last != 0)
|
numpy.logical_and
|
import sys
import numpy as np
sys.path.insert(0, "../")
from bayes_optim import DiscreteSpace, IntegerSpace, RealSpace
from bayes_optim.search_space.samplers import SCMC
search_space = (
RealSpace([-5, 5]) * 2 + DiscreteSpace(["A", "B", "C", "D"]) + IntegerSpace([1, 10]) * 2
)
def h(x):
return np.array([bool(x[2] not in ["A", "B"]), x[4] ** 2 + x[4] - 2])
def g(x):
return np.array(
[
np.sum(x[:2] ** 2) - 1,
0.25 -
|
np.sum(x[:2] ** 2)
|
numpy.sum
|
import unittest
import numpy
import cupy
from cupy import cuda
from cupy import testing
import cupyx.scipy.linalg
if cupyx.scipy._scipy_available:
import scipy.linalg
@testing.gpu
@testing.parameterize(*testing.product({
'shape': [(1, 1), (2, 2), (3, 3), (5, 5), (1, 5), (5, 1), (2, 5), (5, 2)],
}))
@testing.fix_random()
@unittest.skipUnless(
cuda.cusolver_enabled, 'Only cusolver in CUDA 8.0 is supported')
@testing.with_requires('scipy')
class TestLUFactor(unittest.TestCase):
@testing.for_float_dtypes(no_float16=True)
def test_lu_factor(self, dtype):
if self.shape[0] != self.shape[1]:
    # skip non-square inputs since scipy.linalg.lu_factor requires a square matrix
    raise unittest.SkipTest('lu_factor requires a square matrix')
array = numpy.random.randn(*self.shape)
a_cpu = numpy.asarray(array, dtype=dtype)
a_gpu = cupy.asarray(array, dtype=dtype)
result_cpu = scipy.linalg.lu_factor(a_cpu)
result_gpu = cupyx.scipy.linalg.lu_factor(a_gpu)
self.assertEqual(len(result_cpu), len(result_gpu))
self.assertEqual(result_cpu[0].dtype, result_gpu[0].dtype)
self.assertEqual(result_cpu[1].dtype, result_gpu[1].dtype)
cupy.testing.assert_allclose(result_cpu[0], result_gpu[0], atol=1e-5)
cupy.testing.assert_array_equal(result_cpu[1], result_gpu[1])
@testing.for_float_dtypes(no_float16=True)
def test_lu_factor_reconstruction(self, dtype):
m, n = self.shape
A = cupy.random.randn(m, n, dtype=dtype)
lu, piv = cupyx.scipy.linalg.lu_factor(A)
# extract ``L`` and ``U`` from ``lu``
L = cupy.tril(lu, k=-1)
cupy.fill_diagonal(L, 1.)
L = L[:, :m]
U = cupy.triu(lu)
U = U[:n, :]
# check output shapes
assert lu.shape == (m, n)
assert L.shape == (m, min(m, n))
assert U.shape == (min(m, n), n)
assert piv.shape == (min(m, n),)
# apply pivot (on CPU since slaswp is not available in cupy)
piv = cupy.asnumpy(piv)
rows =
|
numpy.arange(m)
|
numpy.arange
|
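# Aside (not part of the dataset entry above): a minimal NumPy/SciPy sketch of
# how the LAPACK-style `piv` returned by scipy.linalg.lu_factor is converted
# into a row permutation, following the same swap convention used in the test
# above, so that A[perm] reconstructs L @ U.
import numpy as np
from scipy.linalg import lu_factor

A = np.random.randn(5, 5)
lu, piv = lu_factor(A)
L = np.tril(lu, k=-1) + np.eye(5)   # unit lower-triangular factor
U = np.triu(lu)                     # upper-triangular factor
perm = np.arange(5)
for i, p in enumerate(piv):         # row i was interchanged with row piv[i]
    perm[i], perm[p] = perm[p], perm[i]
assert np.allclose(A[perm], L @ U)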
import numpy as np
import torch
import sys
from sklearn.neighbors import NearestNeighbors as NN
import geop.geometry.util as gutil
from torch_scatter import scatter_add, scatter_mean
from torch_geometric.nn import knn
""" Iterative Closest Points (ICP) Method according to point-to-plane metric.
Inputs:
source: o3d.geometry.PointCloud
target: o3d.geometry.PointCloud
sigma: soft-thresholding [default 0.01]
max_iter: maximum number of iterations [default 100]
stopping_threshold: stopping threshold for ICP algorithm [default 1e-4]
Outputs:
transform: np.ndarray of shape [4, 4].
Transformation from source to target.
"""
def icp_reweighted(source, target, sigma=0.01, max_iter = 100,
stopping_threshold=1e-4):
""" If target has no normals, estimate """
import open3d as o3d
if np.array(target.normals).shape[0] == 0:
search_param = o3d.geometry.KDTreeSearchParamHybrid(
radius=0.2, max_nn=30)
o3d.estimate_normals(target, search_param=search_param)
tree = NN(n_neighbors=1, algorithm='kd_tree', n_jobs=10)
tree = tree.fit(np.array(target.points))
n = np.array(source.points).shape[0]
normals = np.array(target.normals)
points = np.array(target.points)
weights = np.zeros(n)
errors = []
transform = np.eye(4)
for itr in range(max_iter):
p = np.array(source.points)
R, trans = gutil.unpack(transform)
p = (R.dot(p.T) + trans.reshape((3, 1))).T
_, indices = tree.kneighbors(p)
""" (r X pi + pi + t - qi)^T ni """
"""( <r, (pi X ni)> + <t, ni> + <pi-qi, ni> )^2"""
""" (<(r; t), hi> + di)^2 """
nor = normals[indices[:, 0], :]
q = points[indices[:, 0], :]
d = np.sum(np.multiply(p-q, nor), axis=1) #[n]
h = np.zeros((n, 6))
h[:, :3] =
|
np.cross(p, nor)
|
numpy.cross
|
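# Aside (not part of the dataset entry above): a hedged sketch of one way to
# finish a reweighted point-to-plane step once h (n x 6) and d (n,) are built
# as in the truncated function. The weight formula is an assumption chosen for
# illustration; the original implementation may weight residuals differently.
import numpy as np

def point_to_plane_step(h, d, sigma=0.01):
    # robust weights: residuals much larger than sigma are down-weighted
    w = sigma ** 2 / (sigma ** 2 + d ** 2)
    sw = np.sqrt(w)
    # weighted least squares for min_x sum_i w_i * (<h_i, x> + d_i)^2
    x, *_ = np.linalg.lstsq(sw[:, None] * h, -sw * d, rcond=None)
    return x  # x = (r; t): small rotation and translation update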
def plot_solution(system, filename=None, num=1, smooth=True, limits=None):
"""Quickly plot the 1D eigenmodes stored in the system object"""
import numpy as np
import matplotlib.pyplot as plt
sol = system.result
grid = system.grid
title = r'$\omega = {:1.4f}, k_x = {:1.2f}, m={}$'
plt.figure(num)
plt.clf()
fig, axes = plt.subplots(num=num, nrows=system.dim, sharex=True)
for j, var in enumerate(system.variables):
if smooth:
if limits is None:
z = np.linspace(grid.zmin, grid.zmax, 2000)
else:
z = np.linspace(limits[0], limits[1], 2000)
# var_interp = grid.interpolate(z, sol[var])
# axes[j].plot(
# z, var_interp.real, 'C0', label='Real'
# )
# axes[j].plot(
# z, var_interp.imag, 'C1', label='Imag'
# )
axes[j].plot(
z, grid.interpolate(z, sol[var].real), 'C0', label='Real'
)
axes[j].plot(
z, grid.interpolate(z, sol[var].imag), 'C1', label='Imag'
)
axes[j].plot(grid.zg, sol[var].real, 'C0.', label='Real')
axes[j].plot(grid.zg, sol[var].imag, 'C1.', label='Imag')
axes[j].set_ylabel(system.labels[j])
axes[system.dim - 1].set_xlabel(r"$z$")
try:
axes[0].set_title(
title.format(sol[system.eigenvalue], system.kx, sol['mode'])
)
except:
axes[0].set_title(
r'$\omega$ = {:1.6f}'.format(sol[system.eigenvalue])
)
axes[0].legend(frameon=False)
if filename is not None:
fig.savefig(filename)
else:
plt.show()
def get_2Dmap(system, var, xmin, xmax, Nx, Nz, zmin=None, zmax=None, time=0):
"""Create a 2D map of the eigenmode var stored in system.result[var].
This function assumes that the eigenmodes have the form
f(z)*exp(i kx x).
"""
import numpy as np
dx = (xmax - xmin) / Nx
xg = (0.5 + np.arange(Nx)) * dx
if zmin is None or zmax is None:
zmin = system.grid.zmin
zmax = system.grid.zmax
dz = (zmax - zmin) / Nz
zg = (0.5 + np.arange(Nz)) * dz + zmin
xx, zz = np.meshgrid(xg, zg)
# Wavenumber
kx = system.kx
val = np.zeros((Nz, Nx))
def return_real_ampl(f, x):
""""""
return (
2*f*np.exp(1j*kx*x + system.result[system.eigenvalue]*time)
).real
# Interpolate onto z-grid
if type(var) is str:
yr = system.grid.interpolate(zg, system.result[var].real)
yi = system.grid.interpolate(zg, system.result[var].imag)
else:
yr = system.grid.interpolate(zg, var.real)
yi = system.grid.interpolate(zg, var.imag)
y = yr + 1j * yi
for i in range(Nx):
val[:, i] = return_real_ampl(y, xg[i])
return val
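# Note: return_real_ampl above reconstructs the physical field as
# 2 * Re[f(z) * exp(i*kx*x + omega*t)], i.e. the eigenmode plus its complex
# conjugate, evaluated column by column on the interpolated z-grid.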
def get_2D_cylindrical_map_in_cylindrical_coords(
system, var, phimin, phimax, Nphi, Nr, rmin=None, rmax=None, time=0, z=0
):
"""Create a 2D map of the eigenmode var stored in system.result[var].
This function assumes that the eigenmodes have the form
f(r)*exp(i kz z + i m phi). It returns a map in the r-phi plane at
a fixed value of z (default 0)
"""
import numpy as np
# Create linear grid in phi
dphi = (phimax - phimin) / (Nphi - 1)
phig = (np.arange(Nphi)) * dphi
# Create linear grid in r
if rmin is None:
    rmin = system.grid.zmin
if rmax is None:
    rmax = system.grid.zmax
dr = (rmax - rmin) / Nr
rg = (0.5 + np.arange(Nr)) * dr + rmin
# Construct meshgrids
rr, phiphi = np.meshgrid(rg, phig)
# Azimuthal mode number
m = system.m
# Wavenumber
kz = system.kz
val = np.zeros((Nphi, Nr))
def return_real_ampl(f, phi, z):
""""""
return (
2
* f
* np.exp(
1j*kz*z + 1j*m*phi + system.result[system.eigenvalue]*time
)
).real
# Interpolate onto r-grid
if type(var) is str:
yr = system.grid.interpolate(rg, system.result[var].real)
yi = system.grid.interpolate(rg, system.result[var].imag)
else:
yr = system.grid.interpolate(rg, var.real)
yi = system.grid.interpolate(rg, var.imag)
y = yr + 1j * yi
for i in range(Nphi):
val[i, :] = return_real_ampl(y, phig[i], z)
# This is how you would plot the map
# xx = rr * np.cos(phiphi)
# yy = rr * np.sin(phiphi)
# plt.pcolormesh(xx, yy, val)
# plt.axis('equal')
# plt.show()
return (rr, phiphi, val)
def get_2D_cylindrical_map(
system, var, xmin, xmax, ymin, ymax, Nx, Ny, time=0, z=0
):
import numpy as np
x = np.linspace(xmin, xmax, Nx)
y =
|
np.linspace(ymin, ymax, Ny)
|
numpy.linspace
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x ** y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), x_values
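# _sparsify zeroes out entries below `thresh` and packs the survivors into a
# tf.SparseTensor (indices, values, dense shape); the dense value array is
# returned as well so sparse and dense ops can be compared on identical data.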
class UnaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = y.eval()
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_cpu)
if x.dtype in (np.complex64, np.complex128) and tf_func == tf.sign:
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
jacob_t, _ = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
xf = x.astype(np.float)
inxf = tf.convert_to_tensor(xf)
yf = tf_func(inxf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
yf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
else:
self.assertAllClose(result_np, result_tensor.values.eval(), rtol=tol,
atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=False):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=True):
result = tf_func(tf.convert_to_tensor(x))
tf_gpu = result.eval()
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=True):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _compareBothSparse(self, x, np_func, tf_func, tol=None):
self._compareSparseCpu(x, np_func, tf_func, tol)
self._compareSparseGpu(x, np_func, tf_func, tol)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
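# math.lgamma raises ValueError("math domain error") at its poles (the
# non-positive integers); the wrapper above maps that to +inf so the
# np.vectorize'd reference can be compared element-wise against tf.lgamma.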
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(x, np.sqrt, tf.sqrt)
self._compareBoth(x, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(x, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(x, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
# Can't use vectorize below, so just use some arbitrary function
self._compareBoth(x, np.sign, tf.lgamma)
self._compareBoth(x, np.sign, tf.erf)
self._compareBoth(x, np.sign, tf.erfc)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(x, np.arcsin, tf.asin)
self._compareBoth(x, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(x, np.sign, tf.sign)
self._compareBothSparse(x, np.sign, tf.erf)
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(np.float64) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(k, np.tan, tf.tan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
def testHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
y = (x + .5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
self._compareBothSparse(x, np.vectorize(math.erf), tf.erf, tol=1e-3)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testInt64Basic(self):
x = np.arange(
-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex64)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.complex_abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(y, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex128)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(y, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
def testGradGrad(self):
np.random.seed(7)
shape = (5,)
dtype_tols = [(np.float32, 1e-3), (np.float64, 1e-6), (np.complex64, 1e-3),
(np.complex128, 1e-6)]
op_range = [(gen_math_ops._inv_grad, [-2, 2]),
(gen_math_ops._sigmoid_grad, [-2, 2]),
(gen_math_ops._sqrt_grad, [1, 3]),
(gen_math_ops._tanh_grad, [-2, 2]),]
def rand(dtype):
x = np.random.uniform(
real_range[0], real_range[1], size=shape[0]).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * np.random.uniform(-2, 2, size=shape[0]).astype(dtype)
return x
for op, real_range in op_range:
with self.test_session():
for dtype, tol in dtype_tols:
x = tf.constant(rand(dtype))
y = tf.constant(rand(dtype))
z = op(x, y)
error = tf.test.compute_gradient_error(
[x, y], [shape, shape],
z,
shape,
x_init_value=[rand(dtype), rand(dtype)])
self.assertLess(error, tol)
class BinaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
if also_compare_variables:
var_x = tf.Variable(x)
var_y = tf.Variable(y)
tf.initialize_all_variables().run()
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = tf_func(x, var_y).eval()
np_var_right = tf_func(var_x, y).eval()
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {tf.float16: 1e-3,
tf.float32: 1e-3,
tf.complex64: 1e-2,
tf.float64: 1e-5,
tf.complex128: 1e-4}
def _compareGradientX(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
out,
zs,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
xs,
outf,
zs,
x_init_value=xf,
delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
ys,
out,
zs,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
ys,
outf,
zs,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.test_session() as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
s = tf.reduce_sum(inx * iny)
gx, gy = sess.run(tf.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx, np.array([1, 1, 2, 2])
.reshape(2, 2).astype(np.float32))
# gy is x's column summed up
self.assertAllEqual(gy, np.array([3, 7]).
reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = tf.Variable(x)
var_y = tf.Variable(y)
with self.test_session() as sess:
sess.run([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
right_result = (x * var_y).eval()
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
cmp_eq = tf.equal(x, y)
cmp_not_eq = tf.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"],
["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"],
["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, tf.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, tf.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, tf.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, tf.sub),
(np.subtract, _SUB),
(np.power, tf.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, tf.mul),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, tf.truediv),
(np.floor_divide, tf.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
def testMismatchedDimensions(self):
for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
_FLOORDIV]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testZeroPowGrad(self):
with self.test_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = tf.constant(0.0, dtype=dtype)
y = tf.constant(2.0, dtype=dtype)
z = tf.pow(x, y)
error = tf.test.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
def testComplexPowGrad(self):
with self.test_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = tf.constant(base, dtype=dtype)
y = tf.constant(2.0, dtype=dtype)
z = tf.pow(x, y)
error = tf.test.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
class ComparisonOpTest(tf.test.TestCase):
def _compare(self, func, x, y, dtype):
with self.test_session(use_gpu=False):
out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
tf.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.less, x, y, t), x < y)
self.assertEqual(self._compare(tf.less_equal, x, y, t), x <= y)
self.assertEqual(self._compare(tf.greater, x, y, t), x > y)
self.assertEqual(self._compare(tf.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_cpu = out.eval()
self.assertAllEqual(np_ans, tf_cpu)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_gpu = out.eval()
self.assertAllEqual(np_ans, tf_gpu)
def _compareBoth(self, x, y, np_func, tf_func):
self._compareCpu(x, y, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareBoth(xt, yt, np.less, tf.less)
self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
self._compareBoth(xt, yt, np.greater, tf.greater)
self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
self._compareBoth(xt, yt, np.equal, tf.equal)
self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
# TODO(zhifengc): complex64 doesn't work on GPU yet.
for t in [np.complex64, np.complex128]:
self._compareCpu(x.astype(t), y.astype(t), np.equal, tf.equal)
self._compareCpu(x.astype(t), y.astype(t), np.not_equal, tf.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
self._compareCpu(y, x, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
self._compareGpu(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, tf.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, tf.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, tf.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, tf.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, tf.equal)
def testBCastNotEqual(self):
self._testBCastByFunc(np.not_equal, tf.not_equal)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [tf.less, tf.less_equal, tf.greater,
tf.greater_equal, tf.equal, tf.not_equal]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x.astype(t), y.astype(t))
class LogicalOpTest(tf.test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu):
out = tf.logical_not(tf.convert_to_tensor(x))
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(
x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(
x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(
x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = tf.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = tf.constant(3)
y = tf.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = tf.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
class SelectOpTest(tf.test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y,
delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
def testEmptyTensor(self):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.test_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = tf.select(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
with self.test_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = tf.select(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
class BatchSelectOpTest(tf.test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 16)
|
numpy.random.randint
|
import os
import time
import numpy as np
import copy
import tensorflow as tf
from tensorflow.contrib.nccl.python.ops import nccl_ops
nccl_ops._maybe_load_nccl_ops_so()
from base_model import BaseModel
from sndcgan_zgp import Network
from sndcgan_zgp.utils import *
from sndcgan_zgp.ops import nccl_all_mean, network_mean
from sndcgan_zgp.ops import TowerConfig, device_sync_op, optimizer_op
from data_loader import parallel_image_filename_loader
class TrainingModel(BaseModel):
def __init__(self,
sess,
config,
device_list,
model_name="model_name",
dataset_name="default",
data_dir="./data",
base_dir="./"):
super(TrainingModel, self).__init__(name=model_name, base_dir=base_dir)
self.connect_paths()
snapshot_list = list()
snapshot_list.append(dict(type="file", dir="base_model.py"))
snapshot_list.append(dict(type="file", dir="main.py"))
snapshot_list.append(dict(type="file", dir="parallel_training_model.py"))
snapshot_list.append(dict(type="dir", dir="sndcgan_zgp"))
self.snapshot(snapshot_list)
self.sess = sess
self.config = config
self.device_list = device_list
self.num_device = len(device_list)
self.model_name = model_name
self.train_data, \
self.sample_data, \
self.num_batch, \
self.file_type = parallel_image_filename_loader(data_dir=data_dir,
dataset_name=dataset_name,
sample_num=config.sample_num,
batch_size=config.batch_size
)
self.build_model()
def build_model(self):
size = self.config.final_size
batch_size = self.config.batch_size
gf_dim = self.config.gf_dim
df_dim = self.config.df_dim
z_dim = self.config.z_dim
c_dim = 3
tower_prefix = "Tower_{}"
main_idx = 0
self.gOptim = lambda: tf.train.AdamOptimizer(learning_rate=self.config.learning_rate,
beta1=self.config.beta1)
self.dOptim = lambda: tf.train.AdamOptimizer(learning_rate=self.config.learning_rate,
beta1=self.config.beta1)
# Data flow
# Data parser
def _parse_function(fname, number):
image_string = tf.read_file(fname)
if self.file_type in ['jpg', 'jpeg', 'JPG', 'JPEG']:
image_decoded = tf.image.decode_jpeg(image_string, channels=3)
elif self.file_type in ['png', 'PNG']:
image_decoded = tf.image.decode_png(image_string, channels=3)
else:
raise ValueError("Image type should be in 'jpg', 'png'. Got {}.".format(self.file_type))
image_resized = tf.image.resize_images(image_decoded, (size, size))
image_resized = image_resized / 127.5 - 1
return image_resized
def _parse_function_test(fname, z):
image_string = tf.read_file(fname)
if self.file_type in ['jpg', 'jpeg', 'JPG', 'JPEG']:
image_decoded = tf.image.decode_jpeg(image_string, channels=3)
elif self.file_type in ['png', 'PNG']:
image_decoded = tf.image.decode_png(image_string, channels=3)
else:
raise ValueError("Image type should be in 'jpg', 'png'. Got {}.".format(self.file_type))
image_resized = tf.image.resize_images(image_decoded, (size, size))
image_resized = image_resized / 127.5 - 1
return image_resized, z
# Start towering
# Iterator
self.iterator = nested_dict()
# Loss container
# Tower, data config
print("Tower configuration ... ", end=" ", flush=True)
tower_config_list = list()
for idx, device in enumerate(self.device_list):
# Tower config
tower_config = TowerConfig(idx=idx,
prefix=tower_prefix,
is_main=idx == main_idx,
num_devices=len(self.device_list),
device_name=device,
is_test=False)
tower_config_list.append(tower_config)
# Data flow
# For train
if tower_config.is_main:
dataset = tf.data.Dataset.from_tensor_slices(self.train_data)
dataset = dataset.repeat().shuffle(len(self.train_data) * 2)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
map_func=_parse_function,
batch_size=batch_size * len(self.device_list),
num_parallel_batches=int(batch_size * len(self.device_list) * 1.5)
)
)
self.iterator['train'] = dataset.make_initializable_iterator()
# For test
if tower_config.is_main:
self.sample_z = tf.constant(np.random.normal(0.0, 1.0,
size=[self.config.sample_num, z_dim])
.astype(dtype=np.float32))
test_dataset = tf.data.Dataset.from_tensor_slices((self.sample_data, self.sample_z))
test_dataset = test_dataset.apply(
tf.contrib.data.map_and_batch(
map_func=_parse_function_test,
batch_size=1,
num_parallel_batches=batch_size * 16
)
)
self.iterator['test'] = test_dataset.make_initializable_iterator()
# Only for test
sample_x, sample_z = self.iterator['test'].get_next()
print("Done !")
# Building network
print("Build Network ...")
network_list = list()
dummy_network_list = list()
# Fetch dataflow
x_full = self.iterator['train'].get_next()
for idx, (device, tower_config) in enumerate(zip(self.device_list, tower_config_list)):
print("\tCreating gpu tower @ {:d} on device {:s}".format(idx, device))
# Fetch dataflow
x = x_full[batch_size * idx: batch_size * (idx+1)]
with tf.device(device), tf.variable_scope(tower_prefix.format(idx)):
z = tf.random_normal(shape=[batch_size, z_dim], dtype=tf.float32, name="z")
# Establish network
network = Network(name="Network",
batch_size=batch_size,
size=size,
gf_dim=gf_dim,
df_dim=df_dim,
reuse=False,
is_training=True,
tower_config=tower_config)
network.build_network(x=x, z=z)
network_list.append(network)
dummy_tower_config = copy.deepcopy(tower_config)
dummy_tower_config.is_test = True
dummy = Network(name="Network",
batch_size=1,
size=size,
gf_dim=gf_dim,
df_dim=df_dim,
reuse=True,
is_training=False,
tower_config=dummy_tower_config)
dummy.build_network(z=sample_z, x=sample_x)
dummy_network_list.append(dummy)
# Establish test network
if tower_config.is_main:
print("\t +- Test network @ tower {:d} on device {:s}".format(idx, device))
self.test_network = dummy
# Using the NCCL library: all variables in the list need to be run,
# otherwise it will hang.
self.test_enforcer = network_mean(dummy_network_list, lambda x: x.y)
g_loss_list = nccl_all_mean(network_list, lambda x: x.g_loss)
d_loss_list = nccl_all_mean(network_list, lambda x: x.d_loss)
self.g_loss = network_mean(network_list, lambda x:x.g_loss)
self.d_loss = network_mean(network_list, lambda x:x.d_loss)
self.gp_loss = network_mean(network_list, lambda x:x.gp_loss)
print(">> Done.")
# Compute gradients
print("Compute gradients ... ", end=' ', flush=True)
self.d_optimize_op = optimizer_op(d_loss_list, network_list, self.dOptim, var_name="type__discriminator")
self.g_optimize_op = optimizer_op(g_loss_list, network_list, self.gOptim, var_name="type__generator")
print("Done !")
self.sync_op = device_sync_op(tower_prefix, main_idx)
self.network_list = network_list
# Saver to save only main tower.
excluding_towers = [tower_config.name for tower_config in tower_config_list if not tower_config.is_main]
tracking_variables_list = [v for v in tf.global_variables() if not any(tower_name in v.op.name for tower_name in excluding_towers)]
self.saver = tf.train.Saver(var_list=tracking_variables_list, max_to_keep=None)
def train(self, config):
# Initializing
print("Initializing ... ", end=' ', flush=True)
self.sess.run(tf.global_variables_initializer())
print("Done !")
# Restoring
if config.checkpoint_dir is None:
pass
else:
print("Restoring ... ", end=' ', flush=True)
self.restore_checkpoints(config.checkpoint_dir, config.restore_list)
print("Done !")
print("Sync towers ... ", end=' ', flush=True)
if self.num_device == 1:
print(" >> Using only one device. Skip syncing. ", end=' ', flush=True)
else:
self.sess.run(self.sync_op)
print("Done !")
counter = 0
start_time = time.time()
# Check test input
print("Checking input pipeline ... ", end=' ', flush=True)
y_sample, x_sample, _ = self.sample_runner([self.test_network.y, self.test_network.x, self.test_enforcer])
save_images(y_sample,
image_manifold_size(len(y_sample)),
os.path.join(self.get_result_dir(), "9_naive_gen_image.jpg"))
save_images(x_sample,
image_manifold_size(len(x_sample)),
os.path.join(self.get_result_dir(), "9_input_image.jpg"))
os.mkdir(os.path.join(self.get_result_dir(), "Result"))
print("Done !")
print("Train iterator initializing ... ", end=' ', flush=True)
self.sess.run(self.iterator['train'].initializer)
print("Done !")
# Batch training
epoch_time = time.time()
effective_num_batch = (self.num_batch // len(self.device_list))
print("CKPT directory: {}".format(self.get_checkpoint_dir()))
print("Effective batch size: {}".format(self.config.batch_size * len(self.device_list)))
while counter // effective_num_batch < config.epoch:
epoch = counter // effective_num_batch
idx = counter % effective_num_batch
counter += 1
# Syncing
if np.mod(counter, 5000) == 0:
if not self.num_device == 1:
self.sess.run(self.sync_op)
print("Sync towers on step {}.".format(counter))
# Update D network
for i_discriminator in range(config.discriminator_iteration):
self.sess.run(self.d_optimize_op)
# Update G network
for i_generator in range(config.generator_iteration):
self.sess.run(self.g_optimize_op)
if np.mod(counter, config.print_interval)
|
numpy.mod
|
# -*- coding: utf-8 -*-
import numpy
# only accepts order = numpy.nan or 0
def designmatrix( c, order ):
if numpy.isnan(order):
return numpy.array([],ndmin=2).reshape(c.shape[0],0), numpy.array([],ndmin = 2)
else:
n, nd = c.shape
X = numpy.ones((n, 1))
|
numpy.ones
|
from __future__ import print_function
from __future__ import division
import csv
import time
import math
import numpy as np
import warnings
from rdkit import Chem, DataStructs
from rdkit.Chem import QED
from sklearn.model_selection import KFold, StratifiedKFold
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ExponentialLR, StepLR
import torch.nn.functional as F
from torch.autograd import Variable
import random
from tqdm import tqdm, trange
import threading
from sklearn.externals import joblib
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor as RFR
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from azureml.core.model import Model
def get_fp(smiles):
fp = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
mol = smiles[i]
tmp = np.array(mol2image(mol, n=2048))
if np.isnan(tmp[0]):
invalid_indices.append(i)
else:
fp.append(tmp)
processed_indices.append(i)
return np.array(fp), processed_indices, invalid_indices
def get_desc(smiles, calc):
desc = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
sm = smiles[i]
try:
mol = Chem.MolFromSmiles(sm)
tmp = np.array(calc(mol))
desc.append(tmp)
processed_indices.append(i)
except:
invalid_indices.append(i)
desc_array = np.array(desc)
return desc_array, processed_indices, invalid_indices
def normalize_desc(desc_array, desc_mean=None):
desc_array = np.array(desc_array).reshape(len(desc_array), -1)
ind = np.zeros(desc_array.shape)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
try:
if np.isfinite(desc_array[i, j]):
ind[i, j] = 1
except:
pass
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = 0
if desc_mean is None:
desc_mean = np.mean(desc_array, axis=0)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = desc_mean[j]
return desc_array, desc_mean
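# Illustrative usage sketch, not part of the original module: normalize_desc
# replaces non-finite descriptor entries with the per-column mean of the
# remaining values. The toy array below is an assumption for the demo.
def _demo_normalize_desc():
    raw = np.array([[1.0, np.nan],
                    [3.0, 4.0]])
    cleaned, col_means = normalize_desc(raw)
    # the NaN in column 1 is first zeroed, then replaced by that column's mean
    # (computed after zeroing)
    return cleaned, col_means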
def mol2image(x, n=2048):
try:
m = Chem.MolFromSmiles(x)
fp = Chem.RDKFingerprint(m, maxPath=4, fpSize=n)
res = np.zeros(len(fp))
DataStructs.ConvertToNumpyArray(fp, res)
return res
except:
return [np.nan]
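# Illustrative usage sketch, not part of the original module: fingerprint a
# small SMILES list with get_fp/mol2image; the malformed string is reported
# through invalid_indices. The example molecules are assumptions for the demo.
def _demo_get_fp():
    smiles = ['CCO', 'c1ccccc1', 'not-a-smiles']
    fps, ok_idx, bad_idx = get_fp(smiles)
    # fps has shape (2, 2048); ok_idx == [0, 1]; bad_idx == [2]
    return fps, ok_idx, bad_idx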
def sanitize_smiles(smiles, canonical=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their sanitized versions.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
Parameters
----------
smiles: list
list of SMILES strings
canonical: bool (default True)
parameter specifying whether SMILES will be converted to canonical
format
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of SMILES; an empty string replaces any SMILES string that is
invalid or cannot be sanitized.
If canonical is True, returns list of canonical SMILES.
When canonical is True this function is analogous to:
canonical_smiles(smiles, sanitize=True).
"""
new_smiles = []
for sm in smiles:
try:
if canonical:
new_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(sm, sanitize=True)))
else:
new_smiles.append(sm)
except:
if throw_warning:
warnings.warn('Unsanitized SMILES string: ' + sm, UserWarning)
new_smiles.append('')
return new_smiles
def canonical_smiles(smiles, sanitize=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their canonical SMILES.
Parameters
----------
smiles: list
list of SMILES strings to convert into canonical format
sanitize: bool (default True)
parameter specifying whether to sanitize SMILES or not.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of canonical SMILES; an empty string replaces any SMILES string
that is invalid or cannot be parsed (when sanitize is True)
When sanitize is True the function is analogous to:
sanitize_smiles(smiles, canonical=True).
"""
new_smiles = []
for sm in smiles:
try:
mol = Chem.MolFromSmiles(sm, sanitize=sanitize)
new_smiles.append(Chem.MolToSmiles(mol))
except:
if throw_warning:
warnings.warn(sm + ' can not be canonized: invalid '
'SMILES string!', UserWarning)
new_smiles.append('')
return new_smiles
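# Illustrative usage sketch, not part of the original module: canonicalize one
# valid and one invalid SMILES string; the invalid input maps to ''. The
# inputs are assumptions for the demo.
def _demo_canonical_smiles():
    result = canonical_smiles(['OCC', 'xyz'], sanitize=True, throw_warning=False)
    # typically returns ['CCO', ''] -- RDKit's canonical form of ethanol,
    # followed by the empty-string placeholder for the unparsable input
    return result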
def save_smi_to_file(filename, smiles, unique=True):
"""
Takes path to file and list of SMILES strings and writes SMILES to the specified file.
Args:
filename (str): path to the file
smiles (list): list of SMILES strings
unique (bool): parameter specifying whether to write only unique copies or not.
Output:
success (bool): defines whether operation was successfully completed or not.
"""
if unique:
smiles = list(set(smiles))
else:
smiles = list(smiles)
f = open(filename, 'w')
for mol in smiles:
f.writelines([mol, '\n'])
f.close()
return f.closed
def read_smi_file(filename, unique=True, add_start_end_tokens=False):
"""
Reads SMILES from file. File must contain one SMILES string per line
with \n token in the end of the line.
Args:
filename (str): path to the file
unique (bool): return only unique SMILES
Returns:
smiles (list): list of SMILES strings from the specified file.
If 'unique=True' this list contains only unique copies.
success (bool): defines whether the operation was successfully completed or not.
"""
f = open(filename, 'r')
molecules = []
for line in f:
if add_start_end_tokens:
molecules.append('<' + line[:-1] + '>')
else:
molecules.append(line[:-1])
if unique:
molecules = list(set(molecules))
else:
molecules = list(molecules)
f.close()
return molecules, f.closed
def tokenize(smiles, tokens=None):
"""
Returns list of unique tokens, token-2-index dictionary and number of
unique tokens from the list of SMILES
Parameters
----------
smiles: list
list of SMILES strings to tokenize.
tokens: list, str (default None)
list of unique tokens
Returns
-------
tokens: list
list of unique tokens/SMILES alphabet.
token2idx: dict
dictionary mapping token to its index.
num_tokens: int
number of unique tokens.
"""
if tokens is None:
tokens = list(set(''.join(smiles)))
tokens = list(np.sort(tokens))
tokens = ''.join(tokens)
token2idx = dict((token, i) for i, token in enumerate(tokens))
num_tokens = len(tokens)
return tokens, token2idx, num_tokens
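# Illustrative usage sketch, not part of the original module: build the
# character vocabulary for two wrapped SMILES strings. The inputs are
# assumptions for the demo.
def _demo_tokenize():
    tokens, token2idx, num_tokens = tokenize(['<CCO>', '<CCN>'])
    # tokens == '<>CNO' (unique characters in sorted order),
    # token2idx maps each character to its index, num_tokens == 5
    return tokens, token2idx, num_tokens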
def time_since(since):
s = time.time() - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def cross_validation_split(x, y, n_folds=5, split='random', folds=None):
assert(len(x) == len(y))
x = np.array(x)
y = np.array(y)
if split not in ['random', 'stratified', 'fixed']:
raise ValueError('Invalid value for argument \'split\': '
'must be either \'random\', \'stratified\' '
'or \'fixed\'')
if split == 'random':
cv_split = KFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'stratified':
cv_split = StratifiedKFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'fixed' and folds is None:
raise TypeError(
'Invalid type for argument \'folds\': found None, but must be list')
cross_val_data = []
cross_val_labels = []
if len(folds) == n_folds:
for fold in folds:
cross_val_data.append(x[fold[1]])
cross_val_labels.append(y[fold[1]])
elif len(folds) == len(x) and np.max(folds) == n_folds:
for f in range(n_folds):
left = np.where(folds == f)[0].min()
right = np.where(folds == f)[0].max()
cross_val_data.append(x[left:right + 1])
cross_val_labels.append(y[left:right + 1])
return cross_val_data, cross_val_labels
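# Illustrative usage sketch, not part of the original module: random 5-fold
# split of a toy data set; each entry of the returned lists is one fold.
# The toy arrays are assumptions for the demo.
def _demo_cross_validation_split():
    x = np.arange(20)
    y = 0.5 * np.arange(20)
    fold_data, fold_labels = cross_validation_split(x, y, n_folds=5, split='random')
    # five folds of four samples each
    return [len(fold) for fold in fold_data]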
def read_object_property_file(path, delimiter=',', cols_to_read=[0, 1],
keep_header=False):
f = open(path, 'r')
reader = csv.reader(f, delimiter=delimiter)
data_full = np.array(list(reader))
if keep_header:
start_position = 0
else:
start_position = 1
assert len(data_full) > start_position
data = [[] for _ in range(len(cols_to_read))]
for i in range(len(cols_to_read)):
col = cols_to_read[i]
data[i] = data_full[start_position:, col]
f.close()
if len(cols_to_read) == 1:
data = data[0]
return data
"""### **Data Process**"""
class GeneratorData(object):
"""
Docstring coming soon...
"""
def __init__(self, training_data_path, tokens=None, start_token='<',
end_token='>', max_len=120, use_cuda=None, **kwargs):
"""
Constructor for the GeneratorData object.
Parameters
----------
training_data_path: str
path to file with training dataset. Training dataset must contain
a column with training strings. The file also may contain other
columns.
tokens: list (default None)
list of characters specifying the language alphabet. If left
unspecified, tokens will be extracted from data automatically.
start_token: str (default '<')
special character that will be added to the beginning of every
sequence and encode the sequence start.
end_token: str (default '>')
special character that will be added to the end of every
sequence and encode the sequence end.
max_len: int (default 120)
maximum allowed length of the sequences. All sequences longer than
max_len will be excluded from the training data.
use_cuda: bool (default None)
parameter specifying if GPU is used for computations. If left
unspecified, GPU will be used if available
kwargs: additional keyword arguments
These include cols_to_read (list, default [0]) specifying which
column in the file with training data contains training sequences
and delimiter (str, default ',') that will be used to separate
columns if there are multiple of them in the file.
"""
super(GeneratorData, self).__init__()
if 'cols_to_read' not in kwargs:
kwargs['cols_to_read'] = []
data = read_object_property_file(training_data_path,
**kwargs)
self.start_token = start_token
self.end_token = end_token
self.file = []
for i in range(len(data)):
if len(data[i]) <= max_len:
self.file.append(self.start_token + data[i] + self.end_token)
self.file_len = len(self.file)
self.all_characters, self.char2idx, \
self.n_characters = tokenize(self.file, tokens)
self.use_cuda = use_cuda
if self.use_cuda is None:
self.use_cuda = torch.cuda.is_available()
def load_dictionary(self, tokens, char2idx):
self.all_characters = tokens
self.char2idx = char2idx
self.n_characters = len(tokens)
def random_chunk(self):
"""
Samples random SMILES string from generator training data set.
Returns:
random_smiles (str).
"""
index = random.randint(0, self.file_len-1)
return self.file[index]
def char_tensor(self, string):
"""
Converts SMILES into tensor of indices wrapped into torch.autograd.Variable.
Args:
string (str): input SMILES string
Returns:
tokenized_string (torch.autograd.Variable(torch.tensor))
"""
tensor = torch.zeros(len(string)).long()
for c in range(len(string)):
tensor[c] = self.all_characters.index(string[c])
if self.use_cuda:
return torch.tensor(tensor).cuda()
else:
return torch.tensor(tensor)
def random_training_set(self, smiles_augmentation):
chunk = self.random_chunk()
if smiles_augmentation is not None:
chunk = '<' + smiles_augmentation.randomize_smiles(chunk[1:-1]) + '>'
inp = self.char_tensor(chunk[:-1])
target = self.char_tensor(chunk[1:])
return inp, target
def read_sdf_file(self, path, fields_to_read):
raise NotImplementedError
def update_data(self, path):
self.file, success = read_smi_file(path, unique=True)
self.file_len = len(self.file)
assert success
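# Illustrative usage sketch, not part of the original module. The file path is
# a placeholder (assumption): it should point at a CSV with one SMILES string
# per row in the first column.
def _demo_generator_data():
    gen_data = GeneratorData('data/smiles_train.csv',
                             cols_to_read=[0], delimiter=',', use_cuda=False)
    inp, target = gen_data.random_training_set(smiles_augmentation=None)
    # inp/target are shifted-by-one index tensors over one random SMILES string
    return inp, target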
class PredictorData(object):
def __init__(self, path, delimiter=',', cols=[0, 1], get_features=None,
has_label=True, labels_start=1, **kwargs):
super(PredictorData, self).__init__()
data = read_object_property_file(path, delimiter, cols_to_read=cols)
if has_label:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = np.array(data[labels_start:], dtype='float32')
self.y = self.y.reshape(-1, len(cols) - labels_start)
if self.y.shape[1] == 1:
self.y = self.y.reshape(-1)
else:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = [None]*len(self.objects)
assert len(self.objects) == len(self.y)
if get_features is not None:
self.x, processed_indices, invalid_indices = \
get_features(self.objects, **kwargs)
self.invalid_objects = self.objects[invalid_indices]
self.objects = self.objects[processed_indices]
self.invalid_y = self.y[invalid_indices]
self.y = self.y[processed_indices]
else:
self.x = self.objects
self.invalid_objects = None
self.invalid_y = None
self.binary_y = None
def binarize(self, threshold):
self.binary_y = np.array(self.y >= threshold, dtype='int32')
"""### **Smiles Enumerator**"""
class Iterator(object):
"""Abstract base class for data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
if n < batch_size:
raise ValueError('Input data length is shorter than batch_size\nAdjust batch_size')
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class SmilesIterator(Iterator):
"""Iterator yielding data from a SMILES array.
# Arguments
x: Numpy array of SMILES input data.
y: Numpy array of targets data.
smiles_data_generator: Instance of `SmilesEnumerator`
to use for random SMILES generation.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
dtype: dtype to use for returned batch. Set to keras.backend.floatx if using Keras
"""
def __init__(self, x, y, smiles_data_generator,
batch_size=32, shuffle=False, seed=None,
dtype=np.float32
):
if y is not None and len(x) != len(y):
raise ValueError('X (input data) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
self.x = np.asarray(x)
|
numpy.asarray
|
# -*- coding: utf-8 -*-
"""
This module defines the NormalizedWorm class
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
def _h_resample_curve(curve, resampling_N=49, widths=None):
'''Resample curve to have resampling_N equidistant segments
I give width as an optional parameter since I want to use the
same interpolation as with the skeletons
I calculate the length here indirectly
'''
# calculate the cumulative length for each segment in the curve
dx = np.diff(curve[:, 0])
dy = np.diff(curve[:, 1])
dr = np.sqrt(dx * dx + dy * dy)
lengths = np.cumsum(dr)
lengths = np.hstack((0, lengths)) # add the first point
tot_length = lengths[-1]
# Verify array lengths
if len(lengths) < 2 or len(curve) < 2:
return None, None, None
fx = interp1d(lengths, curve[:, 0])
fy = interp1d(lengths, curve[:, 1])
subLengths = np.linspace(0 + np.finfo(float).eps, tot_length, resampling_N)
# I add the epsilon because otherwise the interpolation will produce nan
# for zero
try:
resampled_curve = np.zeros((resampling_N, 2))
resampled_curve[:, 0] = fx(subLengths)
resampled_curve[:, 1] = fy(subLengths)
if widths is not None:
fw = interp1d(lengths, widths)
widths = fw(subLengths)
except ValueError:
resampled_curve = np.full((resampling_N, 2), np.nan)
widths = np.full(resampling_N, np.nan)
return resampled_curve, tot_length, widths
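# Illustrative usage sketch, not part of the original module: resample a
# straight 2D segment to 49 equidistant points; the returned length should be
# close to 10. The toy curve is an assumption for the demo.
def _demo_resample_curve():
    curve = np.stack([np.linspace(0, 10, 5), np.zeros(5)], axis=1)
    resampled, tot_length, _ = _h_resample_curve(curve, resampling_N=49)
    return resampled.shape, tot_length  # (49, 2), ~10.0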
def _h_smooth_curve(curve, window=5, pol_degree=3):
'''smooth curves using the savgol_filter'''
if curve.shape[0] < window:
# nothing to do here return an empty array
return np.full_like(curve, np.nan)
# consider the case of one (widths) or two dimensions (skeletons, contours)
if curve.ndim == 1:
smoothed_curve = savgol_filter(curve, window, pol_degree)
else:
smoothed_curve = np.zeros_like(curve)
for nn in range(curve.ndim):
smoothed_curve[:, nn] = savgol_filter(
curve[:, nn], window, pol_degree)
return smoothed_curve
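# Illustrative usage sketch, not part of the original module: Savitzky-Golay
# smoothing of a noisy 1D width profile; 2D skeletons/contours are smoothed
# column by column. The toy signal is an assumption for the demo.
def _demo_smooth_curve():
    widths = np.sin(np.linspace(0, np.pi, 25)) + 0.05 * np.random.randn(25)
    return _h_smooth_curve(widths, window=5, pol_degree=3)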
def get_group_borders(index_o, pad_val = False):
# pad the edges with pad_val so blocks touching the borders are detected
index = np.hstack([pad_val, index_o , pad_val])
switches = np.diff(index.astype(np.int))
turn_on, = np.where(switches==1)
|
numpy.where
|
#!/usr/bin/env python
#
# Created by: <NAME>, March 2002
#
""" Test functions for linalg.basic module
"""
from __future__ import division, print_function, absolute_import
"""
Bugs:
1) solve.check_random_sym_complex fails if a is complex
and transpose(a) = conjugate(a) (a is Hermitian).
"""
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_basic.py
"""
import numpy as np
from numpy import arange, array, dot, zeros, identity, conjugate, transpose, \
float32
import numpy.linalg as linalg
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,\
solve_banded, solveh_banded, solve_triangular
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
return rand(*size)
class TestSolveBanded(TestCase):
def test_real(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_complex(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2j, 1, 20, 2j],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2j, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0,1j],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_real(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_complex(self):
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b4, check_finite=False)
assert_array_almost_equal(dot(a, x), b4)
def test_bad_shape(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1,4)
assert_raises(ValueError, solve_banded, (l, u), ab, bad)
assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
# Values of (l,u) are not compatible with ab.
assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
class TestSolveHBanded(TestCase):
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (3,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0, 0.0]).reshape(-1,1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_float32(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_float32(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_complex(self):
# Solve
# [ 4 -j 2 0] [2-j]
# [ j 4 -j 2] X = [4-j]
# [ 2 j 4 -j] [4+j]
# [ 0 2 j 4] [2+j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
def test_02_complex(self):
# Solve
# [ 4 -j 2 0] [2-j 2+4j]
# [ j 4 -j 2] X = [4-j -1-j]
# [ 2 j 4 -j] [4+j 4+2j]
# [ 0 2 j 4] [2+j j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([[2-1j, 2+4j],
[4.0-1j, -1-1j],
[4.0+1j, 4+2j],
[2+1j, 1j]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_upper(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_03_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 2D array with shape (3,1).
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1,1))
def test_tridiag_01_lower(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
|
numpy.testing.assert_array_almost_equal
|
import copy
import glob
import os
import time
from collections import deque
import sys
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy, MLPAttnBase, MLPHardAttnBase
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate
from a2c_ppo_acktr.utils import save_obj, load_obj
# EVAL_ENVS = {'five_arms': ['h_bandit-randchoose-v6', 5],
# 'ten_arms': ['h_bandit-randchoose-v5', 10],
# 'many_arms': ['h_bandit-randchoose-v1', 100]}
EVAL_ENVS = {'ten_arms': ['h_bandit-obs-randchoose-v5', 10],
'many_arms': ['h_bandit-obs-randchoose-v1', 100]}
def main():
args = get_args()
import random; random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
logdir = args.env_name + '_' + args.algo + '_num_arms_' + str(args.num_processes) + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
if args.use_privacy:
logdir = logdir + '_privacy'
elif args.use_noisygrad:
logdir = logdir + '_noisygrad'
elif args.use_pcgrad:
logdir = logdir + '_pcgrad'
elif args.use_testgrad:
logdir = logdir + '_testgrad'
elif args.use_median_grad:
logdir = logdir + '_mediangrad'
logdir = os.path.join('runs', logdir)
logdir = os.path.join(os.path.expanduser(args.log_dir), logdir)
utils.cleanup_log_dir(logdir)
# Ugly but simple logging
log_dict = {
'task_steps': args.task_steps,
'grad_noise_ratio': args.grad_noise_ratio,
'max_task_grad_norm': args.max_task_grad_norm,
'use_noisygrad': args.use_noisygrad,
'use_pcgrad': args.use_pcgrad,
'use_testgrad': args.use_testgrad,
'use_testgrad_median': args.use_testgrad_median,
'testgrad_quantile': args.testgrad_quantile,
'median_grad': args.use_median_grad,
'use_meanvargrad': args.use_meanvargrad,
'meanvar_beta': args.meanvar_beta,
'no_special_grad_for_critic': args.no_special_grad_for_critic,
'use_privacy': args.use_privacy,
'seed': args.seed,
'recurrent': args.recurrent_policy,
'obs_recurrent': args.obs_recurrent,
'cmd': ' '.join(sys.argv[1:])
}
for eval_disp_name, eval_env_name in EVAL_ENVS.items():
log_dict[eval_disp_name] = []
summary_writer = SummaryWriter()
summary_writer.add_hparams({'task_steps': args.task_steps,
'grad_noise_ratio': args.grad_noise_ratio,
'max_task_grad_norm': args.max_task_grad_norm,
'use_noisygrad': args.use_noisygrad,
'use_pcgrad': args.use_pcgrad,
'use_testgrad': args.use_testgrad,
'use_testgrad_median': args.use_testgrad_median,
'testgrad_quantile': args.testgrad_quantile,
'median_grad': args.use_median_grad,
'use_meanvargrad': args.use_meanvargrad,
'meanvar_beta': args.meanvar_beta,
'no_special_grad_for_critic': args.no_special_grad_for_critic,
'use_privacy': args.use_privacy,
'seed': args.seed,
'recurrent': args.recurrent_policy,
'obs_recurrent': args.obs_recurrent,
'cmd': ' '.join(sys.argv[1:])}, {})
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
print('making envs...')
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, device, False, steps=args.task_steps,
free_exploration=args.free_exploration, recurrent=args.recurrent_policy,
obs_recurrent=args.obs_recurrent, multi_task=True)
val_envs = make_vec_envs(args.val_env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, device, False, steps=args.task_steps,
free_exploration=args.free_exploration, recurrent=args.recurrent_policy,
obs_recurrent=args.obs_recurrent, multi_task=True)
eval_envs_dic = {}
for eval_disp_name, eval_env_name in EVAL_ENVS.items():
eval_envs_dic[eval_disp_name] = make_vec_envs(eval_env_name[0], args.seed, args.num_processes,
None, logdir, device, True, steps=args.task_steps,
recurrent=args.recurrent_policy,
obs_recurrent=args.obs_recurrent, multi_task=True,
free_exploration=args.free_exploration)
prev_eval_r = {}
print('done')
if args.hard_attn:
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base=MLPHardAttnBase,
base_kwargs={'recurrent': args.recurrent_policy or args.obs_recurrent})
else:
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base=MLPAttnBase,
base_kwargs={'recurrent': args.recurrent_policy or args.obs_recurrent})
actor_critic.to(device)
if (args.continue_from_epoch > 0) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
actor_critic_, loaded_obs_rms_ = torch.load(os.path.join(save_path,
args.env_name +
"-epoch-{}.pt".format(args.continue_from_epoch)))
actor_critic.load_state_dict(actor_critic_.state_dict())
if args.algo != 'ppo':
raise "only PPO is supported"
agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
num_tasks=args.num_processes,
attention_policy=False,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay)
val_agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.val_lr,
eps=args.eps,
num_tasks=args.num_processes,
attention_policy=True,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
val_rollouts = RolloutStorage(args.num_steps, args.num_processes,
val_envs.observation_space.shape, val_envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
val_obs = val_envs.reset()
val_rollouts.obs[0].copy_(val_obs)
val_rollouts.to(device)
episode_rewards = deque(maxlen=10)
start = time.time()
num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
save_copy = True
for j in range(args.continue_from_epoch, args.continue_from_epoch+num_updates):
# policy rollouts
for step in range(args.num_steps):
# Sample actions
actor_critic.eval()
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
actor_critic.train()
            # Observe reward and next obs
obs, reward, done, infos = envs.step(action)
for info in infos:
if 'episode' in info.keys():
episode_rewards.append(info['episode']['r'])
for k, v in info['episode'].items():
summary_writer.add_scalar(f'training/{k}', v, j * args.num_processes * args.num_steps + args.num_processes * step)
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
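            # masks is 1.0 while an episode continues and 0.0 at a terminal step;
            # bad_masks additionally flags 'bad_transition' (time-limit) endings so
            # that compute_returns with use_proper_time_limits can distinguish them
            # from true terminations.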
rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks)
actor_critic.eval()
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
actor_critic.train()
rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, args.use_proper_time_limits)
if save_copy:
prev_weights = copy.deepcopy(actor_critic.state_dict())
prev_opt_state = copy.deepcopy(agent.optimizer.state_dict())
prev_val_opt_state = copy.deepcopy(val_agent.optimizer.state_dict())
save_copy = False
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# validation rollouts
for val_iter in range(args.val_agent_steps):
for step in range(args.num_steps):
# Sample actions
actor_critic.eval()
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
val_rollouts.obs[step], val_rollouts.recurrent_hidden_states[step],
val_rollouts.masks[step])
actor_critic.train()
                # Observe reward and next obs
obs, reward, done, infos = val_envs.step(action)
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
val_rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks)
actor_critic.eval()
with torch.no_grad():
next_value = actor_critic.get_value(
val_rollouts.obs[-1], val_rollouts.recurrent_hidden_states[-1],
val_rollouts.masks[-1]).detach()
actor_critic.train()
val_rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, args.use_proper_time_limits)
val_value_loss, val_action_loss, val_dist_entropy = val_agent.update(val_rollouts)
val_rollouts.after_update()
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0
or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'obs_rms', None)
], os.path.join(save_path, args.env_name + "-epoch-{}.pt".format(j)))
if j % args.log_interval == 0 and len(episode_rewards) > 1:
total_num_steps = (j + 1) * args.num_processes * args.num_steps
end = time.time()
print(
"Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n"
.format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards), np.mean(episode_rewards),
np.median(episode_rewards), np.min(episode_rewards),
|
np.max(episode_rewards)
|
numpy.max
|
import hashlib
import math
import numpy as np
import pprint
import pytest
import random
import re
import subprocess
import sys
import tempfile
import json
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostError,
EFstrType,
FeaturesData,
Pool,
cv,
sum_models,
train,)
from catboost.eval.catboost_evaluation import CatboostEvaluation, EvalType
from catboost.utils import eval_metric, create_cd, get_roc_curve, select_threshold
from catboost.utils import DataMetaInfo, TargetStats, compute_training_options
import os.path
from pandas import read_table, DataFrame, Series, Categorical
from six import PY3
from six.moves import xrange
from catboost_pytest_lib import (
DelayedTee,
binary_path,
data_file,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
test_output_path,
generate_random_labeled_set
)
if sys.version_info.major == 2:
import cPickle as pickle
else:
import _pickle as pickle
pytest_plugins = "list_plugin",
fails_on_gpu = pytest.mark.fails_on_gpu
EPS = 1e-5
BOOSTING_TYPE = ['Ordered', 'Plain']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
NONSYMMETRIC = ['Lossguide', 'Depthwise']
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERYWISE_TRAIN_FILE = data_file('querywise', 'train')
QUERYWISE_TEST_FILE = data_file('querywise', 'test')
QUERYWISE_CD_FILE = data_file('querywise', 'train.cd')
QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT = data_file('querywise', 'train.cd.group_weight')
QUERYWISE_CD_FILE_WITH_GROUP_ID = data_file('querywise', 'train.cd.query_id')
QUERYWISE_CD_FILE_WITH_SUBGROUP_ID = data_file('querywise', 'train.cd.subgroup_id')
QUERYWISE_TRAIN_PAIRS_FILE = data_file('querywise', 'train.pairs')
QUERYWISE_TRAIN_PAIRS_FILE_WITH_PAIR_WEIGHT = data_file('querywise', 'train.pairs.weighted')
QUERYWISE_TEST_PAIRS_FILE = data_file('querywise', 'test.pairs')
AIRLINES_5K_TRAIN_FILE = data_file('airlines_5K', 'train')
AIRLINES_5K_TEST_FILE = data_file('airlines_5K', 'test')
AIRLINES_5K_CD_FILE = data_file('airlines_5K', 'cd')
SMALL_CATEGORIAL_FILE = data_file('small_categorial', 'train')
SMALL_CATEGORIAL_CD_FILE = data_file('small_categorial', 'train.cd')
BLACK_FRIDAY_TRAIN_FILE = data_file('black_friday', 'train')
BLACK_FRIDAY_TEST_FILE = data_file('black_friday', 'test')
BLACK_FRIDAY_CD_FILE = data_file('black_friday', 'cd')
OUTPUT_MODEL_PATH = 'model.bin'
OUTPUT_COREML_MODEL_PATH = 'model.mlmodel'
OUTPUT_CPP_MODEL_PATH = 'model.cpp'
OUTPUT_PYTHON_MODEL_PATH = 'model.py'
OUTPUT_JSON_MODEL_PATH = 'model.json'
OUTPUT_ONNX_MODEL_PATH = 'model.onnx'
PREDS_PATH = 'predictions.npy'
PREDS_TXT_PATH = 'predictions.txt'
FIMP_NPY_PATH = 'feature_importance.npy'
FIMP_TXT_PATH = 'feature_importance.txt'
OIMP_PATH = 'object_importances.txt'
JSON_LOG_PATH = 'catboost_info/catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = binary_path("catboost/tools/model_comparator/model_comparator")
np.set_printoptions(legacy='1.13')
class LogStdout:
def __init__(self, file):
self.log_file = file
def __enter__(self):
self.saved_stdout = sys.stdout
sys.stdout = self.log_file
return self.saved_stdout
def __exit__(self, exc_type, exc_value, exc_traceback):
sys.stdout = self.saved_stdout
self.log_file.close()
def compare_canonical_models(model, diff_limit=0):
return local_canonical_file(model, diff_tool=[model_diff_tool, '--diff-limit', str(diff_limit)])
def map_cat_features(data, cat_features):
result = []
for i in range(data.shape[0]):
result.append([])
for j in range(data.shape[1]):
result[i].append(str(data[i, j]) if j in cat_features else data[i, j])
return result
def _check_shape(pool, object_count, features_count):
return np.shape(pool.get_features()) == (object_count, features_count)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def _count_lines(afile):
with open(afile, 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def _generate_nontrivial_binary_target(num, seed=20181219, prng=None):
    '''
    Generate a binary label vector with non-zero variance (both classes present).
    :param num: number of labels to generate
    :return: numpy array of 0/1 labels
    '''
if prng is None:
prng = np.random.RandomState(seed=seed)
def gen():
return prng.randint(0, 2, size=num)
if num <= 1:
return gen()
y = gen() # 0/1 labels
while y.min() == y.max():
y = gen()
return y
def _generate_random_target(num, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
return prng.random_sample((num,))
def set_random_weight(pool, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
pool.set_weight(prng.random_sample(pool.num_row()))
if pool.num_pairs() > 0:
pool.set_pairs_weight(prng.random_sample(pool.num_pairs()))
def verify_finite(result):
inf = float('inf')
for r in result:
assert(r == r)
assert(abs(r) < inf)
def append_param(metric_name, param):
return metric_name + (':' if ':' not in metric_name else ';') + param
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file, target_idx):
data = read_table(pool_file, header=None, dtype=str)
data.drop([target_idx], axis=1, inplace=True)
return (data, Pool(pool_file, column_description=cd_file).get_cat_feature_indices())
# Test cases begin here ########################################################
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE), 101, 17)
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features), 101, 17)
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features), 101, 17)
@pytest.mark.parametrize('dataset', ['adult', 'adult_nan', 'querywise'])
def test_load_df_vs_load_from_file(dataset):
train_file, cd_file, target_idx, other_non_feature_columns = {
'adult': (TRAIN_FILE, CD_FILE, TARGET_IDX, []),
'adult_nan': (NAN_TRAIN_FILE, NAN_CD_FILE, TARGET_IDX, []),
'querywise': (QUERYWISE_TRAIN_FILE, QUERYWISE_CD_FILE, 2, [0, 1, 3, 4])
}[dataset]
pool1 = Pool(train_file, column_description=cd_file)
data = read_table(train_file, header=None)
labels = DataFrame(data.iloc[:, target_idx], dtype=np.float32)
data.drop([target_idx] + other_non_feature_columns, axis=1, inplace=True)
cat_features = pool1.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data([float(label) for label in pool1.get_label()], pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
labels = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert [int(label) for label in pool.get_label()] == pool2.get_label()
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_pool_cat_features_as_strings():
df = DataFrame(data=[[1, 2], [3, 4]], columns=['col1', 'col2'])
pool = Pool(df, cat_features=['col2'])
assert np.all(pool.get_cat_feature_indices() == [1])
data = [[1, 2, 3], [4, 5, 6]]
pool = Pool(data, feature_names=['col1', 'col2', 'col3'], cat_features=['col2', 'col3'])
assert np.all(pool.get_cat_feature_indices() == [1, 2])
data = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(CatBoostError):
Pool(data, cat_features=['col2', 'col3'])
def test_load_generated():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = np.round(prng.normal(size=pool_size), decimals=3)
label = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=pool_size)
labels = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool1 = Pool(data, labels)
lines = []
for i in range(len(data)):
line = [str(labels[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_label() == [int(label) for label in pool2.get_label()]
def test_dataframe_with_pandas_categorical_columns():
df = DataFrame()
df['num_feat_0'] = [0, 1, 0, 2, 3, 1, 2]
df['num_feat_1'] = [0.12, 0.8, 0.33, 0.11, 0.0, 1.0, 0.0]
df['cat_feat_2'] = Series(['A', 'B', 'A', 'C', 'A', 'A', 'A'], dtype='category')
df['cat_feat_3'] = Series(['x', 'x', 'y', 'y', 'y', 'x', 'x'])
df['cat_feat_4'] = Categorical(
['large', 'small', 'medium', 'large', 'small', 'small', 'medium'],
categories=['small', 'medium', 'large'],
ordered=True
)
df['cat_feat_5'] = [0, 1, 0, 2, 3, 1, 2]
labels = [0, 1, 1, 0, 1, 0, 1]
model = CatBoostClassifier(iterations=2)
model.fit(X=df, y=labels, cat_features=[2, 3, 4, 5])
pred = model.predict(df)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
# feature_matrix is (doc_count x feature_count)
def get_features_data_from_matrix(feature_matrix, cat_feature_indices, order='C'):
object_count = len(feature_matrix)
feature_count = len(feature_matrix[0])
cat_feature_count = len(cat_feature_indices)
num_feature_count = feature_count - cat_feature_count
result_num = np.empty((object_count, num_feature_count), dtype=np.float32, order=order)
result_cat = np.empty((object_count, cat_feature_count), dtype=object, order=order)
for object_idx in xrange(object_count):
num_feature_idx = 0
cat_feature_idx = 0
for feature_idx in xrange(len(feature_matrix[object_idx])):
if (cat_feature_idx < cat_feature_count) and (cat_feature_indices[cat_feature_idx] == feature_idx):
# simplified handling of transformation to bytes for tests
result_cat[object_idx, cat_feature_idx] = (
feature_matrix[object_idx, feature_idx]
if isinstance(feature_matrix[object_idx, feature_idx], bytes)
else str(feature_matrix[object_idx, feature_idx]).encode('utf-8')
)
cat_feature_idx += 1
else:
result_num[object_idx, num_feature_idx] = float(feature_matrix[object_idx, feature_idx])
num_feature_idx += 1
return FeaturesData(num_feature_data=result_num, cat_feature_data=result_cat)
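# Illustrative usage (a hedged sketch, not one of the canonical tests below): the
# helper name, the 2x3 matrix, and the single categorical index are made up for
# demonstration. It shows that get_features_data_from_matrix() splits a flat
# object matrix into numeric and categorical parts, encoding categorical cells
# as bytes.
def _features_data_from_matrix_demo():
    matrix = np.array([[1.0, 'amazon', 3.0], [4.0, 'ebay', 6.0]], dtype=object)
    fd = get_features_data_from_matrix(matrix, cat_feature_indices=[1])
    assert fd.get_num_feature_count() == 2
    assert fd.get_cat_feature_count() == 1
    return fd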
def get_features_data_from_file(data_file, drop_columns, cat_feature_indices, order='C'):
data_matrix_from_file = read_table(data_file, header=None, dtype=str)
data_matrix_from_file.drop(drop_columns, axis=1, inplace=True)
return get_features_data_from_matrix(np.array(data_matrix_from_file), cat_feature_indices, order)
def compare_flat_index_and_features_data_pools(flat_index_pool, features_data_pool):
assert flat_index_pool.shape == features_data_pool.shape
cat_feature_indices = flat_index_pool.get_cat_feature_indices()
num_feature_count = flat_index_pool.shape[1] - len(cat_feature_indices)
flat_index_pool_features = flat_index_pool.get_features()
features_data_pool_features = features_data_pool.get_features()
for object_idx in xrange(flat_index_pool.shape[0]):
num_feature_idx = 0
cat_feature_idx = 0
for flat_feature_idx in xrange(flat_index_pool.shape[1]):
if (
(cat_feature_idx < len(cat_feature_indices))
and (cat_feature_indices[cat_feature_idx] == flat_feature_idx)
):
# simplified handling of transformation to bytes for tests
assert (flat_index_pool_features[object_idx][flat_feature_idx] ==
features_data_pool_features[object_idx][num_feature_count + cat_feature_idx])
cat_feature_idx += 1
else:
assert np.isclose(
flat_index_pool_features[object_idx][flat_feature_idx],
features_data_pool_features[object_idx][num_feature_idx],
rtol=0.001,
equal_nan=True
)
num_feature_idx += 1
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_from_features_data_vs_load_from_files(order):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
pool_from_features_data = Pool(data=features_data)
compare_flat_index_and_features_data_pools(pool_from_files, pool_from_features_data)
def test_features_data_with_empty_objects():
fd = FeaturesData(
cat_feature_data=np.empty((0, 4), dtype=object)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 4
assert fd.get_num_feature_count() == 0
assert fd.get_cat_feature_count() == 4
assert fd.get_feature_names() == [''] * 4
fd = FeaturesData(
num_feature_data=np.empty((0, 2), dtype=np.float32),
num_feature_names=['f0', 'f1']
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 2
assert fd.get_num_feature_count() == 2
assert fd.get_cat_feature_count() == 0
assert fd.get_feature_names() == ['f0', 'f1']
fd = FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object),
num_feature_data=np.empty((0, 3), dtype=np.float32)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 5
assert fd.get_num_feature_count() == 3
assert fd.get_cat_feature_count() == 2
assert fd.get_feature_names() == [''] * 5
def test_features_data_names():
# empty specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == [''] * 5
# full specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', 'shop', 'search']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', '', '']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == ['', '', '', 'shop', 'search']
def compare_pools_from_features_data_and_generic_matrix(
features_data,
generic_matrix,
cat_features_indices,
feature_names=None
):
pool1 = Pool(data=features_data)
pool2 = Pool(data=generic_matrix, cat_features=cat_features_indices, feature_names=feature_names)
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_cat_feature_indices() == pool2.get_cat_feature_indices()
assert pool1.get_feature_names() == pool2.get_feature_names()
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_features_data_good(order):
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(cat_feature_data=np.empty((0, 4), dtype=object, order=order)),
np.empty((0, 4), dtype=object),
cat_features_indices=[0, 1, 2, 3]
)
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object, order=order),
cat_feature_names=['cat0', 'cat1'],
num_feature_data=np.empty((0, 3), dtype=np.float32, order=order),
),
np.empty((0, 5), dtype=object),
cat_features_indices=[3, 4],
feature_names=['', '', '', 'cat0', 'cat1']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order)
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search']
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1],
feature_names=['shop', 'search']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[],
feature_names=['weight', 'price', 'volume']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4],
feature_names=['weight', 'price', 'volume', 'shop', 'search']
)
def test_features_data_bad():
# empty
with pytest.raises(CatBoostError):
FeaturesData()
# names w/o data
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=[[b'amazon', b'bing']], num_feature_names=['price'])
# bad matrix type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=[[b'amazon', b'bing']],
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
# bad matrix shape
with pytest.raises(CatBoostError):
FeaturesData(num_feature_data=np.array([[[1.0], [2.0], [3.0]]], dtype=np.float32))
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([b'amazon', b'bing'], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float64)
)
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=np.array(['amazon', 'bing']))
# bad names type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'reddit']], dtype=object),
cat_feature_names=[None, 'news_aggregator']
)
# bad names length
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
cat_feature_names=['search_engine', 'news_aggregator']
)
# no features
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[], [], []], dtype=object),
num_feature_data=np.array([[], [], []], dtype=np.float32)
)
# number of objects is different
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
def test_predict_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function='Logloss:border=0.5', task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_class_raw(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_raw_predict_equals_to_model_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
assert(model.is_fitted())
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
assert np.all(np.isclose(model.get_test_eval(), pred, rtol=1.e-6))
@pytest.mark.parametrize('problem', ['Classifier', 'Regressor'])
def test_predict_and_predict_proba_on_single_object(problem):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
if problem == 'Classifier':
model = CatBoostClassifier(iterations=2)
else:
model = CatBoostRegressor(iterations=2)
model.fit(train_pool)
test_data = read_table(TEST_FILE, header=None)
test_data.drop([TARGET_IDX], axis=1, inplace=True)
pred = model.predict(test_data)
if problem == 'Classifier':
pred_probabilities = model.predict_proba(test_data)
random.seed(0)
for i in xrange(3): # just some indices
test_object_idx = random.randrange(test_data.shape[0])
assert pred[test_object_idx] == model.predict(test_data.values[test_object_idx])
if problem == 'Classifier':
assert np.array_equal(pred_probabilities[test_object_idx], model.predict_proba(test_data.values[test_object_idx]))
def test_model_pickling(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
model_unpickled = pickle.loads(pickle.dumps(model))
pred_new = model_unpickled.predict(test_pool, prediction_type='RawFormulaVal')
assert all(pred_new == pred)
def test_fit_from_file(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
predictions1 = model.predict(train_pool)
model.fit(TRAIN_FILE, column_description=CD_FILE)
predictions2 = model.predict(train_pool)
assert all(predictions1 == predictions2)
assert 'train_finish_time' in model.get_metadata()
@fails_on_gpu(how='assert 0.019921323750168085 < EPS, where 0.019921323750168085 = abs((0.03378972364589572 - 0.053711047396063805))')
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_fit_from_features_data(order, task_type):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(pool_from_files)
assert(model.is_fitted())
predictions_from_files = model.predict(pool_from_files)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
model.fit(X=features_data, y=pool_from_files.get_label())
predictions_from_features_data = model.predict(Pool(features_data))
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert abs(prediction1 - prediction2) < EPS
def test_fit_from_empty_features_data(task_type):
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
with pytest.raises(CatBoostError):
model.fit(
X=FeaturesData(num_feature_data=np.empty((0, 2), dtype=np.float32)),
y=np.empty((0), dtype=np.int32)
)
def test_coreml_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
canon_pred = model.predict(test_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(canon_pred == coreml_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_coreml_import_export_one_hot_features(task_type):
train_pool = Pool(SMALL_CATEGORIAL_FILE, column_description=SMALL_CATEGORIAL_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 2, 'task_type': task_type, 'devices': '0', 'one_hot_max_size': 4})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml", pool=train_pool)
pred = model.predict(train_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(pred == coreml_loaded_model.predict(train_pool))
return compare_canonical_models(output_coreml_model_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs'])
def test_convert_model_to_json(task_type, pool):
train_pool = Pool(data_file(pool, 'train_small'), column_description=data_file(pool, 'train.cd'))
test_pool = Pool(data_file(pool, 'test_small'), column_description=data_file(pool, 'train.cd'))
converted_model_path = test_output_path("converted_model.bin")
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
output_json_model_path = test_output_path(OUTPUT_JSON_MODEL_PATH)
model.save_model(output_model_path)
model.save_model(output_json_model_path, format="json")
model2 = CatBoost()
model2.load_model(output_json_model_path, format="json")
model2.save_model(converted_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
subprocess.check_call((model_diff_tool, output_model_path, converted_model_path, '--diff-limit', '0.000001'))
return compare_canonical_models(converted_model_path)
def test_coreml_cbm_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
canon_pred = model.predict(test_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
coreml_loaded_model = CatBoost()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
coreml_loaded_model.save_model(output_model_path)
cbm_loaded_model = CatBoost()
cbm_loaded_model.load_model(output_model_path)
assert all(canon_pred == cbm_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_cpp_export_no_cat_features(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
def test_cpp_export_with_cat_features(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_no_cat_features(task_type, iterations):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': iterations, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python")
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_with_cat_features(task_type, iterations):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': iterations, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=train_pool)
return local_canonical_file(output_python_model_path)
def test_export_to_python_with_cat_features_from_pandas(task_type):
model = CatBoost({'iterations': 5, 'task_type': task_type, 'devices': '0'})
X = DataFrame([[1, 2], [3, 4]], columns=['Num', 'Categ'])
y = [1, 0]
cat_features = [1]
model.fit(X, y, cat_features)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=X)
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('problem_type', ['binclass', 'multiclass', 'regression'])
def test_onnx_export(problem_type):
if problem_type == 'binclass':
loss_function = 'Logloss'
train_path = TRAIN_FILE
cd_path = CD_FILE
elif problem_type == 'multiclass':
loss_function = 'MultiClass'
train_path = CLOUDNESS_TRAIN_FILE
cd_path = CLOUDNESS_CD_FILE
elif problem_type == 'regression':
loss_function = 'RMSE'
train_path = TRAIN_FILE
cd_path = CD_FILE
else:
raise Exception('Unsupported problem_type: %s' % problem_type)
train_pool = Pool(train_path, column_description=cd_path)
model = CatBoost(
{
'task_type': 'CPU', # TODO(akhropov): GPU results are unstable, difficult to compare models
'loss_function': loss_function,
'iterations': 5,
'depth': 4,
# onnx format export does not yet support categorical features so ignore them
'ignored_features': train_pool.get_cat_feature_indices()
}
)
model.fit(train_pool)
output_onnx_model_path = test_output_path(OUTPUT_ONNX_MODEL_PATH)
model.save_model(
output_onnx_model_path,
format="onnx",
export_parameters={
'onnx_domain': 'ai.catboost',
'onnx_model_version': 1,
'onnx_doc_string': 'test model for problem_type %s' % problem_type,
'onnx_graph_name': 'CatBoostModel_for_%s' % problem_type
}
)
return compare_canonical_models(output_onnx_model_path)
def test_predict_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_zero_learning_rate(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0, task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(train_pool)
def test_predict_class_proba(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict_proba(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@fails_on_gpu(how='assert 0.031045619651137835 < EPS, where 0.031045619651137835 = <function amax at ...')
@pytest.mark.parametrize('function_name', ['predict', 'predict_proba'])
def test_predict_funcs_from_features_data(function_name, task_type):
function = getattr(CatBoostClassifier, function_name)
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
predictions_from_files = function(model, test_pool_from_files)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
predictions_from_features_data = function(model, test_features_data)
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert np.max(np.abs(prediction1 - prediction2)) < EPS
# empty
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
empty_predictions = function(model, empty_test_features_data)
assert len(empty_predictions) == 0
def test_no_cat_in_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost({'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
model2 = CatBoost()
model2.load_model(output_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
def test_multiclass(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_multiclass_classes_count_missed_classes(task_type):
prng = np.random.RandomState(seed=0)
pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([1, 3], size=100))
classifier = CatBoostClassifier(classes_count=4, iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
classes = new_classifier.predict(pool)
assert pred.shape == (100, 4)
assert
|
np.array(classes)
|
numpy.array
|
# $Author: carthik $
# $Revision: 267 $
# $Date: 2010-06-08 22:33:22 -0400 (Tue, 08 Jun 2010) $
# $HeadURL: file:///home/esp01/svn/code/python/branches/patricio/photpipe/lib/suntimecorr.py $
# $Id: suntimecorr.py 267 2010-06-09 02:33:22Z carthik $
import numpy as np
import re
from scipy.constants import c
from .splinterp import splinterp
def getcoords(file):
"""
Use regular expressions to extract X,Y,Z, and time values from the
horizons file.
Parameters:
-----------
file : Strings list
A list containing the lines of a horizons file.
Returns:
--------
  A four-element list containing the X, Y, Z, and time arrays of
values from file.
Example:
--------
start_data = '$$SOE'
end_data = '$$EOE'
  # Read in whole table as a list of strings, one string per line
ctable = open('/home/esp01/ancil/horizons/all_spitzer.vec', 'r')
wholetable = ctable.readlines()
ctable.close()
# Find start and end line
i = 0
while wholetable[i].find(end_data) == -1:
if wholetable[i].find(start_data) != -1:
start = i + 1
i += 1
# Chop table
data = wholetable[start:i-2]
# Find values:
x, y, z, t = getcoords(data)
# print(x, y, z, t)
Modification History:
---------------------
2010-11-01 patricio Written by <NAME>.
<EMAIL>
"""
x, y, z, time = [], [], [], []
for i in np.arange(len(file)):
# Use regular expressions to match strings enclosed between X,
# Y, Z and end of line
m = re.search(' X =(.*)Y =(.*) Z =(.*)\n', file[i])
    if m is not None:
x.append(np.double(m.group(1)))
y.append(np.double(m.group(2)))
z.append(np.double(m.group(3)))
# Match first word which is followed by ' = A'
t = re.search('(.+) = A', file[i])
    if t is not None:
time.append(np.double(t.group(1)))
# return numpy arrays
return np.array(x), np.array(y), np.array(z), np.array(time)
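# Hedged sketch (not part of the original pipeline; the helper name and its
# scalar arguments are illustrative): the Notes in suntimecorr() below reduce
# the correction to the dot product of the observer's position vector (km,
# from HORIZONS) with a unit vector toward the target, divided by the speed of
# light. A stand-alone illustration of that arithmetic:
def _lighttime_correction_sketch(ra, dec, obsx, obsy, obsz):
  # Unit vector from the barycenter toward the target (equatorial J2000 frame)
  ux = np.cos(dec) * np.cos(ra)
  uy = np.cos(dec) * np.sin(ra)
  uz = np.sin(dec)
  # Projected light-travel distance (km -> m) divided by c gives seconds to ADD
  return (obsx*ux + obsy*uy + obsz*uz) * 1000.0 / c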
def suntimecorr(ra, dec, obst, coordtable, verbose=False):
"""
This function calculates the light-travel time correction from
observer to a standard location. It uses the 2D coordinates (RA
and DEC) of the object being observed and the 3D position of the
observer relative to the standard location. The latter (and the
former, for solar-system objects) may be gotten from JPL's
Horizons system.
Parameters:
-----------
ra : Float
Right ascension of target object in radians.
dec : Float
Declination of target object in radians.
obst : Float or Numpy Float array
Time of observation in Julian Date (may be a vector)
coordtable : String
Filename of output table from JPL HORIZONS specifying
the position of the observatory relative to the
standard position.
verbose : Boolean
If True, print X,Y,Z coordinates.
Returns:
--------
This function returns the time correction in seconds to be ADDED
to the observation time to get the time when the observed photons
would have reached the plane perpendicular to their travel and
containing the reference position.
Notes:
------
The position vectors from coordtable are given in the following
coordinate system:
Reference epoch: J2000.0
xy-plane: plane of the Earth's mean equator at the reference epoch
x-axis : out along ascending node of instantaneous plane of the Earth's
orbit and the Earth's mean equator at the reference epoch
z-axis : along the Earth mean north pole at the reference epoch
Ephemerides are often calculated for BJD, barycentric Julian date.
That is, they are correct for observations taken at the solar
system barycenter's distance from the target. The BJD of our
observation is the time the photons we observe would have crossed
the sphere centered on the object and containing the barycenter.
We must thus add the light-travel time from our observatory to
this sphere. For non-solar-system observations, we approximate
the sphere as a plane, and calculate the dot product of the vector
from the barycenter to the telescope and a unit vector to from the
barycenter to the target, and divide by the speed of light.
Properly, the coordinates should point from the standard location
to the object. Practically, for objects outside the solar system,
the adjustment from, e.g., geocentric (RA-DEC) coordinates to
barycentric coordinates has a negligible effect on the trig
functions used in the routine.
The horizons file in coordtable should be in the form of the
following example, with a subject line of JOB:
!$$SOF
!
! Example e-mail command file. If mailed to "<EMAIL>"
! with subject "JOB", results will be mailed back.
!
! This example demonstrates a subset of functions. See main doc for
! full explanation. Send blank e-mail with subject "BATCH-LONG" to
! <EMAIL> for complete example.
!
EMAIL_ADDR = '<EMAIL>' ! Send output to this address
! (can be blank for auto-reply)
COMMAND = '-79' ! Target body, closest apparition
OBJ_DATA = 'YES' ! No summary of target body data
MAKE_EPHEM = 'YES' ! Make an ephemeris
START_TIME = '2005-Aug-24 06:00' ! Start of table (UTC default)
STOP_TIME = '2005-Aug-25 02:00' ! End of table
STEP_SIZE = '1 hour' ! Table step-size
TABLE_TYPE = 'VECTOR' ! Specify VECTOR ephemeris table type
CENTER = '@10' ! Set observer (coordinate center)
REF_PLANE = 'FRAME' ! J2000 equatorial plane
VECT_TABLE = '3' ! Selects output type (3=all).
OUT_UNITS = 'KM-S' ! Vector units# KM-S, AU-D, KM-D
CSV_FORMAT = 'NO' ! Comma-separated output (YES/NO)
VEC_LABELS = 'YES' ! Label vectors in output (YES/NO)
VECT_CORR = 'NONE' ! Correct for light-time (LT),
! or lt + stellar aberration (LT+S),
! or (NONE) return geometric
! vectors only.
!$$EOF
Example:
---------
>>> # Spitzer is in nearly the Earth's orbital plane. Light coming from
>>> # the north ecliptic pole should hit the observatory and the sun at
>>> # about the same time.
>>> import suntimecorr as sc
>>> ra = 18.0 * np.pi / 12 # ecliptic north pole coordinates in radians
>>> dec = 66.5 * np.pi / 180 # "
>>> obst = np.array([2453607.078]) # Julian date of 2005-08-24 14:00
>>> print( sc.suntimecorr(ra, dec, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') )
1.00810877 # about 1 sec, close to zero
>>> # If the object has the RA and DEC of Spitzer, light time should be
>>> # about 8 minutes to the sun.
>>> obs = np.array([111093592.8346969, -97287023.315796047,
-42212080.826677799])
>>> # vector to the object
>>> obst = np.array([2453602.5])
>>> print( np.sqrt(np.sum(obs**2.0)) )
153585191.481 # about 1 AU, good
>>> raobs = np.arctan(obs[1]/ obs[0])
>>> decobs = np.arctan(obs[2]/ np.sqrt(obs[0]**2 + obs[1]**2))
>>> print(raobs, decobs)
-0.7192383661, -0.2784282118
>>> print( sc.suntimecorr(raobs, decobs, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') / 60.0)
8.5228630 # good, about 8 minutes light time to travel 1 AU
Modification History:
---------------------
2005-12-01 statia Written by <NAME>.
2006-03-09 jh Corrected 90deg error in algorithm, renamed,
updated header, made Coordtable a positional
arg since it's required, switched to radians.
2007-06-28 jh Renamed to suntimecorr since we now use
barycentric Julian date.
2009-01-28 jh Change variables to long, use spline instead
of linfit so we can use one HORIZONS file for
the whole mission.
2009-02-22 jh Reshape spline results to shape of obst. Make
                        it handle unsorted input data properly.
Header update.
2010-07-10 patricio Converted to python. (<EMAIL>)
2010-11-01 patricio Docstring updated.
"""
start_data = '$$SOE'
end_data = '$$EOE'
  # Read in whole table as a list of strings, one string per line
ctable = open(coordtable, 'r')
wholetable = ctable.readlines()
ctable.close()
# Find start and end line
i = 0
# while end has not been found:
while wholetable[i].find(end_data) == -1:
# if start is found get the index of next line:
if wholetable[i].find(start_data) != -1:
start = i + 1
i += 1
# Chop table
data = wholetable[start:i-2]
# Extract values:
x, y, z, time = getcoords(data)
# Interpolate to observing times:
# We must preserve the shape and order of obst. Spline takes
# monotonic input and produces linear output. x, y, z, time are
# sorted as HORIZONS produces them.
# Save shape of obst
tshape = np.shape(obst)
# Reshape to 1D and sort
obstime = obst.flatten()
ti = np.argsort(obstime) # indexes of sorted array by time
tsize = np.size(obstime)
# Allocate output arrays
obsx = np.zeros(tsize)
obsy = np.zeros(tsize)
obsz =
|
np.zeros(tsize)
|
numpy.zeros
|
import tools as tls
import camera
import numpy as np
import cv2
import matplotlib.pyplot as plt
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
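# Usage sketch (illustrative only; the demo function, the synthetic image, and
# the triangle below are assumptions, not part of the original pipeline).
# cv2.fillPoly expects an array of integer polygons, hence the extra outer
# dimension and dtype=np.int32.
def _region_of_interest_demo():
    img = np.full((200, 300), 255, dtype=np.uint8)  # plain white test image
    roi = np.array([[(10, 190), (150, 20), (290, 190)]], dtype=np.int32)
    masked = region_of_interest(img, roi)  # pixels outside the triangle become 0
    return masked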
def sobel_absolute_scaled(gray):
    # only allow single-channel (grayscale) images
if len(gray.shape) != 2:
raise TypeError
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
return scaled_sobel
def sobel_magnitude(gray, sobel_kernel=3):
    # only allow single-channel (grayscale) images
if len(gray.shape) != 2:
raise TypeError
# Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
return gradmag
def sobel_direction(gray, sobel_kernel=3): #, thresh=(0, np.pi/2)):
    # only allow single-channel (grayscale) images
if len(gray.shape) != 2:
raise TypeError
# Take the gradient in x and y separately
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, sobel_kernel)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, sobel_kernel)
# Take the absolute value of the x and y gradients
abs_sobel_x = np.absolute(sobel_x)
abs_sobel_y = np.absolute(sobel_y)
# calculate the direction of the gradient
absgraddir = np.arctan2(abs_sobel_y, abs_sobel_x)
return absgraddir
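# Hedged sketch (an assumption about how these helpers are typically combined,
# not code from the original project; the function name and thresholds are
# illustrative): AND-ing magnitude and direction thresholds into one binary mask.
def combined_gradient_mask(gray, mag_thresh=(30, 255), dir_thresh=(0.7, 1.3)):
    mag = sobel_magnitude(gray)
    direction = sobel_direction(gray)
    mask = np.zeros_like(mag)
    mask[(mag >= mag_thresh[0]) & (mag <= mag_thresh[1]) &
         (direction >= dir_thresh[0]) & (direction <= dir_thresh[1])] = 1
    return mask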
def channel_threshold(channel, thresh=(170,255)):
# Threshold color channel
binary =
|
np.zeros_like(channel)
|
numpy.zeros_like
|
import math
import matplotlib.pyplot as plt
import numpy as np
from typing import Dict, Tuple
from CSIKit.visualization.metric import Metric
from matplotlib.colors import BoundaryNorm, LinearSegmentedColormap
from matplotlib.ticker import MaxNLocator
class Graph:
def __init__(self, metric: Metric):
self.metric = metric
self._axes = []
super().__init__()
def plot(self, values_per_measurement):
"""
function to plot the visualization into the axes.
return axes[]
"""
self._axes = []
self._plot_axes(values_per_measurement)
        if not isinstance(self._axes, list):
            raise TypeError("_plot_axes() must populate self._axes as a list")
return self._axes
def _plot_axes(self, values_per_measurement):
"""
Abstract function.
This function has to fill self.axes
It should call self._create_new_ax
"""
raise Exception("Not implemented function plot")
def _create_new_ax(self):
"""
return new axes and appends it to self.axes
"""
ax = plt.subplot()
self._axes.append(ax)
return ax
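# Hedged sketch (illustrative subclass, not part of CSIKit; the class name and
# the line-plot choice are assumptions): the contract of Graph is that
# _plot_axes() fills self._axes via _create_new_ax(). A minimal implementation
# could look like this:
class _PlotLineSketch(Graph):
    def _plot_axes(self, values_per_measurement):
        ax = self._create_new_ax()  # registers the axes in self._axes
        for name, values in values_per_measurement.items():
            ax.plot(values, label=str(name))
        ax.set_ylabel(f"{self.metric.get_name()}[{self.metric.get_unit()}]")
        ax.set_xlabel('Measurement')
        ax.legend()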
class TupleGraph:
pass
class PlotBox(Graph):
def _plot_axes(self, values_per_measurement):
axes = self._create_new_ax()
data = list(values_per_measurement.values())
labels = list(values_per_measurement.keys())
if all(isinstance(k, int) for k in labels): # if name is metric
width = max(list(values_per_measurement.keys())) / \
(2*len(list(values_per_measurement.keys())))
axes.boxplot(data, positions=labels, labels=labels, widths=width)
else:
width = 0.5
axes.boxplot(data, labels=labels, widths=width)
ind = np.arange(1, len(values_per_measurement)+1)
axes.set_xticks(ind)
axes.set_xticklabels(
tuple(values_per_measurement.keys()), rotation=45, ha="right")
mini = min({min(ar) for ar in data})
maxi = max({max(ar) for ar in data})
if maxi > 0 and mini > 0:
axes.set_ylim(bottom=0)
elif maxi < 0 and mini < 0:
axes.set_ylim(top=0)
axes.set_ylabel(f"{self.metric.get_name()}[{self.metric.get_unit()}]")
axes.set_xlabel('Measurement')
class PlotCandle(Graph):
def __init__(self, metric):
super().__init__(metric)
self.plot_wick = True
@classmethod
def _calc_average(_cls, values_per_measurement):
averages = {}
for mes_name in values_per_measurement.copy():
values = values_per_measurement[mes_name]
averages[mes_name] = (sum(values)/len(values))
return averages
@classmethod
def _calc_std_errs(cls, values_per_measurement):
"""returns standard diviation of each measurement """
val_per_meas = values_per_measurement.copy()
std_errs = {}
for name in val_per_meas:
std_errs[name] = np.std(val_per_meas[name])
return std_errs
@classmethod
def _calc_confidence_diff(cls, variance, interval=0.95):
""" Retruns the +- confidenz delta of the meassrements"""
INTERVALS = {
0.90: 1.645,
0.95: 1.96,
0.98: 2.326,
0.99: 2.576
}
        if interval not in INTERVALS:
raise Exception(f"Invalid interval {interval}")
confidenz_errors = {}
for name in variance:
std_err = math.sqrt(variance[name])
confidenz = INTERVALS[interval] * \
(std_err / math.sqrt(len(variance[name])))
confidenz_errors[name] = confidenz
return confidenz_errors
def _plot_axes(self, values_per_measurement, plot_wick=True):
axes = self._create_new_ax()
if all(isinstance(k, int) for k in values_per_measurement.keys()): # if name is metric
width = max(list(values_per_measurement.keys())) / \
(2*len(list(values_per_measurement.keys())))
#width = 4
# for name in self._values_per_measurement: # for each measurement
self._plot_candle(axes, values_per_measurement,
width=width, plot_wick=plot_wick)
else: # else plot by name
width = 0.4
self._plot_candle(axes, values_per_measurement,
width=width, plot_wick=plot_wick)
ind = np.arange(len(values_per_measurement))
# plot unique text at the center of each candle
axes.set_xticks(ind)
axes.set_xticklabels(
tuple(values_per_measurement.keys()), rotation=45, ha="right")
axes.set_ylabel(f"{self.metric.get_name()}[{self.metric.get_unit()}]")
axes.set_xlabel('Measurement')
@classmethod
def _plot_candle(cls, axes, values_per_measurement, width=4, color="#008000", x_offset=0, plot_wick=True):
averages = cls._calc_average(values_per_measurement)
if len(values_per_measurement) == 1:
plot_wick = False
if plot_wick:
std_errors = cls._calc_std_errs(values_per_measurement)
# confidences = cls._calc_confidence_diff(self._values_per_measurement)
wick_width = 4
if all(isinstance(k, int) for k in values_per_measurement.keys()): # if measurement names are numeric
# for name in self._values_per_measurement: # for each measurement
x_arr = values_per_measurement.keys()
if x_offset != 0:
x_arr = [i+x_offset for i in x_arr] # add offset to x plot
if plot_wick:
axes.bar(x_arr, averages.values(), width=width, yerr=std_errors.values(
), error_kw=dict(linewidth=wick_width), color=color)
else:
axes.bar(x_arr, averages.values(), width=width,
error_kw=dict(linewidth=wick_width), color=color)
else: # else plot by name
ind = np.arange(len(values_per_measurement))
ind = [i+x_offset for i in ind] # add offset to x plot
if plot_wick:
axes.bar(ind, averages.values(), width=width, yerr=std_errors.values(
), error_kw=dict(linewidth=wick_width), color=color)
else:
axes.bar(ind, averages.values(), width=width,
error_kw=dict(linewidth=wick_width), color=color)
class PlotCandleTuple(TupleGraph, PlotCandle):
"""
Abstract class that plots a group of bars per measurement for tuple-valued data
"""
def __init__(self, metric):
super().__init__(metric)
self._values_per_measurement: Dict[str, Tuple] = {}
COLORS = ['#008000', 'red', 'blue']
def _plot_axes(self, values_per_measurement, plot_wick=True):
axes = self._create_new_ax()
axes.set_autoscalex_on(True)
# gets the size of the contained tuple
tuple_size = len(list(values_per_measurement.values())[0][0])
if all(isinstance(k, int) for k in values_per_measurement.keys()): # if measurement names are numeric
width = (max(list(values_per_measurement.keys())) /
(2*len(list(values_per_measurement.keys()))))/tuple_size
# for name in self._values_per_measurement: # for each measurement
for tuple_i in range(tuple_size):
self._plot_candle(axes, self._get_measurement_by_tuple_index(values_per_measurement, tuple_i), width, x_offset=(
tuple_i-1)*width, color=self.COLORS[tuple_i], plot_wick=plot_wick)
else: # else plot by name
width = 0.8 / tuple_size
# plot each bar of group per tuple
for tuple_i in range(tuple_size):
self._plot_candle(axes, self._get_measurement_by_tuple_index(values_per_measurement, tuple_i), width, x_offset=(
tuple_i-1)*width, color=self.COLORS[tuple_i], plot_wick=plot_wick)
# unique label per candle group
ind = np.arange(len(values_per_measurement))
axes.set_xticks(ind)
axes.set_xticklabels(
tuple(values_per_measurement.keys()), rotation=45, ha="right")
axes.set_ylabel(f"{self.metric.get_name()}[{self.metric.get_unit()}]")
axes.set_xlabel('Measurement')
@classmethod
def _get_measurement_by_tuple_index(cls, values_per_measurement, tuple_index):
"""
Returns a plain measurement dict (Dict[str, List[int]]), as used by PlotCandle, so that PlotCandle._plot_candle() can be reused for one tuple position.
"""
measurements = values_per_measurement.copy()
result = {}
for name in measurements:
measurement = measurements[name]
result[name] = [i[tuple_index] for i in measurement]
return result
class PlotCandleTuple_Phase(PlotCandleTuple):
def _plot_axes(self, values_per_measurement, plot_wick=False): # set wick false
super()._plot_axes(values_per_measurement, plot_wick=plot_wick)
for ax in self._axes: ax.set_ylim((0, np.pi))  # phase values lie in [0, pi]
class PlotColorMap(Graph):
def __init__(self, metric):
super().__init__(metric)
self.vmin=None
self.vmax=None
self.cmap = plt.cm.plasma
self.color_legend = True
def _plot_axes(self, values_per_measurement):
for measur_name in values_per_measurement:
axes = self._create_new_ax()
amplitude_per_sub = values_per_measurement[measur_name]
amplitude_per_sub = np.matrix(
|
np.array(amplitude_per_sub)
|
numpy.array
|
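A hedged usage sketch for the plotting classes defined above, assuming they are in scope; _StubMetric is a hypothetical stand-in for CSIKit's Metric exposing only the get_name/get_unit methods the classes actually call.

class _StubMetric:
    def get_name(self):
        return "RSSI"

    def get_unit(self):
        return "dBm"

measurements = {"run A": [1.0, 2.0, 2.5], "run B": [0.5, 1.5, 3.0]}
box_axes = PlotBox(_StubMetric()).plot(measurements)        # boxplot per measurement
candle_axes = PlotCandle(_StubMetric()).plot(measurements)  # bars with std-dev wicks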
# -*- coding: utf-8 -*-
import unittest
from snewpy.flavor_transformation import MassHierarchy, MixingParameters
from snewpy.flavor_transformation import \
NoTransformation, CompleteExchange, \
AdiabaticMSW, NonAdiabaticMSWH, \
AdiabaticMSWes, NonAdiabaticMSWes, \
TwoFlavorDecoherence, ThreeFlavorDecoherence, \
NeutrinoDecay
from astropy import units as u
from astropy import constants as c
import numpy as np
from numpy import sin, cos, exp, abs
class TestFlavorTransformations(unittest.TestCase):
def setUp(self):
# Dummy time and energy arrays, with proper dimensions.
self.t = np.arange(10) * u.s
self.E = np.linspace(1,100,21) * u.MeV
# Dummy mixing angles.
self.theta12 = 33 * u.deg
self.theta13 = 9 * u.deg
self.theta23 = 49 * u.deg
self.theta14 = 1 * u.deg
# Dummy neutrino decay parameters; see arXiv:1910.01127.
self.mass3 = 0.5 * u.eV/c.c**2
self.lifetime = 1 * u.day
self.distance = 10 * u.kpc
def test_noxform(self):
"""
Survival probabilities for no oscillations
"""
xform = NoTransformation()
self.assertEqual(xform.prob_ee(self.t, self.E), 1)
self.assertEqual(xform.prob_ex(self.t, self.E), 0)
self.assertEqual(xform.prob_xx(self.t, self.E), 1)
self.assertEqual(xform.prob_xe(self.t, self.E), 0)
self.assertEqual(xform.prob_eebar(self.t, self.E), 1)
self.assertEqual(xform.prob_exbar(self.t, self.E), 0)
self.assertEqual(xform.prob_xxbar(self.t, self.E), 1)
self.assertEqual(xform.prob_xebar(self.t, self.E), 0)
def test_fullex(self):
"""
Survival probabilities for complete electron->X transformation
"""
xform = CompleteExchange()
self.assertEqual(xform.prob_ee(self.t, self.E), 0)
self.assertEqual(xform.prob_ex(self.t, self.E), 1)
self.assertEqual(xform.prob_xx(self.t, self.E), 0.5)
self.assertEqual(xform.prob_xe(self.t, self.E), 0.5)
self.assertEqual(xform.prob_eebar(self.t, self.E), 0)
self.assertEqual(xform.prob_exbar(self.t, self.E), 1)
self.assertEqual(xform.prob_xxbar(self.t, self.E), 0.5)
self.assertEqual(xform.prob_xebar(self.t, self.E), 0.5)
def test_adiabaticmsw_nmo(self):
"""
Adiabatic MSW with normal ordering
"""
xform = AdiabaticMSW(mix_angles=(self.theta12, self.theta13, self.theta23), mh=MassHierarchy.NORMAL)
self.assertEqual(xform.prob_ee(self.t, self.E), sin(self.theta13)**2)
self.assertEqual(xform.prob_ex(self.t, self.E), 1. - sin(self.theta13)**2)
self.assertEqual(xform.prob_xx(self.t, self.E), 0.5*(1. + sin(self.theta13)**2))
self.assertEqual(xform.prob_xe(self.t, self.E), 0.5*(1. - sin(self.theta13)**2))
self.assertEqual(xform.prob_eebar(self.t, self.E), (cos(self.theta12)*cos(self.theta13))**2)
self.assertEqual(xform.prob_exbar(self.t, self.E), 1. - (cos(self.theta12)*cos(self.theta13))**2)
self.assertEqual(xform.prob_xxbar(self.t, self.E), 0.5*(1. + (cos(self.theta12)*cos(self.theta13))**2))
self.assertEqual(xform.prob_xebar(self.t, self.E), 0.5*(1. - (cos(self.theta12)*cos(self.theta13))**2))
# Test interface using default mixing angles defined in the submodule.
mixpars = MixingParameters(MassHierarchy.NORMAL)
th12, th13, th23 = mixpars.get_mixing_angles()
xform = AdiabaticMSW()
self.assertEqual(xform.prob_ee(self.t, self.E), sin(th13)**2)
self.assertEqual(xform.prob_ex(self.t, self.E), 1. - sin(th13)**2)
self.assertEqual(xform.prob_xx(self.t, self.E), 0.5*(1. + sin(th13)**2))
self.assertEqual(xform.prob_xe(self.t, self.E), 0.5*(1. - sin(th13)**2))
self.assertEqual(xform.prob_eebar(self.t, self.E), (cos(th12)*cos(th13))**2)
self.assertEqual(xform.prob_exbar(self.t, self.E), 1. - (cos(th12)*cos(th13))**2)
self.assertEqual(xform.prob_xxbar(self.t, self.E), 0.5*(1. + (
|
cos(th12)
|
numpy.cos
|
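A hedged numeric sketch of the adiabatic MSW (normal ordering) expressions the tests above assert, using the same dummy angles; it only restates the algebra, not snewpy's implementation.

import numpy as np

theta12, theta13 = np.radians(33.0), np.radians(9.0)
p_ee = np.sin(theta13) ** 2                          # nu_e survival
p_eebar = (np.cos(theta12) * np.cos(theta13)) ** 2   # anti-nu_e survival
p_ex, p_xx, p_xe = 1 - p_ee, 0.5 * (1 + p_ee), 0.5 * (1 - p_ee)
assert np.isclose(p_ee + p_ex, 1.0) and np.isclose(p_xx + p_xe, 1.0)
print(f"P_ee = {p_ee:.4f}, P_eebar = {p_eebar:.4f}")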
import six
import pytest
import numpy as np
from mock import patch, MagicMock
from eight_mile.optz import (
create_lr_scheduler,
CosineDecayScheduler,
CyclicLRScheduler,
ExponentialDecayScheduler,
WarmupLinearScheduler,
ConstantScheduler,
PiecewiseDecayScheduler,
ZarembaDecayScheduler,
InverseTimeDecayScheduler,
CompositeLRScheduler,
)
@pytest.fixture
def piecewise():
min_ = np.random.randint(1, 5)
max_ = np.random.randint(min_ + 2, min_ + 7)
bounds = [min_, max_]
vals = np.random.uniform(size=len(bounds) + 1)
return bounds, vals
def test_zaremba_with_nones():
eta = np.random.rand()
zd = ZarembaDecayScheduler(lr=eta)
for step in np.random.randint(0, 1000000, size=100):
assert zd(step) == eta
def test_piecewise_start(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
lr = p(0)
assert lr == v[0]
def test_piecewise_mid(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
step = np.random.randint(np.min(b) + 1, np.max(b))
lr = p(step)
assert lr == v[1]
def test_piecewise_last(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
step = np.random.randint(np.max(b) + 3, np.max(b) + 100)
lr = p(step)
assert lr == v[-1]
def test_staircase_decay_flat():
steps = np.random.randint(900, 1001)
sd = ExponentialDecayScheduler(steps, np.random.rand(), lr=np.random.rand(), staircase=True)
stair_one_one = sd(np.random.randint(steps - 100, steps))
stair_one_two = sd(np.random.randint(steps - 100, steps))
stair_two = sd(np.random.randint(steps + 1, steps + 10))
assert stair_one_one == stair_one_two
assert stair_one_two != stair_two
def test_staircase_value():
sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0, staircase=True)
gold = 1.0
test = sd(100)
np.testing.assert_allclose(test, gold)
gold = 0.9
test = sd(1001)
np.testing.assert_allclose(test, gold)
def test_exp_values():
sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0)
gold = 0.9895192582062144
test = sd(100)
np.testing.assert_allclose(test, gold)
gold = 0.8999051805311098
test = sd(1001)
np.testing.assert_allclose(test, gold)
def test_warmup_peaks():
steps = np.random.randint(100, 1000)
lr = np.random.rand()
wls = WarmupLinearScheduler(steps, lr=lr)
peak = wls(steps)
assert peak == lr
past = wls(steps + np.random.randint(100, 10000))
assert past == lr
def test_warmup_increases():
steps = np.random.randint(100, 1000)
lr = np.random.rand()
wls = WarmupLinearScheduler(steps, lr=lr)
lrs = [wls(s) for s in range(steps)]
last = -1
for lr in lrs:
assert lr > last
last = lr
def test_cyclic_lr():
bounds = 1000
min_eta = 1e-5
max_eta = 1e-2
clr = CyclicLRScheduler(max_eta, bounds, lr=min_eta)
start = clr(0)
up = clr(bounds / 2.0)
mid = clr(bounds)
down = clr(bounds + (bounds / 2.0))
end = clr(2 * bounds)
late = clr(3 * bounds)
assert start == min_eta
assert up > start
assert up < mid
assert mid == max_eta
assert down < mid
assert down > end
assert end == min_eta
assert late == max_eta
def test_cosine_lr():
cd = CosineDecayScheduler(1000, lr=0.1)
iters = [0, 100, 900, 1000, 1001]
golds = [0.1, 0.09755283, 0.002447176, 0.0, 0.0]
for i, gold in zip(iters, golds):
np.testing.assert_allclose(cd(i), gold, rtol=1e-6)
def test_constant_lr():
lr =
|
np.random.rand()
|
numpy.random.rand
|
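A hedged sketch of the decay rule the ExponentialDecayScheduler tests above imply: lr * rate ** (step / steps), with the exponent floored when staircase=True. The helper name is illustrative, not part of eight_mile.

import math

def exp_decay(step, steps=1000, rate=0.9, lr=1.0, staircase=False):
    exponent = step / steps
    if staircase:
        exponent = math.floor(exponent)
    return lr * rate ** exponent

assert abs(exp_decay(100) - 0.9895192582062144) < 1e-12     # matches test_exp_values
assert abs(exp_decay(1001, staircase=True) - 0.9) < 1e-12   # matches test_staircase_value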
import numpy as np
from scipy import sparse
class Solver:
"""A class representing a linear FEM solver for:
Laplace Eigenvalue problems and Poisson Equation
Inputs can be geometry classes which have vertices and elements.
Currently TriaMesh and TetMesh are implemented.
FEM matrices (stiffness (or A) and mass matrix (or B)) are computed
during the construction. After that the Eigenvalue solver (eigs) or
Poisson Solver (poisson) can be called.
The class has a static member to create the mass matrix of TriaMesh
for external functions that do not need the stiffness matrix.
"""
def __init__(self, geometry, lump=False, aniso=None, aniso_smooth=10):
"""
Construct the Solver class. Computes linear Laplace FEM stiffness and
mass matrix for TriaMesh or TetMesh input geometries. For TriaMesh it can also
construct the anisotropic Laplace.
Inputs:
geometry : is a geometry class, currently either TriaMesh or TetMesh
aniso : float, anisotropy for the curvature-based anisotropic Laplace;
can also be a tuple (a_min, a_max) to affect the min and max
curvature directions differently. E.g. (0, 50) keeps the scaling
at 1 along the min curvature direction even where the max
curvature is large (i.e. isotropic in regions with large max
curvature and near-zero min curvature, such as a concave cylinder)
lump : whether to lump the mass matrix (diagonal), default False
"""
if type(geometry).__name__ == "TriaMesh":
if aniso is not None:
# anisotropic Laplace
print("TriaMesh with anisotropic Laplace-Beltrami")
u1, u2, c1, c2 = geometry.curvature_tria(smoothit=aniso_smooth)
# Diag mat to specify anisotropy strength
if isinstance(aniso, (list, tuple, set, np.ndarray)):
if len(aniso) != 2:
raise ValueError('aniso should be scalar or tuple/array of length 2!')
aniso0 = aniso[0]
aniso1 = aniso[1]
else:
aniso0 = aniso
aniso1 = aniso
aniso_mat = np.empty((geometry.t.shape[0], 2))
aniso_mat[:, 1] = np.exp(-aniso1 * np.abs(c1))
aniso_mat[:, 0] = np.exp(-aniso0 * np.abs(c2))
a, b = self._fem_tria_aniso(geometry, u1, u2, aniso_mat, lump)
else:
print("TriaMesh with regular Laplace-Beltrami")
a, b = self._fem_tria(geometry, lump)
elif type(geometry).__name__ == "TetMesh":
print("TetMesh with regular Laplace")
a, b = self._fem_tetra(geometry, lump)
else:
raise ValueError('Geometry type "' + type(geometry).__name__ + '" unknown')
self.stiffness = a
self.mass = b
self.geotype = type(geometry)
@staticmethod
def _fem_tria(tria, lump=False):
"""
computeABtria(v,t) computes the two sparse symmetric matrices representing
the Laplace Beltrami Operator for a given triangle mesh using
the linear finite element method (assuming a closed mesh or
the Neumann boundary condition).
Inputs: v - vertices : list of lists of 3 floats
t - triangles: list of lists of 3 int of indices (>=0) into v array
Outputs: A - sparse sym. (n x n) positive semi definite numpy matrix
B - sparse sym. (n x n) positive definite numpy matrix (inner product)
Can be used to solve sparse generalized Eigenvalue problem: A x = lambda B x
or to solve Poisson equation: A x = B f (where f is function on mesh vertices)
or to solve Laplace equation: A x = 0
or to model the operator's action on a vector x: y = B\(Ax)
"""
import sys
# Compute vertex coordinates and a difference vector for each triangle:
t1 = tria.t[:, 0]
t2 = tria.t[:, 1]
t3 = tria.t[:, 2]
v1 = tria.v[t1, :]
v2 = tria.v[t2, :]
v3 = tria.v[t3, :]
v2mv1 = v2 - v1
v3mv2 = v3 - v2
v1mv3 = v1 - v3
# Compute cross product and 4*vol for each triangle:
cr = np.cross(v3mv2, v1mv3)
vol = 2 * np.sqrt(np.sum(cr * cr, axis=1))
# zero vol will cause division by zero below, so set to small value:
vol_mean = 0.0001 * np.mean(vol)
vol[vol < sys.float_info.epsilon] = vol_mean
# compute cotangents for A
# using that v2mv1 = - (v3mv2 + v1mv3) this can also be seen by
# summing the local matrix entries in the old algorithm
a12 = np.sum(v3mv2 * v1mv3, axis=1) / vol
a23 = np.sum(v1mv3 * v2mv1, axis=1) / vol
a31 = np.sum(v2mv1 * v3mv2, axis=1) / vol
# compute diagonals (from row sum = 0)
a11 = -a12 - a31
a22 = -a12 - a23
a33 = -a31 - a23
# stack columns to assemble data
local_a = np.column_stack((a12, a12, a23, a23, a31, a31, a11, a22, a33)).reshape(-1)
i = np.column_stack((t1, t2, t2, t3, t3, t1, t1, t2, t3)).reshape(-1)
j = np.column_stack((t2, t1, t3, t2, t1, t3, t1, t2, t3)).reshape(-1)
# Construct sparse matrix:
# a = sparse.csr_matrix((local_a, (i, j)))
a = sparse.csc_matrix((local_a, (i, j)))
# construct mass matrix (sparse or diagonal if lumped)
if not lump:
# create b matrix data (account for that vol is 4 times area)
b_ii = vol / 24
b_ij = vol / 48
local_b = np.column_stack((b_ij, b_ij, b_ij, b_ij, b_ij, b_ij,
b_ii, b_ii, b_ii)).reshape(-1)
b = sparse.csc_matrix((local_b, (i, j)))
else:
# when lumping put all onto diagonal (area/3 for each vertex)
b_ii = vol / 12
local_b = np.column_stack((b_ii, b_ii, b_ii)).reshape(-1)
i = np.column_stack((t1, t2, t3)).reshape(-1)
b = sparse.csc_matrix((local_b, (i, i)))
return a, b
@staticmethod
def _fem_tria_aniso(tria, u1, u2, aniso_mat, lump=False):
"""
computeABtria(v,t) computes the two sparse symmetric matrices representing
the Laplace Beltrami Operator for a given triangle mesh using
the linear finite element method (assuming a closed mesh or
the Neumann boundary condition).
Inputs: v - vertices : list of lists of 3 floats
t - triangles: N list of lists of 3 int of indices (>=0) into v array
u1 - min curv: min curvature direction per triangle (Nx3 floats)
u2 - max curv: max curvature direction per triangle (Nx3 floats)
aniso_mat - anisotropy matrix: diagonal elements in u1,u2 basis per
triangle (Nx2 floats)
Outputs: A - sparse sym. (n x n) positive semi definite numpy matrix
B - sparse sym. (n x n) positive definite numpy matrix (inner product)
Can be used to solve sparse generalized Eigenvalue problem: A x = lambda B x
or to solve Poisson equation: A x = B f (where f is function on mesh vertices)
or to solve Laplace equation: A x = 0
or to model the operator's action on a vector x: y = B\(Ax)
"""
import sys
# Compute vertex coordinates and a difference vector for each triangle:
t1 = tria.t[:, 0]
t2 = tria.t[:, 1]
t3 = tria.t[:, 2]
v1 = tria.v[t1, :]
v2 = tria.v[t2, :]
v3 = tria.v[t3, :]
v2mv1 = v2 - v1
v3mv2 = v3 - v2
v1mv3 = v1 - v3
# transform edge e to basis U = (U1,U2) via U^T * e
# Ui is n x 3, e is n x 1, result is n x 2
uv2mv1 = np.column_stack((np.sum(u1 * v2mv1, axis=1), np.sum(u2 * v2mv1, axis=1)))
uv3mv2 = np.column_stack((np.sum(u1 * v3mv2, axis=1),
|
np.sum(u2 * v3mv2, axis=1)
|
numpy.sum
|
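A hedged sketch of how the stiffness (A) and mass (B) matrices documented above are typically consumed, i.e. the generalized eigenvalue problem A x = lambda B x; the tridiagonal 1-D Laplacian and scaled identity below are stand-ins for a real TriaMesh/TetMesh assembly, not lapy's output.

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

n = 20
a = sparse.diags([-np.ones(n - 1), 2.0 * np.ones(n), -np.ones(n - 1)],
                 [-1, 0, 1], format="csc")        # stand-in stiffness matrix
b = sparse.identity(n, format="csc") / 6.0        # stand-in lumped mass matrix
evals, evecs = eigsh(a, k=4, M=b, sigma=0.0)      # smallest eigenpairs of A x = lambda B x
print(np.round(evals, 4))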
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Written by <NAME>
"""This file contain the utils class that can
improve programming efficiency on IPU by popart.
"""
import builtins
import string
import numpy as np
import popart
from _globals import GLOBAL_V, set_batch, get_batch_size, get_anchor_return_type, train_mode_on, train_mode, safe_mode, safe_mode_on, safe_mode_off, get_builder, set_builder, set_seed, get_seed, set_options, get_options, set_device, get_device_type, get_ai_onnx_version, set_memory_proportion, get_memory_proportion, enable_global_initializer, get_global_initializer, get_exclude_weights, set_exclude_weights, get_all_trainable_weights, load_model, set_load_strict, load_strict, set_weight_fp16, get_weight_fp16, get_all_tensors_info
CONSTANT_COUNTER = [0]
TENSOR_NAMES = []
def unified_op_warpper(func):
def warpper_func(*args, **kwargs):
results = func(*args, **kwargs)
return results
return warpper_func
def name_scope(scope_name):
return get_builder().nameScope(scope_name)
def add_input_tensor(dtype, shape, debugContext=''):
input_t = get_builder().addInputTensor(popart.TensorInfo(dtype, shape),
debugContext=debugContext)
return TTensor(input_t)
def identity(input, debugContext=''):
return TTensor(get_builder().aiOnnx.identity([input.getIpuIndex()],
debugContext=debugContext))
def pad(data, pads, mode='constant', constant_value=0, debugContext=''):
constant_value = constant(constant_value).cast(data.dtype.upper())
pads = to_tensor(pads).cast('INT64').flatten()
result = get_builder().aiOnnx.pad(
[data.getIpuIndex(),
pads.getIpuIndex(),
constant_value.getIpuIndex()],
mode=mode,
debugContext=debugContext)
return TTensor(result)
def _conv2d(input,
filter,
bias=False,
strides=[1, 1],
pads=[1, 1, 1, 1],
dilations=[1, 1],
group=1,
debugContext=''):
"""Encapsulation of function get_builder().aiOnnx.conv!
args:
x: input tensor
ksize: int,kernel size
stride: int,stride of conv
pads: int, conv padding
c_out: int, output channel
group: int, conv group nums,default:1
"""
args = [input.getIpuIndex(), filter.getIpuIndex()]
if bias:
args.append(bias.getIpuIndex())
output = get_builder().aiOnnx.conv(args,
strides=strides,
pads=pads,
dilations=dilations,
group=group,
debugContext=debugContext)
if get_memory_proportion() is not None:
get_builder().setAvailableMemoryProportion(output,
get_memory_proportion())
return TTensor(output)
def relu(x, debugContext=""):
"""
args:
x: input tensor
"""
if isinstance(x, list):
x = [ele.getIpuIndex() for ele in x]
else:
x = [x.getIpuIndex()]
x = get_builder().aiOnnx.relu(x, debugContext=debugContext)
return TTensor(x)
def maxPooling(x,
strides=[2, 2],
kernel_size=[2, 2],
padding=[0, 0, 0, 0],
dilations=[1, 1],
ceil_mode=0,
debugContext=""):
"""
args:
x: input tensor
strides: maxpool strides (output_size = input_size / strides)
kernel_size: size of the window over which the max is taken
"""
x = get_builder().aiOnnx.maxpool(args=[x.getIpuIndex()],
num_outputs=1,
kernel_shape=kernel_size,
pads=padding,
strides=strides,
dilations=dilations,
ceil_mode=ceil_mode,
debugContext=debugContext)
return TTensor(x[0])
def avgPooling(x,
strides=2,
kernel_size=2,
padding=0,
count_include_pad=0,
debugContext=""):
x = get_builder().aiOnnx.averagepool(
[x.getIpuIndex()],
kernel_shape=[kernel_size, kernel_size],
count_include_pad=count_include_pad,
pads=[padding] * 4,
strides=[strides, strides],
debugContext=debugContext)
return TTensor(x)
def check_all_constant(tensors):
for t in tensors:
if isinstance(t, ConstantTensor) is False:
return False
return True
def resize(x,
roi=None,
scales=None,
sizes=None,
coordinate_transformation_mode='half_pixel',
cubic_coeff_a=-0.75,
exclude_outside=0,
extrapolation_value=0.0,
mode='nearest',
nearest_mode='round_prefer_floor',
debugContext=''):
# TODO Check whether each parameter is correct
# x:N-D tensor
# roi: 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X
# scales: tensor(float),The scale array along each dimension.
# sizes: tensor(int64),The size of the output tensor.
# Only one of 'scales' and 'sizes' can be specified.
assert (scales is None) != (sizes is None), "Only one of 'scales' and 'sizes' can be specified"
if roi is not None:
# an explicit roi only applies to 'tf_crop_and_resize' mode; that path is not implemented yet
assert coordinate_transformation_mode == 'tf_crop_and_resize'
raise NotImplementedError
roi = constant(
np.array([0, -1] * x.shape.ndims).astype(
mappin_gc2npy[x.dtype])) if roi is None else roi
scales = constant(np.array(
[1.0] * x.shape.ndims).astype('FLOAT32')) if scales is None else scales
sizes = constant(np.array(
[1] * x.shape.ndims).astype('INT64')) if sizes is None else sizes
inputs_list = [
x.getIpuIndex(),
roi.getIpuIndex(),
scales.getIpuIndex(),
sizes.getIpuIndex()
]
inputs_dic = {
'coordinate_transformation_mode': coordinate_transformation_mode,
'cubic_coeff_a': cubic_coeff_a,
'exclude_outside': exclude_outside,
'extrapolation_value': extrapolation_value,
'mode': mode,
'nearest_mode': nearest_mode,
'debugContext': debugContext
}
result = TTensor(get_builder().aiOnnx.resize(inputs_list, **inputs_dic))
return result
def matmul(x, y, debugContext=""):
if check_all_constant([x, y]):
# degrade to np op
result = np.matmul(x.data, y.data)
return constant(result)
else:
assert x.dtype in ['FLOAT', "FLOAT16"]
assert y.dtype in ['FLOAT', "FLOAT16"]
return TTensor(get_builder().aiOnnx.matmul(
[x.getIpuIndex(), y.getIpuIndex()], debugContext=debugContext))
def scalarTensor2int(t):
if isinstance(t, ConstantTensor):
return int(t.data)
assert isinstance(t, int)
return t
def reshape(source, target_shape, debugContext=""):
"""
args:
source : tensor name
target_shape: list of int e.g.: [3,4,5,6]
"""
if isinstance(target_shape, TTensor):
target_shape = target_shape.data
if isinstance(target_shape, np.ndarray):
target_shape = target_shape.tolist()
if isinstance(target_shape, list):
target_shape = [scalarTensor2int(ele) for ele in target_shape]
target_shape = constant(np.array(target_shape).astype(np.int64),
debugContext=debugContext)
if check_all_constant([source, target_shape]):
# degrade to np op
result = source.data.reshape(target_shape.data)
result = constant(result)
return result
else:
return TTensor(get_builder().aiOnnx.reshape(
[source.getIpuIndex(),
target_shape.getIpuIndex()],
debugContext=debugContext))
def softmax_2d(x, axis=1, debugContext=""):
assert axis in [-1, 1]
assert x.shape.ndims == 2
x = get_builder().aiOnnx.softmax(
[x.getIpuIndex()], axis=axis,
debugContext=debugContext)
return TTensor(x)
def _batchNorm(
x,
scale,
biases,
mean,
var,
num_outputs=1,
momentum=0.9,
epsilon=1e-5,
debugContext="",
):
results = get_builder().aiOnnx.batchnormalization(
[
x.getIpuIndex(),
scale.getIpuIndex(),
biases.getIpuIndex(),
mean.getIpuIndex(),
var.getIpuIndex()
],
num_outputs=num_outputs,
epsilon=epsilon,
momentum=momentum,
debugContext=debugContext)
results = results[0] if num_outputs == 1 else results
if isinstance(results, list):
results = [TTensor(r) for r in results]
else:
results = [TTensor(results)]
return results
def _concat(tensor_list, dim, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
np_arr_list = [t.data for t in tensor_list]
result = np.concatenate(np_arr_list, axis=dim)
return constant(result)
return TTensor(get_builder().aiOnnx.concat(
[tensor.getIpuIndex() for tensor in tensor_list],
dim,
debugContext=debugContext))
def sqrt(x, debugContext=""):
result = get_builder().aiOnnx.sqrt([x.getIpuIndex()],
debugContext=debugContext)
return TTensor(result)
def sigmoid(tensor, debugContext=""):
return TTensor(get_builder().aiOnnx.sigmoid([tensor.getIpuIndex()],
debugContext=debugContext))
def transpose(x, dim_order, debugContext=""):
"""dim_order: list of int. eg:[0,2,3,1]"""
if check_all_constant([x]):
# degrade to np op
result = np.transpose(x.data, dim_order)
return constant(result)
return TTensor(get_builder().aiOnnx.transpose([x.getIpuIndex()],
dim_order,
debugContext=debugContext))
def mean(x, debugContext=""):
return TTensor(get_builder().aiOnnx.mean([x.getIpuIndex()],
debugContext=debugContext))
def gc_slice(x, axes, starts, ends, debugContext=""):
if check_all_constant([x, axes, starts, ends]):
# degrade to np op
x = x.data
x_slices = []
for start, end in zip(starts.data.tolist(), ends.data.tolist()):
x_slices.append(slice(start, end))
return constant(x[x_slices])
else:
x = get_builder().aiOnnx.slice([
x.getIpuIndex(),
starts.getIpuIndex(),
ends.getIpuIndex(),
axes.getIpuIndex()
],
debugContext=debugContext)
return TTensor(x)
def topk(x, k, sorted=True, dim=-1, debugContext=""):
"""
args:
k: number of top elements to return
dim: dimension along which to sort and select
"""
if k.shape.ndims == 0:
k = k.unsqueeze(0)
else:
assert k.shape.ndims == 1
values, order = get_builder().aiOnnx.topk(
[x.getIpuIndex(), k.getIpuIndex()],
axis=dim,
sorted=sorted,
debugContext=debugContext)
return TTensor(values), TTensor(order)
def constant(x, debugContext=''):
if np.isscalar(x):
return ConstantScalar(None, x)
assert isinstance(x, np.ndarray)
return ConstantTensor(None, x)
def shapeConstant(x, debugContext=''):
if np.isscalar(x):
x = np.array(x)
assert isinstance(x, np.ndarray)
return ShapeTensor(None, x)
def gather(x, indices, dim=0, debugContext=""):
x = get_builder().aiOnnx.gather(
[x.getIpuIndex(), indices.getIpuIndex()],
axis=dim,
debugContext=debugContext)
return TTensor(x)
def addInitializedInputTensor(array, debugContext=""):
"""
args:
array: a numpy array that will be copied to the IPU
return:
TTensor wrapping the created tensor name
"""
name = get_builder().addInitializedInputTensor(array,
debugContext=debugContext)
return TTensor(name)
def unsqueeze(x, dims, debugContext=""):
"""
args:
dims: list of ints giving the axes at which new dimensions are inserted,
e.g. [3] or [1, 3]
"""
if check_all_constant([x]):
# degrade to np op
result = np.expand_dims(x.data, axis=dims)
return constant(result)
x = get_builder().aiOnnx.unsqueeze([x.getIpuIndex()],
axes=dims,
debugContext=debugContext)
return TTensor(x)
def ceil(x, debugContext=""):
result = get_builder().aiOnnx.ceil([x.getIpuIndex()],
debugContext=debugContext)
return TTensor(result)
def squeeze(x, dims, debugContext=""):
if check_all_constant([x]):
# degrade to np op
x = x.data
current_dim = float('inf')
for dim in reversed(dims):
assert current_dim > dim
current_dim = dim
x = x.squeeze(dim)
return constant(x)
if isinstance(dims, int):
dims = [dims]
for dim in dims:
assert x.pureShape[dim] == 1
x = get_builder().aiOnnx.squeeze([x.getIpuIndex()],
axes=dims,
debugContext=debugContext)
return TTensor(x)
def exp(x, debugContext=""):
return TTensor(get_builder().aiOnnx.exp([x.getIpuIndex()],
debugContext=debugContext))
def printTensor(t):
get_builder().aiGraphcore.printtensor([t.getIpuIndex()], print_gradient=0)
def mul(tensors, debugContext=""):
if check_all_constant(tensors):
# degrade to np op
result = 1
for t in tensors:
result = t.data * result
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.mul([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def add(tensors, debugContext=""):
if check_all_constant(tensors):
# degrade to np op
result = 0
for t in tensors:
result = result + t.data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.add([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def div(tensors, debugContext=""):
assert len(tensors) == 2
if check_all_constant(tensors):
# degrade to np op
result = tensors[0].data / tensors[1].data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.div([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def sub(tensors, debugContext=""):
assert len(tensors) == 2
if check_all_constant(tensors):
# degrade to np op
result = tensors[0].data - tensors[1].data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.sub([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def max(tensor_list, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
arr_list = [t.data for t in tensor_list]
result = np.max(arr_list)
return constant(result)
return TTensor(get_builder().aiOnnx.max(
[t.getIpuIndex() for t in tensor_list], debugContext=debugContext))
def min(tensor_list, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
arr_list = [t.data for t in tensor_list]
result = np.min(arr_list)
return constant(result)
return TTensor(get_builder().aiOnnx.min(
[t.getIpuIndex() for t in tensor_list], debugContext=debugContext))
def split(x, lenOfSplit, dim, debugContext=""):
"""
args:
lenOfSplit: e.g. (4, 1) splits into two pieces, one of length 4
and one of length 1
"""
return TTensor(get_builder().aiOnnx.split([x.getIpuIndex()],
len(lenOfSplit),
dim,
lenOfSplit,
debugContext=debugContext))
def clip(x, minmun=-np.inf, maxmun=np.inf, debugContext=""):
if get_ai_onnx_version() >= 11:
minmun = constant(np.asarray(minmun).astype(np.float32))
maxmun = constant(np.asarray(maxmun).astype(np.float32))
return TTensor(get_builder().aiOnnx.clip(
[x.getIpuIndex(),
minmun.getIpuIndex(),
maxmun.getIpuIndex()],
debugContext=debugContext))
else:
return TTensor(get_builder().aiOnnx.clip([x.getIpuIndex()],
float(maxmun),
float(minmun),
debugContext=debugContext))
def reduceprod(x, dim, keepdims=False, debugContext=""):
"""
args:
dim: int, the axis along which to take the product
"""
x = get_builder().aiOnnx.reduceprod([x.getIpuIndex()],
axes=[dim],
keepdims=keepdims,
debugContext=debugContext)
return TTensor(x)
def cast(x, target_type='FLOAT', debugContext=''):
"""
target_type:
FLOAT|FLOAT16|INT8|INT16|INT32|UINT8|UINT16|UINT32|BOOL
"""
target_type = 'FLOAT' if target_type == 'FLOAT32' else target_type
if check_all_constant([x]):
# degrade to np op
data = x.data.astype(mappin_gc2npy[target_type])
return constant(data)
else:
return TTensor(get_builder().aiOnnx.cast([x.getIpuIndex()],
target_type.upper(),
debugContext))
def log(x, debugContext=''):
return TTensor(get_builder().aiOnnx.log([x.getIpuIndex()], debugContext))
def less(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.less(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def abs(x, debugContext=''):
return TTensor(get_builder().aiOnnx.abs([x.getIpuIndex()], debugContext))
def argmax(x, axis=0, keepdims=0, debugContext=''):
return TTensor(get_builder().aiOnnx.argmax([x.getIpuIndex()], axis,
keepdims, debugContext))
def reducemax(x, axes=0, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemax([x.getIpuIndex()], axes,
keepdims, debugContext))
def reducemin(x, axes=0, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemin([x.getIpuIndex()], axes,
keepdims, debugContext))
def reduceMean(x, axes, keepdims=False, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemean([x.getIpuIndex()],
axes=axes,
keepdims=keepdims,
debugContext=debugContext))
def greater(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.greater(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def equal(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.equal(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def logical_and(x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_and(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def logical_not(x, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_not([x.getIpuIndex()],
debugContext))
def logical_or(x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_or(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def reduceSum(x, axes=None, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducesum([x.getIpuIndex()], axes,
keepdims, debugContext))
def cumsum(x, axes, exclusive=0, reverse=0, debugContext=''):
if x.dtype == 'FLOAT16':
raise NotImplementedError('fp16 is not supported')
return TTensor(get_builder().aiOnnx.cumsum(
[x.getIpuIndex(), axes.getIpuIndex()], exclusive, reverse,
debugContext))
def expand(x, shape, debugContext=''):
return TTensor(get_builder().aiOnnx.expand(
[x.getIpuIndex(), shape.getIpuIndex()], debugContext=debugContext))
def randomuniformlike(x, high=6.0, low=-6.0):
result = get_builder().aiOnnx.randomuniformlike([x.getIpuIndex()],
high=high,
low=low)
return TTensor(result)
def flatten(x):
'''Implements numpy's flatten (returns a 1-D tensor).
'''
if check_all_constant([x]):
x = x.data.flatten()
return constant(x)
x = get_builder().aiOnnx.flatten([x.getIpuIndex()], 0)
return TTensor(x).squeeze(0)
def oneslike(t, dtype=None, debugContext=''):
dtype = t.dtype if dtype is None else dtype
dshape = t.pureShape
assert 0 not in dshape
return ones(dshape, dtype, debugContext=debugContext)
def ones(shape, dtype='FLOAT', debugContext=''):
return constant(np.ones(shape, dtype=mappin_gc2npy[dtype]),
debugContext=debugContext)
def zeroslike(t, dtype=None, debugContext=''):
dtype = t.dtype if dtype is None else dtype
dshape = t.pureShape
assert 0 not in dshape
return zeros(dshape, dtype, debugContext=debugContext)
def zeros(shape, dtype='FLOAT', debugContext=''):
return constant(np.zeros(shape, dtype=mappin_gc2npy[dtype]),
debugContext=debugContext)
def where(condition, x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.where(
[condition.getIpuIndex(),
x.getIpuIndex(),
y.getIpuIndex()]))
def detach(x):
return TTensor(get_builder().aiGraphcore.detach([x.getIpuIndex()]))
def one_hot(indices, depth, values=None, debugContext=''):
'''
values: [off_value, on_value]
if an index is -1, the corresponding row is [0]*depth
'''
if isinstance(depth, int):
depth = to_tensor(depth, dtype='INT64')
if values is None:
values = constant(np.asarray([0, 1]).astype(np.int32),
debugContext=debugContext)
assert indices.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
assert depth.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
assert values.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
result = get_builder().aiOnnx.onehot(
[indices.getIpuIndex(),
depth.getIpuIndex(),
values.getIpuIndex()],
debugContext=debugContext)
result = TTensor(result)
result_shape = list(result.pureShape)
if result_shape[1] == 0:
result_shape[1] = depth
result = result.reshape(result_shape)
return result
def tile(input, repeats, debugContext=""):
if check_all_constant([input, repeats]):
result = np.tile(input.data, repeats.data)
return constant(result)
result = get_builder().aiOnnx.tile(
[input.getIpuIndex(), repeats.getIpuIndex()], debugContext)
return TTensor(result)
def checkTensorsTypeSame(tensors_list):
types = [t.dtype for t in tensors_list]
if len(set(types)) == 2:
assert 'int32' in types and 'int64' in types, 'only int32 and int64 are treated as the same type'
else:
assert len(set(types)) == 1, 'types should be same'
def np64to32_or_16(np_arr):
# temporary workaround while the IPU does not support 64-bit data
local_mappin = {
'int64': np.int32,
'uint64': np.uint32,
'float64': np.float16
}
if np_arr.dtype.name in list(local_mappin.keys()):
np_arr = np_arr.astype(local_mappin[np_arr.dtype.name])
return np_arr
def to_tensor(x, dtype=None):
# return ConstantTensor if x type is int,float,ndarray,list, or ConstantTensor
# return TTensor if x type is TTensor
if
|
np.isscalar(x)
|
numpy.isscalar
|
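A hedged NumPy reference for the one_hot semantics documented above (values supply [off_value, on_value] and an index of -1 yields an all-off row); it mirrors the stated intent only, not the PopART op itself, and the helper name is illustrative.

import numpy as np

def one_hot_reference(indices, depth, off_value=0, on_value=1):
    indices = np.asarray(indices)
    out = np.full(indices.shape + (depth,), off_value, dtype=np.int32)
    valid = indices >= 0
    out[np.nonzero(valid) + (indices[valid],)] = on_value
    return out

print(one_hot_reference([2, 0, -1], depth=4))
# [[0 0 1 0]
#  [1 0 0 0]
#  [0 0 0 0]]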
from numbers import Number
from typing import Tuple, Optional, List, Union, Dict
import torch
import numpy as np
from ....data.subject import Subject
from ....constants import LABEL, DATA, AFFINE, TYPE, STEM
from ....typing import TypeRangeFloat
from .. import RandomTransform
from ... import SpatialTransform
class RandomAffineFFT(RandomTransform, SpatialTransform):
r"""Random affine transformation.
Args:
scales: Tuple :math:`(a, b)` defining the scaling
magnitude. The scaling values along each dimension are
:math:`(s_1, s_2, s_3)`, where :math:`s_i \sim \mathcal{U}(a, b)`.
For example, using ``scales=(0.5, 0.5)`` will zoom out the image,
making the objects inside look twice as small while preserving
the physical size and position of the image.
degrees: Tuple :math:`(a, b)` defining the rotation range in degrees.
The rotation angles around each axis are
:math:`(\theta_1, \theta_2, \theta_3)`,
where :math:`\theta_i \sim \mathcal{U}(a, b)`.
If only one value :math:`d` is provided,
:math:`\theta_i \sim \mathcal{U}(-d, d)`.
isotropic: If ``True``, the scaling factor along all dimensions is the
same, i.e. :math:`s_1 = s_2 = s_3`.
default_pad_value: As the image is rotated, some values near the
borders will be undefined.
If ``'minimum'``, the fill value will be the image minimum.
If ``'mean'``, the fill value is the mean of the border values.
If ``'otsu'``, the fill value is the mean of the values at the
border that lie under an
`Otsu threshold <https://ieeexplore.ieee.org/document/4310076>`_.
p: Probability that this transform will be applied.
seed: See :py:class:`~torchio.transforms.augmentation.RandomTransform`.
.. note:: Rotations are performed around the center of the image.
Example:
>>> from torchio.transforms import RandomAffine, Interpolation
>>> sample = images_dataset[0] # instance of torchio.ImagesDataset
>>> transform = RandomAffine(
... scales=(0.9, 1.2),
... degrees=(10),
... isotropic=False,
... default_pad_value='otsu',
... )
>>> transformed = transform(sample)
From the command line::
$ torchio-transform t1.nii.gz RandomAffine --kwargs "degrees=30 default_pad_value=minimum" --seed 42 affine_min.nii.gz
"""
def __init__(
self,
scales: Tuple[float, float] = (0.9, 1.1),
degrees: TypeRangeFloat = 10,
isotropic: bool = False,
default_pad_value: Union[str, float] = 'otsu',
p: float = 1,
oversampling_pct=0.2,
metrics: Dict = None
):
super().__init__(p=p, metrics=metrics)
self.scales = scales
self.degrees = self.parse_degrees(degrees)
self.isotropic = isotropic
self.default_pad_value = self.parse_default_value(default_pad_value)
self.oversampling_pct = oversampling_pct
@staticmethod
def parse_default_value(value: Union[str, float]) -> Union[str, float]:
if isinstance(value, Number) or value in ('minimum', 'otsu', 'mean'):
return value
message = (
'Value for default_pad_value must be "minimum", "otsu", "mean"'
' or a number'
)
raise ValueError(message)
def apply_transform(self, sample: Subject) -> dict:
sample.check_consistent_spatial_shape()
scaling_params, rotation_params = self.get_params(
self.scales, self.degrees, self.isotropic)
random_parameters_dict = {
'scaling': scaling_params,
'rotation': rotation_params,
'oversampling' : self.oversampling_pct
}
for image_dict in sample.get_images(intensity_only=False):
if image_dict[TYPE] == LABEL:
padding_values = [0]
else:
padding_values = estimate_borders_mean_std(image_dict[DATA].numpy())
add_name = image_dict[STEM] if image_dict[STEM] is not None else ''  # STEM is not defined when the transform is called directly on a tensor
random_parameters_dict['noise_mean_' + add_name] = float(padding_values[0])
random_parameters_dict['noise_std_' + add_name] = float(padding_values[1])
image_dict[DATA] = self.apply_affine_transform(
image_dict[DATA],
scaling_params,
rotation_params,
padding_values,
)
#sample.add_transform(self, random_parameters_dict)
return sample
@staticmethod
def get_params(
scales: Tuple[float, float],
degrees: Tuple[float, float],
isotropic: bool,
) -> Tuple[List[float], List[float]]:
scaling_params = torch.FloatTensor(3).uniform_(*scales)
if isotropic:
scaling_params.fill_(scaling_params[0])
rotation_params = torch.FloatTensor(3).uniform_(*degrees)
return scaling_params.tolist(), rotation_params.tolist()
def apply_affine_transform(
self,
tensor: torch.Tensor,
scaling_params: List[float],
rotation_params: List[float],
padding_values: List[float]
) -> torch.Tensor:
assert tensor.ndim == 4
assert len(tensor) == 1
from torchio.transforms.augmentation.intensity.random_motion_from_time_course import create_rotation_matrix_3d
import math
import finufftpy
image = tensor[0]
#noise_mean, nois_std = estimate_borders_mean_std(np.abs(image.numpy())) #random_noise gives negativ values ...
noise_mean, nois_std = estimate_borders_mean_std(image.numpy())
original_image_shape = image.shape
if self.oversampling_pct > 0.0:
if len(padding_values) == 2: #mean std
padd_mode = 'random.normal'
else:
padd_mode = 'constant'
image = self._oversample(image, self.oversampling_pct, padding_mode=padd_mode,
padding_normal=padding_values)
#im_freq_domain = (np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(image)))).astype(np.complex128)
im_freq_domain = self._fft_im(image)
#if self.oversampling_pct > 0.0:
# im_freq_domain = self._oversample(im_freq_domain, self.oversampling_pct,
# padding_mode='random.normal', padding_normal=(noise_mean, nois_std))
rrrot = -np.radians(rotation_params); rrrot[1] = -rrrot[1]  # flip the second angle to match the SimpleITK convention
rotation_matrices = create_rotation_matrix_3d(rrrot)
scaling_matrices = np.eye(3) / np.array(scaling_params) #/ to have same convention as
rotation_matrices = np.matmul(rotation_matrices, scaling_matrices)
im_shape = im_freq_domain.shape
center = [math.ceil((x - 1) / 2) for x in im_shape]
[i1, i2, i3] = np.meshgrid(2*(np.arange(im_shape[0]) - center[0])/im_shape[0],
2*(np.arange(im_shape[1]) - center[1])/im_shape[1],
2*(np.arange(im_shape[2]) - center[2])/im_shape[2], indexing='ij')
grid_coordinates = np.array([i1.flatten('F'), i2.flatten('F'), i3.flatten('F')])
method='one_matrix'
if method=='one_matrix':
new_grid_coords =
|
np.matmul(rotation_matrices, grid_coordinates)
|
numpy.matmul
|
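A hedged, stripped-down sketch of the grid-rotation step above: build normalized frequency coordinates for a small volume and rotate them with np.matmul, the call the record is completing. The simple z-axis rotation matrix is an assumption standing in for create_rotation_matrix_3d.

import math
import numpy as np

shape = (8, 8, 8)
center = [math.ceil((s - 1) / 2) for s in shape]
i1, i2, i3 = np.meshgrid(*[2 * (np.arange(s) - c) / s for s, c in zip(shape, center)],
                         indexing='ij')
grid = np.array([i1.flatten('F'), i2.flatten('F'), i3.flatten('F')])  # 3 x N

theta = np.radians(10.0)
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta),  np.cos(theta), 0.0],
                [0.0,            0.0,           1.0]])
new_grid = np.matmul(rot, grid)
print(new_grid.shape)  # (3, 512)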
# -*- coding: utf-8 -*-
"""Test for the csdm object
1) generate csdm object.
2) split multiple dependent variables to individual objects.
3) add, sub, iadd, radd, isub, rsub, mul, imul, for scalar and ScalarQuantity.
4) rmul, truediv, itruediv, rtruediv, pow, ipow for scalar and ScalarQuantity.
5) min, max, clip, real, imag, conj, round, angle functions.
"""
import json
import numpy as np
import pytest
import csdmpy as cp
def get_test(type):
out = np.random.rand(10).astype(type)
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
a_test.add_dependent_variable(
{"type": "internal", "quantity_type": "scalar", "unit": "m", "components": out}
)
return out, a_test
def get_test_2d(type):
out = np.random.rand(50).astype(type).reshape(10, 5)
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=5, increment="1s"))
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components": out.ravel(),
}
)
return out, a_test
def test_csdm():
data = cp.new(description="This is a test")
assert data != "sd"
assert data.size == 1
# read_only
assert data.read_only is False
data.read_only = True
assert data.read_only is True
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.read_only = "True"
# tags
assert data.tags == []
data.tags = ["1", "2", "3"]
assert data.tags == ["1", "2", "3"]
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.tags = "23"
# version
assert data.version == cp.csdm.CSDM.__latest_CSDM_version__
# geographic_coordinate
assert data.geographic_coordinate == {}
error = "can't set attribute"
with pytest.raises(AttributeError, match=".*{0}.*".format(error)):
data.geographic_coordinate = {}
# description
assert data.description == "This is a test"
data.description = "Enough with the tests"
assert data.description == "Enough with the tests"
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.description = {}
# application
assert data.application == {}
data.application = {"csdmpy": "Some day"}
assert data.application == {"csdmpy": "Some day"}
error = "Expecting an instance of type"
with pytest.raises(TypeError, match=".*{0}.*".format(error)):
data.application = "Some other day"
# filename
assert data.filename == ""
# data_structure
structure = {
"csdm": {
"version": "1.0",
"read_only": True,
"tags": ["1", "2", "3"],
"description": "Enough with the tests",
"application": {"csdmpy": "Some day"},
}
}
assert data.data_structure == str(
json.dumps(structure, ensure_ascii=False, sort_keys=False, indent=2)
)
assert data.dict() == structure
# equality check
dm = data.copy()
assert dm == data
assert dm.shape == ()
dm.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
assert dm != data
def test_split():
a = cp.new()
a.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 1, "quantity_type": "scalar"}
)
b = cp.new()
b.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
b.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 2, "quantity_type": "scalar"}
)
c = cp.new()
c.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
c.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 1, "quantity_type": "scalar"}
)
c.add_dependent_variable(
{"type": "internal", "components": np.arange(10) + 2, "quantity_type": "scalar"}
)
a_, b_ = c.split()
assert a_ == a
assert b_ == b
a_test = cp.new()
a_test.dimensions.append(cp.LinearDimension(count=10, increment="1s"))
a_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components": np.arange(10),
}
)
a1_test = cp.new()
a1_test.dimensions.append(cp.LinearDimension(count=10, increment="1m"))
a1_test.add_dependent_variable(
{
"type": "internal",
"quantity_type": "scalar",
"unit": "m",
"components":
|
np.arange(10)
|
numpy.arange
|
from ..util import BaseCase
import numpy as np
import scipy.sparse as sps
import warnings
import pygsti.tools.matrixtools as mt
class MatrixToolsTester(BaseCase):
def test_is_hermitian(self):
herm_mx = np.array([[ 1, 1+2j],
[1-2j, 3]], 'complex')
non_herm_mx = np.array([[ 1, 4+2j],
[1+2j, 3]], 'complex')
self.assertTrue(mt.is_hermitian(herm_mx))
self.assertFalse(mt.is_hermitian(non_herm_mx))
s = mt.mx_to_string(non_herm_mx)
# TODO assert correctness
def test_is_pos_def(self):
pos_mx = np.array([[ 4, 0.2],
[0.1, 3]], 'complex')
non_pos_mx = np.array([[ 0, 1],
[1, 0]], 'complex')
self.assertTrue(mt.is_pos_def(pos_mx))
self.assertFalse(mt.is_pos_def(non_pos_mx))
def test_is_valid_density_mx(self):
density_mx = np.array([[ 0.9, 0],
[ 0, 0.1]], 'complex')
non_density_mx = np.array([[ 2.0, 1.0],
[-1.0, 0]], 'complex')
self.assertTrue(mt.is_valid_density_mx(density_mx))
self.assertFalse(mt.is_valid_density_mx(non_density_mx))
s = mt.mx_to_string(density_mx)
# TODO assert correctness
def test_nullspace(self):
a = np.array([[1, 1], [1, 1]])
print("Nullspace = ", mt.nullspace(a))
expected = np.array(
[[ 0.70710678],
[-0.70710678]]
)
diff1 = np.linalg.norm(mt.nullspace(a) - expected)
diff2 = np.linalg.norm(mt.nullspace(a) + expected) # -1*expected is OK too (just an eigenvector)
self.assertTrue(np.isclose(diff1, 0) or np.isclose(diff2, 0))
diff1 = np.linalg.norm(mt.nullspace_qr(a) - expected)
diff2 = np.linalg.norm(mt.nullspace_qr(a) + expected) # -1*expected is OK too (just an eigenvector)
self.assertTrue(np.isclose(diff1, 0) or np.isclose(diff2, 0))
mt.print_mx(a)
def test_helpers(self):
a = np.array([1, 2, 3], 'd')
self.assertTrue(mt.array_eq(a, a))
def test_matrix_log(self):
M = np.array([[-1, 0], [0, -1]], 'complex') # degenerate negative evals
mt.real_matrix_log(M, actionIfImaginary="raise", TOL=1e-6)
# TODO assert correctness
M = np.array([[-1, 1e-10], [1e-10, -1]], 'complex') # degenerate negative evals, but will generate complex evecs
mt.real_matrix_log(M, actionIfImaginary="raise", TOL=1e-6)
# TODO assert correctness
M = np.array([[1, 0], [0, -1]], 'd') # a negative *unparied* eigenvalue => log may be imaginary
mt.real_matrix_log(M, actionIfImaginary="ignore", TOL=1e-6)
# TODO assert correctness
def test_matrix_log_warns_on_imaginary(self):
M = np.array([[1, 0], [0, -1]], 'd')
self.assertWarns(Warning, mt.real_matrix_log, M, actionIfImaginary="warn", TOL=1e-6)
def test_matrix_log_raises_on_imaginary(self):
M = np.array([[1, 0], [0, -1]], 'd')
with self.assertRaises(ValueError):
mt.real_matrix_log(M, actionIfImaginary="raise", TOL=1e-6)
def test_matrix_log_raises_on_invalid_action(self):
M = np.array([[1, 0], [0, -1]], 'd')
with self.assertRaises(AssertionError):
mt.real_matrix_log(M, actionIfImaginary="foobar", TOL=1e-6)
def test_matrix_log_raise_on_no_real_log(self):
a = np.array([[1, 1], [1, 1]])
with self.assertRaises(AssertionError):
mt.real_matrix_log(a)
def test_minweight_match(self):
a = np.array([1, 2, 3, 4], 'd')
b = np.array([3.1, 2.1, 4.1, 1.1], 'd')
expectedPairs = [(0, 3), (1, 1), (2, 0), (3, 2)] # (i,j) indices into a & b
wts = mt.minweight_match(a, b, metricfn=None, return_pairs=False,
pass_indices_to_metricfn=False)
wts, pairs = mt.minweight_match(a, b, metricfn=None, return_pairs=True,
pass_indices_to_metricfn=False)
self.assertEqual(set(pairs), set(expectedPairs))
def fn(x, y): return abs(x - y)
wts, pairs = mt.minweight_match(a, b, metricfn=fn, return_pairs=True,
pass_indices_to_metricfn=False)
self.assertEqual(set(pairs), set(expectedPairs))
def fn(i, j): return abs(a[i] - b[j])
wts, pairs = mt.minweight_match(a, b, metricfn=fn, return_pairs=True,
pass_indices_to_metricfn=True)
self.assertEqual(set(pairs), set(expectedPairs))
def test_fancy_assignment(self):
a = np.zeros((4, 4, 4), 'd')
twoByTwo = np.ones((2, 2), 'd')
#NOTEs from commit message motivating why we need this:
# a = np.zeros((3,3,3))
# a[:,1:2,1:3].shape == (3,1,2) # good!
# a[0,:,1:3].shape == (3,2) #good!
# a[0,:,[1,2]].shape == (2,3) # ?? (broadcasting ':' makes this like a[0,[1,2]])
# a[:,[1,2],[1,2]].shape == (3,2) # ?? not (3,2,2) b/c lists broadcast
# a[:,[1],[1,2]].shape == (3,2) # ?? not (3,1,2) b/c lists broadcast
# a[:,[1,2],[0,1,2]].shape == ERROR b/c [1,2] can't broadcast to [0,1,2]!
#simple integer indices
mt._fas(a, (0, 0, 0), 4.5) # a[0,0,0] = 4.5
self.assertAlmostEqual(a[0, 0, 0], 4.5)
mt._fas(a, (0, 0, 0), 4.5, add=True) # a[0,0,0] += 4.5
self.assertAlmostEqual(a[0, 0, 0], 9.0)
#still simple: mix of slices and integers
mt._fas(a, (slice(0, 2), slice(0, 2), 0), twoByTwo) # a[0:2,0:2,0] = twoByTwo
self.assertArraysAlmostEqual(a[0:2, 0:2, 0], twoByTwo)
#complex case: some/all indices are integer arrays
mt._fas(a, ([0, 1], [0, 1], 0), twoByTwo[:, :]) # a[0:2,0:2,0] = twoByTwo - but a[[0,1],[0,1],0] wouldn't do this!
self.assertArraysAlmostEqual(a[0:2, 0:2, 0], twoByTwo)
mt._fas(a, ([0, 1], [0, 1], 0), twoByTwo[:, :], add=True) # a[0:2,0:2,0] = twoByTwo - but a[[0,1],[0,1],0] wouldn't do this!
self.assertArraysAlmostEqual(a[0:2, 0:2, 0], 2 * twoByTwo)
# Fancy indexing (without assignment)
self.assertEqual(mt._findx(a, (0, 0, 0)).shape, ()) # (1,1,1))
self.assertEqual(mt._findx(a, (slice(0, 2), slice(0, 2), slice(0, 2))).shape, (2, 2, 2))
self.assertEqual(mt._findx(a, (slice(0, 2), slice(0, 2), 0)).shape, (2, 2))
self.assertEqual(mt._findx(a, ([0, 1], [0, 1], 0)).shape, (2, 2))
self.assertEqual(mt._findx(a, ([], [0, 1], 0)).shape, (0, 2))
def test_safe_ops(self):
mx = np.array([[1+1j, 0],
[2+2j, 3+3j]], 'complex')
smx = sps.csr_matrix(mx)
smx_lil = sps.lil_matrix(mx) # currently unsupported
r = mt.safereal(mx, inplace=False)
self.assertArraysAlmostEqual(r, np.real(mx))
i = mt.safeimag(mx, inplace=False)
self.assertArraysAlmostEqual(i, np.imag(mx))
r = mt.safereal(smx, inplace=False)
self.assertArraysAlmostEqual(r.toarray(), np.real(mx))
i = mt.safeimag(smx, inplace=False)
self.assertArraysAlmostEqual(i.toarray(),
|
np.imag(mx)
|
numpy.imag
|
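A hedged sketch of what test_nullspace above checks: the nullspace of [[1, 1], [1, 1]] is spanned by [1, -1]/sqrt(2), recovered here with a plain SVD rather than pygsti's mt.nullspace.

import numpy as np

a = np.array([[1.0, 1.0], [1.0, 1.0]])
_, s, vh = np.linalg.svd(a)
null_vecs = vh[s < 1e-12].T          # columns spanning the nullspace
expected = np.array([[0.70710678], [-0.70710678]])
assert min(np.linalg.norm(null_vecs - expected),
           np.linalg.norm(null_vecs + expected)) < 1e-6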
import numpy as np
import struct
import os
DEG2RAD=np.pi/180.0
RAD2DEG=180.0/np.pi
#Change flicks theta in [-pi/2,pi/2] --> [0,pi]
def read_flicks_file(file_directory,flicks_file):
#Read header file
hdrfile=open(os.path.join(file_directory,"flicks.hdr"),"r")
for idx in range(2):
hdrfile.readline()
line=hdrfile.readline()
if 'spherical exponential' not in line.lower():
print('Warning! Coordinate type "'+line+'" not recognized')
line=hdrfile.readline()
n1p=int(line[:2])
line=hdrfile.readline()
n2p=int(line[:2])
line=hdrfile.readline()
n3p=int(line[:2])
for idx in range(2):
hdrfile.readline()
nvar=0
for idx in range(12):
line=hdrfile.readline()
if len(line)>0:
nvar+=int(line[:1])
hdrfile.close()
#Read main serial flicks file
flicksfile=open(os.path.join(file_directory,flicks_file),"rb")
#Use struct.unpack, big endian => use '>' at start of format string
#Note! Every time Fortran write command is invoked, there are 4 bytes at the start and end with the number of bytes being written
#Use file.read(4) or file.read(8) to skip this padding
flicksfile.read(25)
time=struct.unpack('>f', flicksfile.read(4))[0]
flicksfile.read(8)
ntblks=struct.unpack('>i', flicksfile.read(4))[0]
nlblks=struct.unpack('>i', flicksfile.read(4))[0]
newgrd=struct.unpack('>i', flicksfile.read(4))[0]
flicksfile.read(4)
coord_logR=np.zeros((nlblks,2))
coord_theta=np.zeros((nlblks,2))
coord_phi=np.zeros((nlblks,2))
data=np.zeros((nlblks,n3p,n2p,n1p,nvar))
idx_leaf=0
for idx_blk in range(ntblks):
flicksfile.read(4)
iputwrk=struct.unpack('>'+21*'i', flicksfile.read(21*4))
flicksfile.read(8)
rputwrk=struct.unpack('>'+6*'f', flicksfile.read(6*4))
flicksfile.read(4)
if iputwrk[2]==1:
coord_logR[idx_leaf,:]=[rputwrk[0],rputwrk[1]]
coord_theta[idx_leaf,:]=[rputwrk[2]+np.pi*0.5,rputwrk[3]+np.pi*0.5]
coord_phi[idx_leaf,:]=[rputwrk[4],rputwrk[5]]
for idx_p in range(n3p):
for idx_t in range(n2p):
for idx_r in range(n1p):
flicksfile.read(4)
data[idx_leaf,idx_p,idx_t,idx_r,:]=struct.unpack('>'+nvar*'f', flicksfile.read(nvar*4))
flicksfile.read(4)
idx_leaf+=1
flicksfile.close()
return time,ntblks,nlblks,newgrd,coord_logR,coord_theta,coord_phi,data
def get_flicks_time(flicks_file):
flicksfile=open(flicks_file,"rb")
flicksfile.read(25)
time=struct.unpack('>f', flicksfile.read(4))[0]
flicksfile.close()
return time
def get_flicks_grid_dimensions(data):
nlblk=np.shape(data)[0]
n1p=np.shape(data)[3]
n2p=np.shape(data)[2]
n3p=np.shape(data)[1]
nvar=np.shape(data)[4]
return nlblk,n1p,n2p,n3p,nvar
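# Hedged usage sketch for get_flicks_grid_dimensions: read_flicks_file lays the
# data array out as (leaf block, phi, theta, r, variable), so a dummy array with
# that shape illustrates the returned dimensions.
import numpy as np

dummy = np.zeros((16, 4, 8, 8, 5))  # nlblk=16, n3p=4, n2p=8, n1p=8, nvar=5
nlblk, n1p, n2p, n3p, nvar = get_flicks_grid_dimensions(dummy)
assert (nlblk, n1p, n2p, n3p, nvar) == (16, 8, 8, 4, 5)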
def read_bfield_file(file_directory,bfield_file):
#Read header file
hdrfile=open(os.path.join(file_directory,"bfield.hdr"),"r")
hdrfile.readline()
line=hdrfile.readline()
if 'spherical exponential' not in line.lower():
print('Warning! Coordinate type "'+line+'" not recognized')
line=hdrfile.readline()
n1p=int(line[:2])
line=hdrfile.readline()
n2p=int(line[:2])
line=hdrfile.readline()
n3p=int(line[:2])
hdrfile.close()
#Read main bfield file
bfieldfile=open(os.path.join(file_directory,bfield_file),"rb")
bfieldfile.read(4)
time=struct.unpack('>d', bfieldfile.read(8))[0]
bfieldfile.read(8)
ntblks=struct.unpack('>i', bfieldfile.read(4))[0]
nlblks=struct.unpack('>i', bfieldfile.read(4))[0]
bfieldfile.read(4)
coord_logR=np.zeros((nlblks,2))
coord_theta=np.zeros((nlblks,2))
coord_phi=np.zeros((nlblks,2))
data=np.zeros((nlblks,n3p,n2p,n1p,3))
idx_leaf=0
for idx in range(ntblks):
bfieldfile.read(4)
iputwrk=struct.unpack('>'+35*'i', bfieldfile.read(35*4))
bfieldfile.read(8)
rputwrk=struct.unpack('>'+6*'d', bfieldfile.read(6*8))
bfieldfile.read(4)
if iputwrk[2]==1:
coord_logR[idx_leaf,:]=[rputwrk[0],rputwrk[1]]
coord_theta[idx_leaf,:]=[rputwrk[2]+np.pi*0.5,rputwrk[3]+np.pi*0.5]
coord_phi[idx_leaf,:]=[rputwrk[4],rputwrk[5]]
for idx_p in range(n3p):
for idx_t in range(n2p):
for idx_r in range(n1p):
bfieldfile.read(4)
data[idx_leaf,idx_p,idx_t,idx_r,:]=struct.unpack('>'+3*'d', bfieldfile.read(24))
bfieldfile.read(4)
idx_leaf+=1
bfieldfile.close()
return time,ntblks,nlblks,coord_logR,coord_theta,coord_phi,data
def interp_pointpair(x,x0,x1,y0,y1):
if x0==x1:
return y0
else:
return y0+(x-x0)*(y1-y0)/(x1-x0)
#Target angle in degrees
def phi_slice(target_phi,coord_logR,coord_theta,coord_phi,data):
nlblk,n1p,n2p,n3p,nvar=get_flicks_grid_dimensions(data)
target_phi_actual=target_phi*np.pi/180.0
num_blocks=len(coord_phi[:,0])
bounding_lblk=[]
for idx in range(num_blocks):
if coord_phi[idx,0]<coord_phi[idx,1]:
if coord_phi[idx,0]<=target_phi_actual and coord_phi[idx,1]>=target_phi_actual:
bounding_lblk.append(idx)
else:
if coord_phi[idx,0]>=target_phi_actual and coord_phi[idx,1]<=target_phi_actual:
bounding_lblk.append(idx)
new_logR=np.zeros((len(bounding_lblk)*n2p*n1p))
new_theta=np.zeros((len(bounding_lblk)*n2p*n1p))
new_data=np.zeros((len(bounding_lblk)*n2p*n1p,nvar))
for idx in range(len(bounding_lblk)):
idx_phi=int(np.floor((target_phi_actual-coord_phi[bounding_lblk[idx],0])/(coord_phi[bounding_lblk[idx],1]-coord_phi[bounding_lblk[idx],0])*(n3p-1)))
if idx_phi==n3p-1:
idx_phi=n3p-2
for idx_t in range(n2p):
for idx_r in range(n1p):
new_logR[idx*n2p*n1p+idx_t*n1p+idx_r]=idx_r/(n1p-1)*(coord_logR[bounding_lblk[idx],1]-coord_logR[bounding_lblk[idx],0])+coord_logR[bounding_lblk[idx],0]
new_theta[idx*n2p*n1p+idx_t*n1p+idx_r]=idx_t/(n2p-1)*(coord_theta[bounding_lblk[idx],1]-coord_theta[bounding_lblk[idx],0])+coord_theta[bounding_lblk[idx],0]
phi_low=idx_phi/(n3p-1)*(coord_phi[bounding_lblk[idx],1]-coord_phi[bounding_lblk[idx],0])+coord_phi[bounding_lblk[idx],0]
phi_high=(idx_phi+1)/(n3p-1)*(coord_phi[bounding_lblk[idx],1]-coord_phi[bounding_lblk[idx],0])+coord_phi[bounding_lblk[idx],0]
for idx_v in range(nvar):
new_data[idx*n2p*n1p+idx_t*n1p+idx_r,idx_v]=interp_pointpair(target_phi_actual,phi_low,phi_high,data[bounding_lblk[idx],idx_phi,idx_t,idx_r,idx_v], data[bounding_lblk[idx],idx_phi+1,idx_t,idx_r,idx_v])
return new_logR,new_theta,new_data
#Target in linear space (as opposed to logR)
def R_slice(target_R,coord_logR,coord_theta,coord_phi,data):
nlblk,n1p,n2p,n3p,nvar=get_flicks_grid_dimensions(data)
target_logR_actual=np.log(target_R)
num_blocks=len(coord_logR[:,0])
bounding_lblk=[]
for idx in range(num_blocks):
if coord_logR[idx,0]<coord_logR[idx,1]:
if coord_logR[idx,0]<=target_logR_actual and coord_logR[idx,1]>=target_logR_actual:
bounding_lblk.append(idx)
else:
if coord_logR[idx,0]>=target_logR_actual and coord_logR[idx,1]<=target_logR_actual:
bounding_lblk.append(idx)
new_theta=np.zeros((len(bounding_lblk)*n3p*n2p))
new_phi=np.zeros((len(bounding_lblk)*n3p*n2p))
new_data=np.zeros((len(bounding_lblk)*n3p*n2p,nvar))
for idx in range(len(bounding_lblk)):
idx_logR=int(np.floor((target_logR_actual-coord_logR[bounding_lblk[idx],0])/(coord_logR[bounding_lblk[idx],1]-coord_logR[bounding_lblk[idx],0])*(n1p-1)))
if idx_logR==n1p-1:
idx_logR=n1p-2
for idx_p in range(n3p):
for idx_t in range(n2p):
new_theta[idx*n3p*n2p+idx_p*n2p+idx_t]=idx_t/(n2p-1)*(coord_theta[bounding_lblk[idx],1]-coord_theta[bounding_lblk[idx],0])+coord_theta[bounding_lblk[idx],0]
new_phi[idx*n3p*n2p+idx_p*n2p+idx_t]=idx_p/(n3p-1)*(coord_phi[bounding_lblk[idx],1]-coord_phi[bounding_lblk[idx],0])+coord_phi[bounding_lblk[idx],0]
logR_low=idx_logR/(n1p-1)*(coord_logR[bounding_lblk[idx],1]-coord_logR[bounding_lblk[idx],0])+coord_logR[bounding_lblk[idx],0]
logR_high=(idx_logR+1)/(n1p-1)*(coord_logR[bounding_lblk[idx],1]-coord_logR[bounding_lblk[idx],0])+coord_logR[bounding_lblk[idx],0]
for idx_v in range(nvar):
new_data[idx*n3p*n2p+idx_p*n2p+idx_t,idx_v]=interp_pointpair(target_logR_actual,logR_low,logR_high,data[bounding_lblk[idx],idx_p,idx_t,idx_logR,idx_v], data[bounding_lblk[idx],idx_p,idx_t,idx_logR+1,idx_v])
return new_theta,new_phi,new_data
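#Usage sketch for the two slicers above (the flicks file name is hypothetical):
def _example_slices(file_directory):
    time,ntblks,nlblks,newgrd,coord_logR,coord_theta,coord_phi,data=read_flicks_file(file_directory,"flicks.0000000")
    #Meridional cut at phi=90 degrees
    cut_logR,cut_theta,cut_data=phi_slice(90.0,coord_logR,coord_theta,coord_phi,data)
    #Spherical shell at R=2 (linear radius; converted to logR internally)
    shell_theta,shell_phi,shell_data=R_slice(2.0,coord_logR,coord_theta,coord_phi,data)
    return cut_data,shell_data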
#Do not save the upper edge points (i.e. the very highest r, theta, phi points of each block);
#these points are duplicated at the edges of neighbouring blocks, so keeping them would double-count overlapping grid points
def get_full_unstructured_cart_grid_noedge(coord_logR,coord_theta,coord_phi,data):
nlblk,n1p,n2p,n3p,nvar=get_flicks_grid_dimensions(data)
n1pm1=n1p-1
n2pm1=n2p-1
n3pm1=n3p-1
new_logR=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_theta=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_phi=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_X=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_Y=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_Z=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_data=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1),nvar))
for idx in range(nlblk):
for idx_p in range(n3pm1):
for idx_t in range(n2pm1):
for idx_r in range(n1pm1):
new_logR[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_r/n1pm1*(coord_logR[idx,1]-coord_logR[idx,0])+coord_logR[idx,0]
new_theta[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_t/n2pm1*(coord_theta[idx,1]-coord_theta[idx,0])+coord_theta[idx,0]
new_phi[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_p/n3pm1*(coord_phi[idx,1]-coord_phi[idx,0])+coord_phi[idx,0]
for idx_v in range(nvar):
new_data[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r,idx_v]=data[idx,idx_p,idx_t,idx_r,idx_v]
for idx in range(nlblk*(n3p-1)*(n2p-1)*(n1p-1)):
R=np.exp(new_logR[idx])
sin_theta=np.sin(new_theta[idx])
new_X[idx]=R*sin_theta*np.cos(new_phi[idx])
new_Y[idx]=R*sin_theta*np.sin(new_phi[idx])
new_Z[idx]=R*np.cos(new_theta[idx])
return new_logR,new_theta,new_phi,new_X,new_Y,new_Z,new_data
def get_min_sep_sph_grid_noedge(coord_logR,coord_theta,coord_phi,data):
nlblk,n1p,n2p,n3p,nvar=get_flicks_grid_dimensions(data)
n1pm1=n1p-1
n2pm1=n2p-1
n3pm1=n3p-1
new_logR=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_theta=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_phi=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_X=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_Y=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_Z=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1)))
new_data=np.zeros((nlblk*(n3p-1)*(n2p-1)*(n1p-1),nvar))
min_sep_logR=abs(coord_logR[0,1]-coord_logR[0,0])
min_sep_theta=abs(coord_theta[0,1]-coord_theta[0,0])
min_sep_phi=abs(coord_phi[0,1]-coord_phi[0,0])
for idx in range(nlblk):
for idx_p in range(n3pm1):
for idx_t in range(n2pm1):
for idx_r in range(n1pm1):
new_logR[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_r/n1pm1*(coord_logR[idx,1]-coord_logR[idx,0])+coord_logR[idx,0]
new_theta[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_t/n2pm1*(coord_theta[idx,1]-coord_theta[idx,0])+coord_theta[idx,0]
new_phi[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r]=idx_p/n3pm1*(coord_phi[idx,1]-coord_phi[idx,0])+coord_phi[idx,0]
min_sep_logR=min(min_sep_logR,abs(coord_logR[idx,1]-coord_logR[idx,0]))
min_sep_theta=min(min_sep_theta,abs(coord_theta[idx,1]-coord_theta[idx,0]))
min_sep_phi=min(min_sep_phi,abs(coord_phi[idx,1]-coord_phi[idx,0]))
for idx_v in range(nvar):
new_data[idx*n3pm1*n2pm1*n1pm1+idx_p*n2pm1*n1pm1+idx_t*n1pm1+idx_r,idx_v]=data[idx,idx_p,idx_t,idx_r,idx_v]
min_sep_logR/=n1pm1
min_sep_theta/=n2pm1
min_sep_phi/=n3pm1
	final_logR=np.linspace(min(new_logR),max(new_logR),num=int((max(new_logR)-min(new_logR))/min_sep_logR))
	final_theta=np.linspace(min(new_theta),max(new_theta),num=int((max(new_theta)-min(new_theta))/min_sep_theta))
	final_phi=np.linspace(min(new_phi),max(new_phi),num=int((max(new_phi)-min(new_phi))/min_sep_phi))
from scipy.interpolate import griddata
print(len(final_logR),len(final_theta),len(final_phi))
grid_logR3,grid_theta3,grid_phi3=np.meshgrid(final_logR,final_theta,final_phi)
logRho_grid=griddata((new_logR,new_theta,new_phi), np.log(new_data[:,0]), (grid_logR3,grid_theta3,grid_phi3), method='linear')#,fill_value=min(logRho_cart))
np.save("Regular_grid_logR.np",final_logR)
np.save("Regular_grid_theta.np",final_theta)
np.save("Regular_grid_phi.np",final_phi)
np.save("Regular_grid_rho.np",logRho_grid)
return grid_logR3,grid_theta3,grid_phi3,logRho_grid
#Must be a cuboid
#f000=f(x0,y0,z0)
#f100=f(x1,y0,z0) etc
def Trilinear_interpolation(x,y,z,x0,x1,y0,y1,z0,z1,f000,f001,f010,f011,f100,f101,f110,f111):
x_shift=x-x0
y_shift=y-y0
z_shift=z-z0
delta_x=x1-x0
delta_y=y1-y0
delta_z=z1-z0
a1=(f100-f000)/delta_x
a2=(f010-f000)/delta_y
a3=(f001-f000)/delta_z
a4=(f110-f100-f010+f000)/delta_y/delta_x
	a5=(f101-f100-f001+f000)/delta_z/delta_x
a6=(f011-f001-f010+f000)/delta_z/delta_y
a7=(f111-f110-f101-f011+f001+f010+f100-f000)/delta_x/delta_y/delta_z
return f000+a1*x_shift+a2*y_shift+a3*z_shift+a4*x_shift*y_shift+a5*x_shift*z_shift+a6*y_shift*z_shift+a7*x_shift*y_shift*z_shift
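#Quick sanity-check sketch for the interpolator (arbitrary test values): a corner
#query returns that corner's value, and the cuboid centre returns the mean of the
#eight corners (the centre check assumes the corrected mixed x-z term above).
def _check_trilinear():
    f000,f001,f010,f011,f100,f101,f110,f111=0.,1.,2.,3.,4.,5.,6.,7.
    assert Trilinear_interpolation(0,0,0,0,1,0,1,0,1,f000,f001,f010,f011,f100,f101,f110,f111)==f000
    centre=Trilinear_interpolation(0.5,0.5,0.5,0,1,0,1,0,1,f000,f001,f010,f011,f100,f101,f110,f111)
    assert abs(centre-(f000+f001+f010+f011+f100+f101+f110+f111)/8.0)<1e-12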
#Assumes array arr is monotonically increasing
def ref_idx_search(arr,target_value,prev_idx):
idx=prev_idx
if arr[idx]<=target_value:
while idx<len(arr)-2 and arr[idx+1]<=target_value:
idx+=1
else:
while idx>0 and arr[idx-1]>target_value:
idx-=1
return idx
def o1_field_trace(pos_in,X,Y,Z,B,dt):
delta_X=X[1]-X[0]
delta_Y=Y[1]-Y[0]
delta_Z=Z[1]-Z[0]
x_0=pos_in[0]
y_0=pos_in[1]
z_0=pos_in[2]
idx_x=min(max(int(np.floor((x_0-X[0])/delta_X)),0),len(X)-2)
idx_y=min(max(int(np.floor((y_0-Y[0])/delta_Y)),0),len(Y)-2)
idx_z=min(max(int(np.floor((z_0-Z[0])/delta_Z)),0),len(Z)-2)
B_1=Trilinear_interpolation(x_0,y_0,z_0,X[idx_x],X[idx_x+1],Y[idx_y],Y[idx_y+1],Z[idx_z],Z[idx_z+1],B[idx_x,idx_y,idx_z,:], B[idx_x,idx_y,idx_z+1,:], B[idx_x,idx_y+1,idx_z,:], B[idx_x,idx_y+1,idx_z+1,:], B[idx_x+1,idx_y,idx_z,:], B[idx_x+1,idx_y,idx_z+1,:], B[idx_x+1,idx_y+1,idx_z,:], B[idx_x+1,idx_y+1,idx_z+1,:])
return pos_in+dt*B_1/np.sqrt(B_1[0]*B_1[0]+B_1[1]*B_1[1]+B_1[2]*B_1[2])
def rk4_field_trace_cartesian(pos_in,X,Y,Z,B,dt):
delta_X=X[1]-X[0]
delta_Y=Y[1]-Y[0]
delta_Z=Z[1]-Z[0]
x_0=pos_in[0]
y_0=pos_in[1]
z_0=pos_in[2]
idx_x=min(max(int(np.floor((x_0-X[0])/delta_X)),0),len(X)-2)
idx_y=min(max(int(np.floor((y_0-Y[0])/delta_Y)),0),len(Y)-2)
idx_z=min(max(int(np.floor((z_0-Z[0])/delta_Z)),0),len(Z)-2)
B_1=Trilinear_interpolation(x_0,y_0,z_0,X[idx_x],X[idx_x+1],Y[idx_y],Y[idx_y+1],Z[idx_z],Z[idx_z+1],B[idx_x,idx_y,idx_z,:], B[idx_x,idx_y,idx_z+1,:], B[idx_x,idx_y+1,idx_z,:], B[idx_x,idx_y+1,idx_z+1,:], B[idx_x+1,idx_y,idx_z,:], B[idx_x+1,idx_y,idx_z+1,:], B[idx_x+1,idx_y+1,idx_z,:], B[idx_x+1,idx_y+1,idx_z+1,:])
k_1=dt*B_1/np.sqrt(B_1[0]*B_1[0]+B_1[1]*B_1[1]+B_1[2]*B_1[2])
x_1=pos_in[0]+k_1[0]*0.5
y_1=pos_in[1]+k_1[1]*0.5
z_1=pos_in[2]+k_1[2]*0.5
idx_x=min(max(int(np.floor((x_1-X[0])/delta_X)),0),len(X)-2)
idx_y=min(max(int(np.floor((y_1-Y[0])/delta_Y)),0),len(Y)-2)
idx_z=min(max(int(np.floor((z_1-Z[0])/delta_Z)),0),len(Z)-2)
B_2=Trilinear_interpolation(x_1,y_1,z_1,X[idx_x],X[idx_x+1],Y[idx_y],Y[idx_y+1],Z[idx_z],Z[idx_z+1],B[idx_x,idx_y,idx_z,:], B[idx_x,idx_y,idx_z+1,:], B[idx_x,idx_y+1,idx_z,:], B[idx_x,idx_y+1,idx_z+1,:], B[idx_x+1,idx_y,idx_z,:], B[idx_x+1,idx_y,idx_z+1,:], B[idx_x+1,idx_y+1,idx_z,:], B[idx_x+1,idx_y+1,idx_z+1,:])
k_2=dt*B_2/np.sqrt(B_2[0]*B_2[0]+B_2[1]*B_2[1]+B_2[2]*B_2[2])
x_2=pos_in[0]+k_2[0]*0.5
y_2=pos_in[1]+k_2[1]*0.5
z_2=pos_in[2]+k_2[2]*0.5
idx_x=min(max(int(np.floor((x_2-X[0])/delta_X)),0),len(X)-2)
idx_y=min(max(int(np.floor((y_2-Y[0])/delta_Y)),0),len(Y)-2)
idx_z=min(max(int(np.floor((z_2-Z[0])/delta_Z)),0),len(Z)-2)
B_3=Trilinear_interpolation(x_2,y_2,z_2,X[idx_x],X[idx_x+1],Y[idx_y],Y[idx_y+1],Z[idx_z],Z[idx_z+1],B[idx_x,idx_y,idx_z,:], B[idx_x,idx_y,idx_z+1,:], B[idx_x,idx_y+1,idx_z,:], B[idx_x,idx_y+1,idx_z+1,:], B[idx_x+1,idx_y,idx_z,:], B[idx_x+1,idx_y,idx_z+1,:], B[idx_x+1,idx_y+1,idx_z,:], B[idx_x+1,idx_y+1,idx_z+1,:])
k_3=dt*B_3/np.sqrt(B_3[0]*B_3[0]+B_3[1]*B_3[1]+B_3[2]*B_3[2])
x_3=pos_in[0]+k_3[0]
y_3=pos_in[1]+k_3[1]
z_3=pos_in[2]+k_3[2]
idx_x=min(max(int(np.floor((x_3-X[0])/delta_X)),0),len(X)-2)
idx_y=min(max(int(np.floor((y_3-Y[0])/delta_Y)),0),len(Y)-2)
idx_z=min(max(int(np.floor((z_3-Z[0])/delta_Z)),0),len(Z)-2)
B_4=Trilinear_interpolation(x_3,y_3,z_3,X[idx_x],X[idx_x+1],Y[idx_y],Y[idx_y+1],Z[idx_z],Z[idx_z+1],B[idx_x,idx_y,idx_z,:], B[idx_x,idx_y,idx_z+1,:], B[idx_x,idx_y+1,idx_z,:], B[idx_x,idx_y+1,idx_z+1,:], B[idx_x+1,idx_y,idx_z,:], B[idx_x+1,idx_y,idx_z+1,:], B[idx_x+1,idx_y+1,idx_z,:], B[idx_x+1,idx_y+1,idx_z+1,:])
k_4=dt*B_4/np.sqrt(B_4[0]*B_4[0]+B_4[1]*B_4[1]+B_4[2]*B_4[2])
return pos_in+(k_1+2.0*k_2+2.0*k_3+k_4)/6.0
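#Sketch of tracing a full field line with the RK4 stepper above; X, Y, Z are the
#regular 1D grid axes, B has shape (len(X),len(Y),len(Z),3), and dt/n_steps are illustrative.
def trace_field_line(pos_start,X,Y,Z,B,dt=0.01,n_steps=1000):
    line=[np.array(pos_start,dtype=float)]
    for idx in range(n_steps):
        line.append(rk4_field_trace_cartesian(line[-1],X,Y,Z,B,dt))
        #Stop once the point leaves the gridded volume
        x,y,z=line[-1]
        if not (X[0]<=x<=X[-1] and Y[0]<=y<=Y[-1] and Z[0]<=z<=Z[-1]):
            break
    return np.array(line)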
def rk4_field_trace_spherical(pos_in,R,theta,phi,B,dt,idx_r_p,idx_t_p,idx_p_p):
r_0=pos_in[0]
t_0=pos_in[1]
p_0=pos_in[2]
sin_th=np.sin(t_0)
cos_th=np.cos(t_0)
sin_ph=np.sin(p_0)
cos_ph=
|
np.cos(p_0)
|
numpy.cos
|
import numpy as np
from scipy import stats
from fastubl import *
def test_basics():
max_bg_sigma = 5
fu = FastUnbinned(
true_mu=[5, 10],
dists=[stats.norm(),
stats.uniform(loc=-max_bg_sigma,
scale=2 * max_bg_sigma)])
n_toys = 1000
(bf, result) = fu.toy_llrs(n_trials=n_toys)
# Length matches
assert len(bf) == n_toys
assert len(result) == n_toys
# No Nans
assert np.sum(np.isnan(bf)) == 0
assert np.sum(np.isnan(result)) == 0
# Best fits >= 0
assert np.all(bf >= 0)
signal_hyps = np.arange(10)
lower, upper = fu.toy_intervals(signal_hyps, n_trials=n_toys)
# Length matches
assert len(lower) == n_toys
assert len(upper) == n_toys
# Only signal hypotheses in intervals
assert np.all(np.in1d(lower, signal_hyps))
assert np.all(np.in1d(upper, signal_hyps))
# Lower limits never above upper limits
assert
|
np.all(lower <= upper)
|
numpy.all
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtWidgets
from src.gui.sharedcomnponets.sharedcomponets import GUIToolKit
from src.simpleFOCConnector import SimpleFOCDevice
class SimpleFOCGraphicWidget(QtWidgets.QGroupBox):
disconnectedState = 0
initialConnectedState = 1
connectedPausedState = 2
connectedPlottingStartedState = 3
signals = ['Target', 'Vq','Vd','Cq','Cd','Vel','Angle']
    signal_tooltip = ['Target', 'Voltage Q [Volts]','Voltage D [Volts]','Current Q [milliAmps]','Current D [milliAmps]','Velocity [rad/sec]','Angle [rad]']
signalColors = [GUIToolKit.RED_COLOR, GUIToolKit.BLUE_COLOR, GUIToolKit.PURPLE_COLOR,GUIToolKit.YELLOW_COLOR, GUIToolKit.MAROON_COLOR, GUIToolKit.ORANGE_COLOR, GUIToolKit.GREEN_COLOR]
signalIcons = ['reddot', 'bluedot','purpledot', 'yellowdot', 'maroondot', 'orangedot', 'greendot']
def __init__(self, parent=None):
super().__init__(parent)
self.setObjectName('plotWidget')
self.setTitle('Real time motor variables: ')
self.horizontalLayout = QtWidgets.QVBoxLayout(self)
self.device = SimpleFOCDevice.getInstance()
self.numberOfSamples = 300
pg.setConfigOptions(antialias=True)
self.plotWidget = pg.PlotWidget()
self.plotWidget.showGrid(x=True, y=True, alpha=0.5)
self.plotWidget.addLegend()
# self.legend = pg.LegendItem()
# self.legend.setParentItem(self.plotWidget)
self.timeArray = np.arange(-self.numberOfSamples, 0, 1)
self.controlPlotWidget = ControlPlotPanel(controllerPlotWidget=self)
self.signalDataArrays = []
self.signalPlots = []
self.signalPlotFlags = []
for (sig, sigColor, checkBox, tooltip) in zip(self.signals, self.signalColors,self.controlPlotWidget.signalCheckBox, self.signal_tooltip):
# define signal plot data array
self.signalDataArrays.append(np.zeros(self.numberOfSamples))
# configure signal plot parameters
signalPen = pg.mkPen(color=sigColor, width=1.5)
self.signalPlots.append(pg.PlotDataItem(self.timeArray,
self.signalDataArrays[-1],
pen=signalPen, name=tooltip))
self.plotWidget.addItem(self.signalPlots[-1])
# is plotted flag
self.signalPlotFlags.append(True)
# add callback
checkBox.stateChanged.connect(self.signalPlotFlagUpdate)
self.horizontalLayout.addWidget(self.plotWidget)
self.horizontalLayout.addWidget(self.controlPlotWidget)
self.device.commProvider.monitoringDataReceived.connect(
self.upDateGraphic)
self.currentStatus = self.disconnectedState
self.controlPlotWidget.pauseContinueButton.setDisabled(True)
self.device.addConnectionStateListener(self)
self.connectionStateChanged(self.device.isConnected)
def connectionStateChanged(self, deviceConnected):
if deviceConnected is True:
self.currentStatus = self.initialConnectedState
self.enabeUI()
else:
self.controlPlotWidget.startStoPlotAction()
self.controlPlotWidget.stopAndResetPlot()
self.currentStatus = self.disconnectedState
self.disableUI()
def enabeUI(self):
self.setEnabled(True)
def disableUI(self):
self.setEnabled(False)
def signalPlotFlagUpdate(self):
self.controlPlotWidget.updateMonitorVariables()
for i, (checkBox, plotFlag) in enumerate(zip(self.controlPlotWidget.signalCheckBox, self.signalPlotFlags)):
if checkBox.isChecked() and (not plotFlag):
self.signalPlotFlags[i] = True
self.plotWidget.addItem( self.signalPlots[i] )
elif (not checkBox.isChecked()) and plotFlag:
self.signalPlotFlags[i] = False
self.plotWidget.removeItem( self.signalPlots[i] )
def connectioStatusUpdate(self, connectedFlag):
if connectedFlag:
self.currentStatus = self.initialConnectedState
else:
self.currentStatus = self.disconnectedState
def upDateGraphic(self, signalList):
if self.currentStatus is self.connectedPlottingStartedState or \
self.currentStatus is self.connectedPausedState:
signals = np.array(signalList, dtype=float)
signalIndex = 0
enabled = np.where(np.array(self.signalPlotFlags) == True)[0]
if(len(enabled) != len(signals)):
                logging.warning('Received corrupted data')
return
else:
for i, ind in enumerate(enabled):
self.signalDataArrays[ind] = np.roll(self.signalDataArrays[ind], -1)
self.signalDataArrays[ind][-1] = signals[i]
if self.currentStatus is self.connectedPlottingStartedState:
self.updatePlot()
def computeStatic(self, array):
mean = np.mean(array)
std = np.std(array)
max = np.max(array)
min =
|
np.min(array)
|
numpy.min
|
import numpy as np
import pytest
import tensorflow as tf
from packaging.version import parse as version
from tensorflow.keras.models import load_model
from tf_keras_vis.activation_maximization import \
ActivationMaximization as CurrentActivationMaximization # noqa: E501
from tf_keras_vis.activation_maximization.input_modifiers import Jitter, Rotate, Scale
from tf_keras_vis.activation_maximization.legacy import \
ActivationMaximization as LegacyActivationMaximization # noqa: E501
from tf_keras_vis.activation_maximization.regularizers import Norm, TotalVariation2D
from tf_keras_vis.utils import listify
from tf_keras_vis.utils.regularizers import LegacyRegularizer
from tf_keras_vis.utils.regularizers import Norm as LegacyNorm
from tf_keras_vis.utils.regularizers import TotalVariation2D as LegacyTotalVariation2D
from tf_keras_vis.utils.scores import BinaryScore, CategoricalScore
from tf_keras_vis.utils.test import (NO_ERROR, MockCallback, MockLegacyCallback, assert_raises,
dummy_sample, mock_conv_model,
mock_conv_model_with_float32_output, mock_multiple_io_model,
score_with_list, score_with_tuple)
ActivationMaximization = CurrentActivationMaximization
@pytest.fixture(scope='function',
params=[CurrentActivationMaximization, LegacyActivationMaximization])
def legacy(request):
global ActivationMaximization
ActivationMaximization = request.param
yield
ActivationMaximization = CurrentActivationMaximization
class TestActivationMaximization():
@pytest.mark.parametrize("scores,expected_error", [
(None, ValueError),
(score_with_tuple, NO_ERROR),
(score_with_list, NO_ERROR),
(CategoricalScore(0), NO_ERROR),
([None], ValueError),
([score_with_tuple], NO_ERROR),
([score_with_list], NO_ERROR),
([CategoricalScore(0)], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_score_is_(self, scores, expected_error, conv_model):
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(scores)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("seed_input,expected", [
(None, (1, 8, 8, 3)),
(dummy_sample((8, 8, 3)), (1, 8, 8, 3)),
([dummy_sample((8, 8, 3))], [(1, 8, 8, 3)]),
(dummy_sample((1, 8, 8, 3)), (1, 8, 8, 3)),
([dummy_sample((1, 8, 8, 3))], [(1, 8, 8, 3)]),
(dummy_sample((4, 8, 8, 3)), (4, 8, 8, 3)),
([dummy_sample((4, 8, 8, 3))], [(4, 8, 8, 3)]),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_seed_input_is_(self, seed_input, expected, conv_model):
activation_maximization = ActivationMaximization(conv_model)
result = activation_maximization(CategoricalScore(0), seed_input=seed_input)
if type(expected) is list:
assert type(result) == list
result = result[0]
expected = expected[0]
assert result.shape == expected
@pytest.mark.parametrize("input_range,,expected_error", [
(None, NO_ERROR),
((None, None), NO_ERROR),
((0, None), NO_ERROR),
((None, 255), NO_ERROR),
((0, 255), NO_ERROR),
((-1.0, 1.0), NO_ERROR),
((-1.0, 255), TypeError),
((0, 1.0), TypeError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test_call__if_input_range_is_(self, input_range, expected_error, conv_model):
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), input_range=input_range)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("input_modifiers,expected_error", [
(None, NO_ERROR),
(Jitter(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([Jitter()], NO_ERROR),
([Jitter(), None], TypeError),
([None, Jitter()], TypeError),
([Jitter(), Rotate(), Scale()], NO_ERROR),
([[]], NO_ERROR),
([[None]], TypeError),
([[Jitter()]], NO_ERROR),
([[Jitter(), None]], TypeError),
([[None, Jitter()]], TypeError),
([[Jitter(), Rotate(), Scale()]], NO_ERROR),
([[Jitter(), Rotate(), Scale()], [Jitter(), Rotate(), Scale()]], ValueError),
(dict(input_1=None), NO_ERROR),
(dict(input_1=Jitter()), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=[None]), TypeError),
(dict(input_1=[Jitter()]), NO_ERROR),
(dict(input_1=[Jitter(), None]), TypeError),
(dict(input_1=[None, Jitter()]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()]), NO_ERROR),
(dict(input_2=[Jitter(), Rotate(), Scale()]), ValueError),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[Jitter(), Rotate(),
Scale()]), ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_input_modifiers_are_(self, input_modifiers, expected_error, conv_model):
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), input_modifiers=input_modifiers)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("regularizers,expected_error", [
(None, NO_ERROR),
(TotalVariation2D(), NO_ERROR),
(LegacyTotalVariation2D(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([TotalVariation2D()], NO_ERROR),
([LegacyNorm()], NO_ERROR),
([TotalVariation2D(), None], TypeError),
([None, TotalVariation2D()], TypeError),
([TotalVariation2D(), LegacyTotalVariation2D()], ValueError),
([TotalVariation2D(), Norm()], NO_ERROR),
(dict(input_1=None), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=TotalVariation2D()), NO_ERROR),
(dict(input_1=LegacyTotalVariation2D()), ValueError),
(dict(input_2=None), ValueError),
(dict(input_2=[]), ValueError),
(dict(input_2=TotalVariation2D()), ValueError),
(dict(input_2=LegacyTotalVariation2D()), ValueError),
(dict(input_1=TotalVariation2D(), input_2=TotalVariation2D()), ValueError),
(dict(input_1=LegacyTotalVariation2D(), input_2=TotalVariation2D()), ValueError),
(dict(input_1=LegacyTotalVariation2D(), input_2=LegacyTotalVariation2D()), ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizer_is_(self, regularizers, expected_error, conv_model):
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), regularizers=regularizers)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("regularizer_container", [list, tuple, dict])
@pytest.mark.parametrize("regularizers,expected_error", [
([[]], NO_ERROR),
([[None]], TypeError),
([[TotalVariation2D()]], NO_ERROR),
([[LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), None]], TypeError),
([[None, TotalVariation2D()]], TypeError),
([[TotalVariation2D(), Norm()]], NO_ERROR),
([[TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[LegacyNorm(), LegacyTotalVariation2D()]], ValueError),
([[], [Norm()]], ValueError),
([[None], [Norm()]], ValueError),
([[TotalVariation2D()], [Norm()]], ValueError),
([[LegacyTotalVariation2D()], [Norm()]], ValueError),
([[TotalVariation2D(), None], [Norm()]], ValueError),
([[None, TotalVariation2D()], [Norm()]], ValueError),
([[TotalVariation2D(), Norm()], [Norm()]], ValueError),
([[TotalVariation2D(), LegacyTotalVariation2D()], [Norm()]], ValueError),
([[Norm()], []], ValueError),
([[Norm()], [None]], ValueError),
([[Norm()], [TotalVariation2D()]], ValueError),
([[Norm()], [LegacyTotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), None]], ValueError),
([[Norm()], [None, TotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), Norm()]], ValueError),
([[Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), LegacyTotalVariation2D()], None], ValueError),
([None, [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizers_are_(self, regularizer_container, regularizers, expected_error,
conv_model):
if regularizer_container is tuple:
regularizers = tuple(regularizers)
if regularizer_container is dict:
regularizers = zip(['input_1', 'input_2'], regularizers)
regularizers = dict(regularizers)
            has_legacy = ((isinstance(r, LegacyRegularizer) for r in listify(_regularizers))
                          for _regularizers in regularizers.values())
            if any((any(f) for f in has_legacy)):
                expected_error = ValueError
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), regularizers=regularizers)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__with_gradient_modifier(self, conv_model):
activation_maximization = ActivationMaximization(conv_model)
result = activation_maximization(CategoricalScore(0), gradient_modifier=lambda x: x * 0.0)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("is_legacy", [False, True])
@pytest.mark.parametrize("callbacks,expected,expected_error", [
(None, [], NO_ERROR),
(MockCallback(), [True], NO_ERROR),
([], [], NO_ERROR),
([MockCallback()], [True], NO_ERROR),
([MockCallback(), MockCallback()], [True, True], NO_ERROR),
([MockCallback(raise_error_on_begin=True),
MockCallback()], [False, False], ValueError),
([MockCallback(), MockCallback(raise_error_on_begin=True)], [True, False], ValueError),
([MockCallback(raise_error_on_call=True),
MockCallback()], [True, True], ValueError),
([MockCallback(), MockCallback(raise_error_on_call=True)], [True, True], ValueError),
([MockCallback(raise_error_on_end=True),
MockCallback()], [True, True], ValueError),
([MockCallback(raise_error_on_end=True),
MockCallback()], [True, True], ValueError),
([MockCallback(), MockCallback(raise_error_on_end=True)], [True, True], ValueError),
([MockCallback(raise_error_on_end=True),
MockCallback(raise_error_on_end=True)], [True, True], ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__with_callbacks(self, is_legacy, callbacks, expected, expected_error,
conv_model):
if is_legacy:
if isinstance(callbacks, MockCallback):
callbacks = MockLegacyCallback(callbacks)
if isinstance(callbacks, list):
callbacks = [
MockLegacyCallback(c) if isinstance(c, MockCallback) else c for c in callbacks
]
activation_maximization = ActivationMaximization(conv_model)
try:
result = activation_maximization(CategoricalScore(0), callbacks=callbacks)
assert expected_error == NO_ERROR
assert result.shape == (1, 8, 8, 3)
except ValueError:
assert expected_error != NO_ERROR
finally:
for c, e in zip(listify(callbacks), expected):
if is_legacy:
assert c.callback.on_end_was_called == e
else:
assert c.on_end_was_called == e
@pytest.mark.parametrize("activation_modifiers,modified,expected_error", [
(None, False, NO_ERROR),
(lambda x: np.ones(x.shape, np.float), True, NO_ERROR),
(dict(input_1=None), False, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float)), True, NO_ERROR),
(dict(input_2=lambda x: np.ones(x.shape, np.float)), False, ValueError),
(dict(input_1=lambda x: np.ones(x.shape, np.float),
input_2=lambda x: np.ones(x.shape, np.float)), False, ValueError),
])
@pytest.mark.usefixtures("mixed_precision")
def test__call__with_activation_modifiers(self, activation_modifiers, modified, expected_error,
conv_model):
seed_inputs = dummy_sample((1, 8, 8, 3))
activation_maximization = ActivationMaximization(conv_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), seed_input=seed_inputs)
assert not np.all(result == 0.0)
result = activation_maximization(CategoricalScore(0),
seed_input=seed_inputs,
activation_modifiers=activation_modifiers)
if modified:
assert np.all(result == 1.0)
else:
assert not np.all(result == 1.0)
class TestActivationMaximizationWithMultipleInputsModel():
@pytest.mark.parametrize("scores,expected_error", [
(None, ValueError),
(score_with_tuple, NO_ERROR),
(score_with_list, NO_ERROR),
(CategoricalScore(0), NO_ERROR),
([None], ValueError),
([score_with_tuple], NO_ERROR),
([score_with_list], NO_ERROR),
([CategoricalScore(0)], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_score_is_(self, scores, expected_error, multiple_inputs_model):
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(scores)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("seed_inputs,expected_error", [
(None, NO_ERROR),
(dummy_sample((1, 8, 8, 3)), ValueError),
([dummy_sample((1, 8, 8, 3))], ValueError),
([dummy_sample((1, 8, 8, 3)), None], ValueError),
([None, dummy_sample((1, 10, 10, 3))], ValueError),
([dummy_sample((8, 8, 3)), dummy_sample((10, 10, 3))], NO_ERROR),
([dummy_sample((1, 8, 8, 3)), dummy_sample((10, 10, 3))], NO_ERROR),
([dummy_sample((8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
([dummy_sample((4, 8, 8, 3)), dummy_sample((4, 10, 10, 3))], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_seed_input_is_(self, seed_inputs, expected_error, multiple_inputs_model):
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), seed_input=seed_inputs)
if seed_inputs is not None and seed_inputs[0].shape[0] == 4:
assert result[0].shape == (4, 8, 8, 3)
assert result[1].shape == (4, 10, 10, 3)
else:
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("input_modifiers,expected_error", [
(None, NO_ERROR),
(Jitter(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([Jitter()], NO_ERROR),
([Jitter(), None], TypeError),
([None, Jitter()], TypeError),
([Jitter(), Rotate(), Scale()], NO_ERROR),
([[]], NO_ERROR),
([[None]], TypeError),
([[Jitter()]], NO_ERROR),
([[Jitter(), None]], TypeError),
([[None, Jitter()]], TypeError),
([[Jitter(), Rotate(), Scale()]], NO_ERROR),
([[Jitter(), Rotate(), Scale()], []], NO_ERROR),
([[Jitter(), Rotate(), Scale()], [None]], TypeError),
([[Jitter(), Rotate(), Scale()], [Jitter()]], NO_ERROR),
([[Jitter(), Rotate(), Scale()], [Jitter(), None]], TypeError),
([[Jitter(), Rotate(), Scale()], [None, Jitter()]], TypeError),
([[Jitter(), Rotate(), Scale()], [Jitter(), Rotate(), Scale()]], NO_ERROR),
(dict(input_1=None), NO_ERROR),
(dict(input_1=Jitter()), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=[None]), TypeError),
(dict(input_1=[Jitter()]), NO_ERROR),
(dict(input_1=[Jitter(), None]), TypeError),
(dict(input_1=[None, Jitter()]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=None), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=Jitter()), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[None]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[Jitter()]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[Jitter(), None]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[None, Jitter()]), TypeError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_input_modifiers_are_(self, input_modifiers, expected_error,
multiple_inputs_model):
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), input_modifiers=input_modifiers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("regularizers,expected_error", [
(None, NO_ERROR),
(TotalVariation2D(), NO_ERROR),
(LegacyTotalVariation2D(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([TotalVariation2D()], NO_ERROR),
([LegacyTotalVariation2D()], NO_ERROR),
([TotalVariation2D(), None], TypeError),
([None, TotalVariation2D()], TypeError),
([TotalVariation2D(), LegacyTotalVariation2D()], ValueError),
([TotalVariation2D(), Norm()], NO_ERROR),
([LegacyTotalVariation2D(), LegacyNorm()], NO_ERROR),
(dict(input_1=None), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=TotalVariation2D()), NO_ERROR),
(dict(input_1=LegacyTotalVariation2D()), ValueError),
(dict(input_2=None), NO_ERROR),
(dict(input_2=[]), NO_ERROR),
(dict(input_2=TotalVariation2D()), NO_ERROR),
(dict(input_2=LegacyTotalVariation2D()), ValueError),
(dict(input_3=None), ValueError),
(dict(input_3=[]), ValueError),
(dict(input_3=TotalVariation2D()), ValueError),
(dict(input_3=LegacyTotalVariation2D()), ValueError),
(dict(input_1=TotalVariation2D(), input_2=TotalVariation2D()), NO_ERROR),
(dict(input_1=LegacyTotalVariation2D(), input_2=TotalVariation2D()), ValueError),
(dict(input_1=LegacyTotalVariation2D(), input_2=LegacyTotalVariation2D()), ValueError),
(dict(input_1=TotalVariation2D(), input_2=TotalVariation2D(),
input_3=TotalVariation2D()), ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizer_is_(self, regularizers, expected_error, multiple_inputs_model):
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), regularizers=regularizers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("regularizer_container", [list, tuple, dict])
@pytest.mark.parametrize("regularizers,expected_error", [
([[]], NO_ERROR),
([[None]], TypeError),
([[TotalVariation2D()]], NO_ERROR),
([[LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), None]], TypeError),
([[None, TotalVariation2D()]], TypeError),
([[TotalVariation2D(), Norm()]], NO_ERROR),
([[TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[LegacyNorm(), LegacyTotalVariation2D()]], ValueError),
([[], [Norm()]], NO_ERROR),
([[None], [Norm()]], TypeError),
([[TotalVariation2D()], [Norm()]], NO_ERROR),
([[LegacyTotalVariation2D()], [Norm()]], ValueError),
([[TotalVariation2D(), None], [Norm()]], TypeError),
([[None, TotalVariation2D()], [Norm()]], TypeError),
([[TotalVariation2D(), Norm()], [Norm()]], NO_ERROR),
([[TotalVariation2D(), LegacyTotalVariation2D()], [Norm()]], ValueError),
([[Norm()], []], NO_ERROR),
([[Norm()], [None]], TypeError),
([[Norm()], [TotalVariation2D()]], NO_ERROR),
([[Norm()], [LegacyTotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), None]], TypeError),
([[Norm()], [None, TotalVariation2D()]], TypeError),
([[Norm()], [TotalVariation2D(), Norm()]], NO_ERROR),
([[Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), LegacyTotalVariation2D()], None], ValueError),
([None, [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[Norm()], [Norm()], []], ValueError),
([[Norm()], [Norm()], [None]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [LegacyTotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), None]], ValueError),
([[Norm()], [Norm()], [None, TotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), Norm()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), LegacyTotalVariation2D()], None], ValueError),
([None, [Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizers_are_(self, regularizer_container, regularizers, expected_error,
multiple_inputs_model):
if regularizer_container is tuple:
regularizers = tuple(regularizers)
if regularizer_container is dict:
regularizers = zip(['input_1', 'input_2', 'input_3'], regularizers)
regularizers = dict(regularizers)
            has_legacy = ((isinstance(r, LegacyRegularizer) for r in listify(_regularizers))
                          for _regularizers in regularizers.values())
            if any((any(f) for f in has_legacy)):
                expected_error = ValueError
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), regularizers=regularizers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("activation_modifiers,modified_0,modified_1,expected_error", [
(None, False, False, NO_ERROR),
(lambda x: np.ones(x.shape, np.float), True, False, NO_ERROR),
(dict(input_1=None), False, False, NO_ERROR),
(dict(input_2=None), False, False, NO_ERROR),
(dict(input_1=None, input_2=None), False, False, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float)), True, False, NO_ERROR),
(dict(input_2=lambda x: np.ones(x.shape, np.float)), False, True, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float), input_2=None), True, False, NO_ERROR),
(dict(input_1=None, input_2=lambda x: np.ones(x.shape, np.float)), False, True, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float),
input_2=lambda x: np.ones(x.shape, np.float)), True, True, NO_ERROR),
(dict(input_1=None, input_2=None,
input_3=lambda x: np.ones(x.shape, np.float)), False, False, ValueError),
])
@pytest.mark.usefixtures("mixed_precision")
def test__call__with_activation_modifiers(self, activation_modifiers, modified_0, modified_1,
expected_error, multiple_inputs_model):
seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
activation_maximization = ActivationMaximization(multiple_inputs_model)
with assert_raises(expected_error):
result = activation_maximization(CategoricalScore(0), seed_input=seed_inputs)
assert not np.all(result[0] == 1.0)
assert not np.all(result[1] == 1.0)
result = activation_maximization(CategoricalScore(0),
seed_input=seed_inputs,
activation_modifiers=activation_modifiers)
if modified_0:
assert np.all(result[0] == 1.0)
else:
assert not np.all(result[0] == 1.0)
if modified_1:
assert np.all(result[1] == 1.0)
else:
assert not np.all(result[1] == 1.0)
class TestActivationMaximizationWithMultipleOutputsModel():
@pytest.mark.parametrize("scores,expected_error", [
(None, ValueError),
(score_with_tuple, ValueError),
(score_with_list, ValueError),
(CategoricalScore(0), ValueError),
([None], ValueError),
([score_with_tuple], ValueError),
([score_with_list], ValueError),
([CategoricalScore(0)], ValueError),
([CategoricalScore(0), None], ValueError),
([CategoricalScore(0), score_with_tuple], NO_ERROR),
([CategoricalScore(0), score_with_list], NO_ERROR),
([CategoricalScore(0), BinaryScore(False)], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_score_is_(self, scores, expected_error, multiple_outputs_model):
activation_maximization = ActivationMaximization(multiple_outputs_model)
with assert_raises(expected_error):
result = activation_maximization(scores)
assert result.shape == (1, 8, 8, 3)
@pytest.mark.parametrize("seed_input,expected", [
(None, (1, 8, 8, 3)),
(dummy_sample((8, 8, 3)), (1, 8, 8, 3)),
([dummy_sample((8, 8, 3))], [(1, 8, 8, 3)]),
(dummy_sample((1, 8, 8, 3)), (1, 8, 8, 3)),
([dummy_sample((1, 8, 8, 3))], [(1, 8, 8, 3)]),
(dummy_sample((4, 8, 8, 3)), (4, 8, 8, 3)),
([dummy_sample((4, 8, 8, 3))], [(4, 8, 8, 3)]),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_seed_input_is_(self, seed_input, expected, multiple_outputs_model):
activation_maximization = ActivationMaximization(multiple_outputs_model)
result = activation_maximization(
[CategoricalScore(1), BinaryScore(False)], seed_input=seed_input)
if type(expected) is list:
assert type(result) == list
result = result[0]
expected = expected[0]
assert result.shape == expected
class TestActivationMaximizationWithMultipleIOModel():
@pytest.mark.parametrize("scores,expected_error", [
(None, ValueError),
(score_with_tuple, ValueError),
(score_with_list, ValueError),
(CategoricalScore(0), ValueError),
([None], ValueError),
([score_with_tuple], ValueError),
([score_with_list], ValueError),
([CategoricalScore(0)], ValueError),
([CategoricalScore(0), None], ValueError),
([CategoricalScore(0), score_with_tuple], NO_ERROR),
([CategoricalScore(0), score_with_list], NO_ERROR),
([CategoricalScore(0), BinaryScore(False)], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_score_is_(self, scores, expected_error, multiple_io_model):
activation_maximization = ActivationMaximization(multiple_io_model)
with assert_raises(expected_error):
result = activation_maximization(scores)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("seed_inputs,expected_error", [
(None, NO_ERROR),
(dummy_sample((1, 8, 8, 3)), ValueError),
([dummy_sample((1, 8, 8, 3))], ValueError),
([dummy_sample((1, 8, 8, 3)), None], ValueError),
([None, dummy_sample((1, 10, 10, 3))], ValueError),
([dummy_sample((8, 8, 3)), dummy_sample((10, 10, 3))], NO_ERROR),
([dummy_sample((1, 8, 8, 3)), dummy_sample((10, 10, 3))], NO_ERROR),
([dummy_sample((8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
([dummy_sample((4, 8, 8, 3)), dummy_sample((4, 10, 10, 3))], NO_ERROR),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_seed_input_is_(self, seed_inputs, expected_error, multiple_io_model):
activation_maximization = ActivationMaximization(multiple_io_model)
with assert_raises(expected_error):
result = activation_maximization(
[CategoricalScore(1), BinaryScore(True)], seed_input=seed_inputs)
if seed_inputs is not None and seed_inputs[0].shape[0] == 4:
assert result[0].shape == (4, 8, 8, 3)
assert result[1].shape == (4, 10, 10, 3)
else:
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("input_modifiers,expected_error", [
(None, NO_ERROR),
(Jitter(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([Jitter()], NO_ERROR),
([Jitter(), None], TypeError),
([None, Jitter()], TypeError),
([Jitter(), Rotate(), Scale()], NO_ERROR),
([[]], NO_ERROR),
([[None]], TypeError),
([[Jitter()]], NO_ERROR),
([[Jitter(), None]], TypeError),
([[None, Jitter()]], TypeError),
([[Jitter(), Rotate(), Scale()]], NO_ERROR),
([[Jitter(), Rotate(), Scale()], []], NO_ERROR),
([[Jitter(), Rotate(), Scale()], [None]], TypeError),
([[Jitter(), Rotate(), Scale()], [Jitter()]], NO_ERROR),
([[Jitter(), Rotate(), Scale()], [Jitter(), None]], TypeError),
([[Jitter(), Rotate(), Scale()], [None, Jitter()]], TypeError),
([[Jitter(), Rotate(), Scale()], [Jitter(), Rotate(), Scale()]], NO_ERROR),
(dict(input_1=None), NO_ERROR),
(dict(input_1=Jitter()), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=[None]), TypeError),
(dict(input_1=[Jitter()]), NO_ERROR),
(dict(input_1=[Jitter(), None]), TypeError),
(dict(input_1=[None, Jitter()]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=None), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=Jitter()), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[None]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[Jitter()]), NO_ERROR),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[Jitter(), None]), TypeError),
(dict(input_1=[Jitter(), Rotate(), Scale()], input_2=[None, Jitter()]), TypeError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_input_modifiers_are_(self, input_modifiers, expected_error,
multiple_io_model):
activation_maximization = ActivationMaximization(multiple_io_model)
with assert_raises(expected_error):
result = activation_maximization(
[CategoricalScore(1), BinaryScore(True)], input_modifiers=input_modifiers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("regularizers,expected_error", [
(None, NO_ERROR),
(TotalVariation2D(), NO_ERROR),
(LegacyTotalVariation2D(), NO_ERROR),
([], NO_ERROR),
([None], TypeError),
([TotalVariation2D()], NO_ERROR),
([LegacyTotalVariation2D()], NO_ERROR),
([TotalVariation2D(), None], TypeError),
([None, TotalVariation2D()], TypeError),
([TotalVariation2D(), LegacyTotalVariation2D()], ValueError),
([TotalVariation2D(), Norm()], NO_ERROR),
([LegacyTotalVariation2D(), LegacyNorm()], NO_ERROR),
(dict(input_1=None), NO_ERROR),
(dict(input_1=[]), NO_ERROR),
(dict(input_1=TotalVariation2D()), NO_ERROR),
(dict(input_1=LegacyTotalVariation2D()), ValueError),
(dict(input_2=None), NO_ERROR),
(dict(input_2=[]), NO_ERROR),
(dict(input_2=TotalVariation2D()), NO_ERROR),
(dict(input_2=LegacyTotalVariation2D()), ValueError),
(dict(input_3=None), ValueError),
(dict(input_3=[]), ValueError),
(dict(input_3=TotalVariation2D()), ValueError),
(dict(input_3=LegacyTotalVariation2D()), ValueError),
(dict(input_1=TotalVariation2D(), input_2=TotalVariation2D()), NO_ERROR),
(dict(input_1=LegacyTotalVariation2D(), input_2=TotalVariation2D()), ValueError),
(dict(input_1=LegacyTotalVariation2D(), input_2=LegacyTotalVariation2D()), ValueError),
(dict(input_1=TotalVariation2D(), input_2=TotalVariation2D(),
input_3=TotalVariation2D()), ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizer_is_(self, regularizers, expected_error, multiple_io_model):
activation_maximization = ActivationMaximization(multiple_io_model)
with assert_raises(expected_error):
result = activation_maximization(
[CategoricalScore(0), BinaryScore(True)], regularizers=regularizers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("regularizer_container", [list, tuple, dict])
@pytest.mark.parametrize("regularizers,expected_error", [
([[]], NO_ERROR),
([[None]], TypeError),
([[TotalVariation2D()]], NO_ERROR),
([[LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), None]], TypeError),
([[None, TotalVariation2D()]], TypeError),
([[TotalVariation2D(), Norm()]], NO_ERROR),
([[TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[LegacyNorm(), LegacyTotalVariation2D()]], ValueError),
([[], [Norm()]], NO_ERROR),
([[None], [Norm()]], TypeError),
([[TotalVariation2D()], [Norm()]], NO_ERROR),
([[LegacyTotalVariation2D()], [Norm()]], ValueError),
([[TotalVariation2D(), None], [Norm()]], TypeError),
([[None, TotalVariation2D()], [Norm()]], TypeError),
([[TotalVariation2D(), Norm()], [Norm()]], NO_ERROR),
([[TotalVariation2D(), LegacyTotalVariation2D()], [Norm()]], ValueError),
([[Norm()], []], NO_ERROR),
([[Norm()], [None]], TypeError),
([[Norm()], [TotalVariation2D()]], NO_ERROR),
([[Norm()], [LegacyTotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), None]], TypeError),
([[Norm()], [None, TotalVariation2D()]], TypeError),
([[Norm()], [TotalVariation2D(), Norm()]], NO_ERROR),
([[Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[TotalVariation2D(), LegacyTotalVariation2D()], None], ValueError),
([None, [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[Norm()], [Norm()], []], ValueError),
([[Norm()], [Norm()], [None]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [LegacyTotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), None]], ValueError),
([[Norm()], [Norm()], [None, TotalVariation2D()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), Norm()]], ValueError),
([[Norm()], [Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
([[Norm()], [TotalVariation2D(), LegacyTotalVariation2D()], None], ValueError),
([None, [Norm()], [TotalVariation2D(), LegacyTotalVariation2D()]], ValueError),
])
@pytest.mark.usefixtures("mixed_precision", "legacy")
def test__call__if_regularizers_are_(self, regularizer_container, regularizers, expected_error,
multiple_io_model):
if regularizer_container is tuple:
regularizers = tuple(regularizers)
if regularizer_container is dict:
regularizers = zip(['input_1', 'input_2', 'input_3'], regularizers)
regularizers = dict(regularizers)
            has_legacy = ((isinstance(r, LegacyRegularizer) for r in listify(_regularizers))
                          for _regularizers in regularizers.values())
            if any((any(f) for f in has_legacy)):
                expected_error = ValueError
activation_maximization = ActivationMaximization(multiple_io_model)
with assert_raises(expected_error):
result = activation_maximization(
[CategoricalScore(0), BinaryScore(True)], regularizers=regularizers)
assert result[0].shape == (1, 8, 8, 3)
assert result[1].shape == (1, 10, 10, 3)
@pytest.mark.parametrize("activation_modifiers,modified_0,modified_1,expected_error", [
(None, False, False, NO_ERROR),
(lambda x: np.ones(x.shape, np.float), True, False, NO_ERROR),
(dict(input_1=None), False, False, NO_ERROR),
(dict(input_2=None), False, False, NO_ERROR),
(dict(input_1=None, input_2=None), False, False, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float)), True, False, NO_ERROR),
(dict(input_2=lambda x: np.ones(x.shape, np.float)), False, True, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float), input_2=None), True, False, NO_ERROR),
(dict(input_1=None, input_2=lambda x: np.ones(x.shape, np.float)), False, True, NO_ERROR),
(dict(input_1=lambda x: np.ones(x.shape, np.float),
input_2=lambda x:
|
np.ones(x.shape, np.float)
|
numpy.ones
|
# Copyright (c) 2016-2018 The Regents of the University of Michigan
# This file is part of the General Simulation Data (GSD) project, released under the BSD 2-Clause License.
""" hoomd schema reference implementation
The main package :py:mod:`gsd.hoomd` is a reference implementation of the
GSD schema ``hoomd``. It is a simple, but high performance and memory
efficient, reader and writer for the schema. See :ref:`hoomd-examples`
for full examples.
* :py:func:`create` - Create a hoomd schema GSD file (deprecated).
* :py:func:`open` - Open a hoomd schema GSD file.
* :py:class:`HOOMDTrajectory` - Read and write hoomd schema GSD files.
* :py:class:`Snapshot` - Store the state of a single frame.
* :py:class:`ConfigurationData` - Store configuration data in a snapshot.
* :py:class:`ParticleData` - Store particle data in a snapshot.
* :py:class:`BondData` - Store topology data in a snapshot.
"""
import numpy
from collections import OrderedDict
import logging
try:
from gsd import fl
except ImportError:
fl = None;
try:
import gsd
except ImportError:
gsd = None;
logger = logging.getLogger('gsd.hoomd')
class ConfigurationData(object):
""" Store configuration data.
Users should not need to instantiate this class. Use the ``configuration``
attribute of a :py:class:`Snapshot`.
Attributes:
step (int): Time step of this frame (:chunk:`configuration/step`).
dimensions (int): Number of dimensions (:chunk:`configuration/dimensions`).
box (numpy.ndarray[float, ndim=1, mode='c']): Box dimensions (:chunk:`configuration/box`)
- [lx, ly, lz, xy, xz, yz].
"""
_default_value = OrderedDict();
_default_value['step'] = numpy.uint64(0);
_default_value['dimensions'] = numpy.uint8(3);
_default_value['box'] = numpy.array([1,1,1,0,0,0], dtype=numpy.float32);
def __init__(self):
self.step = None;
self.dimensions = None;
self.box = None;
def validate(self):
""" Validate all attributes.
First, convert every array attribute to a numpy array of the
proper type. Then validate that all attributes have the correct
dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will
be replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConfigurationData');
if self.box is not None:
self.box = numpy.ascontiguousarray(self.box, dtype=numpy.float32);
self.box = self.box.reshape([6,])
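# For example, a hedged sketch of the conversion described in the docstring above:
#
#   c = ConfigurationData(); c.box = [2, 2, 2, 0, 0, 0]; c.validate()
#   # c.box is now a contiguous numpy.float32 array of shape (6,)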
class ParticleData(object):
""" Store particle data chunks.
Users should not need to instantiate this class. Use the ``particles``
attribute of a :py:class:`Snapshot`.
Instances resulting from file read operations will always store per particle
quantities in numpy arrays of the defined types. User created snapshots can
provide input data as python lists, tuples, numpy arrays of different types,
etc... Such input elements will be converted to the appropriate array type
by :py:meth:`validate()` which is called when writing a frame.
Attributes:
N (int): Number of particles in the snapshot (:chunk:`particles/N`).
types (list[str]): Names of the particle types (:chunk:`particles/types`).
position (numpy.ndarray[float, ndim=2, mode='c']): Nx3 array defining particle position (:chunk:`particles/position`).
        orientation (numpy.ndarray[float, ndim=2, mode='c']): Nx4 array defining particle orientation (:chunk:`particles/orientation`).
typeid (numpy.ndarray[uint32, ndim=1, mode='c']): N length array defining particle type ids (:chunk:`particles/typeid`).
mass (numpy.ndarray[float, ndim=1, mode='c']): N length array defining particle masses (:chunk:`particles/mass`).
charge (numpy.ndarray[float, ndim=1, mode='c']): N length array defining particle charges (:chunk:`particles/charge`).
diameter (numpy.ndarray[float, ndim=1, mode='c']): N length array defining particle diameters (:chunk:`particles/diameter`).
body (numpy.ndarray[int32, ndim=1, mode='c']): N length array defining particle bodies (:chunk:`particles/body`).
moment_inertia (numpy.ndarray[float, ndim=2, mode='c']): Nx3 array defining particle moments of inertia (:chunk:`particles/moment_inertia`).
velocity (numpy.ndarray[float, ndim=2, mode='c']): Nx3 array defining particle velocities (:chunk:`particles/velocity`).
angmom (numpy.ndarray[float, ndim=2, mode='c']): Nx4 array defining particle angular momenta (:chunk:`particles/angmom`).
image (numpy.ndarray[int32, ndim=2, mode='c']): Nx3 array defining particle images (:chunk:`particles/image`).
"""
_default_value = OrderedDict();
_default_value['N'] = numpy.uint32(0);
_default_value['types'] = ['A'];
_default_value['typeid'] = numpy.uint32(0);
_default_value['mass'] = numpy.float32(1.0);
_default_value['charge'] = numpy.float32(0);
_default_value['diameter'] = numpy.float32(1.0);
_default_value['body'] = numpy.int32(-1);
_default_value['moment_inertia'] = numpy.array([0,0,0], dtype=numpy.float32);
_default_value['position'] = numpy.array([0,0,0], dtype=numpy.float32);
_default_value['orientation'] = numpy.array([1,0,0,0], dtype=numpy.float32);
_default_value['velocity'] = numpy.array([0,0,0], dtype=numpy.float32);
_default_value['angmom'] = numpy.array([0,0,0,0], dtype=numpy.float32);
_default_value['image'] = numpy.array([0,0,0], dtype=numpy.int32);
def __init__(self):
self.N = 0;
self.position = None;
self.orientation = None;
self.types = None;
self.typeid = None;
self.mass = None;
self.charge = None;
self.diameter = None;
self.body = None;
self.moment_inertia = None;
self.velocity = None;
self.angmom = None;
self.image = None;
def validate(self):
""" Validate all attributes.
First, convert every per particle attribute to a numpy array of the
proper type. Then validate that all attributes have the correct
dimensions.
Ignore any attributes that are ``None``.
Warning:
Per particle attributes that are not contiguous numpy arrays will
be replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ParticleData');
if self.position is not None:
self.position = numpy.ascontiguousarray(self.position, dtype=numpy.float32);
self.position = self.position.reshape([self.N, 3])
if self.orientation is not None:
self.orientation = numpy.ascontiguousarray(self.orientation, dtype=numpy.float32);
self.orientation = self.orientation.reshape([self.N, 4])
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid, dtype=numpy.uint32);
self.typeid = self.typeid.reshape([self.N])
if self.mass is not None:
self.mass = numpy.ascontiguousarray(self.mass, dtype=numpy.float32);
self.mass = self.mass.reshape([self.N])
if self.charge is not None:
self.charge = numpy.ascontiguousarray(self.charge, dtype=numpy.float32);
self.charge = self.charge.reshape([self.N])
if self.diameter is not None:
self.diameter = numpy.ascontiguousarray(self.diameter, dtype=numpy.float32);
self.diameter = self.diameter.reshape([self.N])
if self.body is not None:
self.body = numpy.ascontiguousarray(self.body, dtype=numpy.int32);
self.body = self.body.reshape([self.N])
if self.moment_inertia is not None:
self.moment_inertia = numpy.ascontiguousarray(self.moment_inertia, dtype=numpy.float32);
self.moment_inertia = self.moment_inertia.reshape([self.N, 3]);
if self.velocity is not None:
self.velocity = numpy.ascontiguousarray(self.velocity, dtype=numpy.float32);
self.velocity = self.velocity.reshape([self.N, 3]);
if self.angmom is not None:
self.angmom = numpy.ascontiguousarray(self.angmom, dtype=numpy.float32);
self.angmom = self.angmom.reshape([self.N, 4]);
if self.image is not None:
self.image = numpy.ascontiguousarray(self.image, dtype=numpy.int32);
self.image = self.image.reshape([self.N, 3]);
class BondData(object):
""" Store bond data chunks.
Users should not need to instantiate this class. Use the ``bonds``,
``angles``, ``dihedrals``, or ``impropers`` attribute of a :py:class:`Snapshot`.
Instances resulting from file read operations will always store per bond
quantities in numpy arrays of the defined types. User created snapshots can
provide input data as python lists, tuples, numpy arrays of different types,
etc... Such input elements will be converted to the appropriate array type
by :py:meth:`validate()` which is called when writing a frame.
Note:
*M* varies depending on the type of bond. The same python class represents all types of bonds.
======== ===
Type *M*
======== ===
Bond 2
Angle 3
Dihedral 4
Improper 4
======== ===
Attributes:
N (int): Number of bonds in the snapshot (:chunk:`bonds/N`, :chunk:`angles/N`, :chunk:`dihedrals/N`, :chunk:`impropers/N`, :chunk:`pairs/N`).
types (list[str]): Names of the bond types (:chunk:`bonds/types`, :chunk:`angles/types`, :chunk:`dihedrals/types`, :chunk:`impropers/types`, :chunk:`pairs/types`).
typeid (numpy.ndarray[uint32, ndim=1, mode='c']): N length array defining bond type ids (:chunk:`bonds/typeid`, :chunk:`angles/typeid`, :chunk:`dihedrals/typeid`, :chunk:`impropers/typeid`, :chunk:`pairs/typeid`).
group (numpy.ndarray[uint32, ndim=2, mode='c']): NxM array defining tags in the particle bonds (:chunk:`bonds/group`, :chunk:`angles/group`, :chunk:`dihedrals/group`, :chunk:`impropers/group`, :chunk:`pairs/group`).
"""
def __init__(self, M):
self.M = M;
self.N = 0;
self.types = None;
self.typeid = None;
self.group = None;
self._default_value = OrderedDict();
self._default_value['N'] = numpy.uint32(0);
self._default_value['types'] = [];
self._default_value['typeid'] = numpy.uint32(0);
self._default_value['group'] = numpy.array([0]*M, dtype=numpy.int32);
def validate(self):
""" Validate all attributes.
First, convert every per bond attribute to a numpy array of the
proper type. Then validate that all attributes have the correct
dimensions.
Ignore any attributes that are ``None``.
Warning:
Per bond attributes that are not contiguous numpy arrays will
be replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating BondData');
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid, dtype=numpy.uint32);
self.typeid = self.typeid.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32);
self.group = self.group.reshape([self.N, self.M]);
class ConstraintData(object):
""" Store constraint data chunks.
Users should not need to instantiate this class. Use the ``constraints``,
attribute of a :py:class:`Snapshot`.
Instances resulting from file read operations will always store per constraint
quantities in numpy arrays of the defined types. User created snapshots can
provide input data as python lists, tuples, numpy arrays of different types,
etc... Such input elements will be converted to the appropriate array type
by :py:meth:`validate()` which is called when writing a frame.
Attributes:
N (int): Number of constraints in the snapshot (:chunk:`constraints/N`).
value (numpy.ndarray[float32, ndim=1, mode='c']): N length array defining constraint lengths (:chunk:`constraints/value`).
group (numpy.ndarray[uint32, ndim=2, mode='c']): Nx2 array defining tags in the particle constraints (:chunk:`constraints/group`).
"""
def __init__(self):
self.M = 2;
self.N = 0;
self.value = None;
self.group = None;
self._default_value = OrderedDict();
self._default_value['N'] = numpy.uint32(0);
self._default_value['value'] = numpy.float32(0);
self._default_value['group'] = numpy.array([0]*self.M, dtype=numpy.int32);
def validate(self):
""" Validate all attributes.
First, convert every per constraint attribute to a numpy array of the
proper type. Then validate that all attributes have the correct
dimensions.
Ignore any attributes that are ``None``.
Warning:
Per constraint attributes that are not contiguous numpy arrays will
be replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConstraintData');
if self.value is not None:
self.value = numpy.ascontiguousarray(self.value, dtype=numpy.float32);
self.value = self.value.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32);
self.group = self.group.reshape([self.N, self.M]);
class Snapshot(object):
""" Top level snapshot container.
Attributes:
configuration (:py:class:`ConfigurationData`): Configuration data.
particles (:py:class:`ParticleData`): Particle data snapshot.
bonds (:py:class:`BondData`): Bond data snapshot.
angles (:py:class:`BondData`): Angle data snapshot.
dihedrals (:py:class:`BondData`): Dihedral data snapshot.
impropers (:py:class:`BondData`): Improper data snapshot.
constraints (:py:class:`ConstraintData`): Constraint data snapshot.
pairs (:py:class:`BondData`): Special pair interactions snapshot.
state (dict): Dictionary containing state data.
See the HOOMD schema specification for details on entries in the state dictionary. Entries in this dict are the
chunk name without the state prefix. For example, :chunk:`state/hpmc/sphere/radius` is stored in the dictionary
entry ``state['hpmc/sphere/radius']``.
"""
def __init__(self):
self.configuration = ConfigurationData();
self.particles = ParticleData();
self.bonds = BondData(2);
self.angles = BondData(3);
self.dihedrals = BondData(4);
self.impropers = BondData(4);
self.constraints = ConstraintData();
self.pairs = BondData(2);
self.state = {}
self._valid_state = ['hpmc/integrate/d',
'hpmc/integrate/a',
'hpmc/sphere/radius',
'hpmc/ellipsoid/a',
'hpmc/ellipsoid/b',
'hpmc/ellipsoid/c',
'hpmc/convex_polyhedron/N',
'hpmc/convex_polyhedron/vertices',
'hpmc/convex_spheropolyhedron/N',
'hpmc/convex_spheropolyhedron/vertices',
'hpmc/convex_spheropolyhedron/sweep_radius',
'hpmc/convex_polygon/N',
'hpmc/convex_polygon/vertices',
'hpmc/convex_spheropolygon/N',
'hpmc/convex_spheropolygon/vertices',
'hpmc/convex_spheropolygon/sweep_radius',
'hpmc/simple_polygon/N',
'hpmc/simple_polygon/vertices']
def validate(self):
""" Validate all contained snapshot data.
"""
logger.debug('Validating Snapshot');
self.configuration.validate();
self.particles.validate();
self.bonds.validate();
self.angles.validate();
self.dihedrals.validate();
self.impropers.validate();
self.constraints.validate();
self.pairs.validate();
# validate HPMC state
if self.particles.types is not None:
NT = len(self.particles.types)
else:
NT = 1;
if 'hpmc/integrate/d' in self.state:
self.state['hpmc/integrate/d'] = numpy.ascontiguousarray(self.state['hpmc/integrate/d'], dtype=numpy.float64);
self.state['hpmc/integrate/d'] = self.state['hpmc/integrate/d'].reshape([1])
if 'hpmc/integrate/a' in self.state:
self.state['hpmc/integrate/a'] = numpy.ascontiguousarray(self.state['hpmc/integrate/a'], dtype=numpy.float64);
self.state['hpmc/integrate/a'] = self.state['hpmc/integrate/a'].reshape([1])
if 'hpmc/sphere/radius' in self.state:
self.state['hpmc/sphere/radius'] = numpy.ascontiguousarray(self.state['hpmc/sphere/radius'], dtype=numpy.float32);
self.state['hpmc/sphere/radius'] = self.state['hpmc/sphere/radius'].reshape([NT])
if 'hpmc/ellipsoid/a' in self.state:
self.state['hpmc/ellipsoid/a'] = numpy.ascontiguousarray(self.state['hpmc/ellipsoid/a'], dtype=numpy.float32);
self.state['hpmc/ellipsoid/a'] = self.state['hpmc/ellipsoid/a'].reshape([NT])
self.state['hpmc/ellipsoid/b'] = numpy.ascontiguousarray(self.state['hpmc/ellipsoid/b'], dtype=numpy.float32);
self.state['hpmc/ellipsoid/b'] = self.state['hpmc/ellipsoid/b'].reshape([NT])
self.state['hpmc/ellipsoid/c'] = numpy.ascontiguousarray(self.state['hpmc/ellipsoid/c'], dtype=numpy.float32);
self.state['hpmc/ellipsoid/c'] = self.state['hpmc/ellipsoid/c'].reshape([NT])
if 'hpmc/convex_polyhedron/N' in self.state:
self.state['hpmc/convex_polyhedron/N'] = numpy.ascontiguousarray(self.state['hpmc/convex_polyhedron/N'], dtype=numpy.uint32);
self.state['hpmc/convex_polyhedron/N'] = self.state['hpmc/convex_polyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polyhedron/N'])
self.state['hpmc/convex_polyhedron/vertices'] = numpy.ascontiguousarray(self.state['hpmc/convex_polyhedron/vertices'], dtype=numpy.float32);
self.state['hpmc/convex_polyhedron/vertices'] = self.state['hpmc/convex_polyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_spheropolyhedron/N' in self.state:
self.state['hpmc/convex_spheropolyhedron/N'] = numpy.ascontiguousarray(self.state['hpmc/convex_spheropolyhedron/N'], dtype=numpy.uint32);
self.state['hpmc/convex_spheropolyhedron/N'] = self.state['hpmc/convex_spheropolyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolyhedron/N'])
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = numpy.ascontiguousarray(self.state['hpmc/convex_spheropolyhedron/sweep_radius'], dtype=numpy.float32);
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = self.state['hpmc/convex_spheropolyhedron/sweep_radius'].reshape([NT])
self.state['hpmc/convex_spheropolyhedron/vertices'] = numpy.ascontiguousarray(self.state['hpmc/convex_spheropolyhedron/vertices'], dtype=numpy.float32);
self.state['hpmc/convex_spheropolyhedron/vertices'] = self.state['hpmc/convex_spheropolyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_polygon/N' in self.state:
self.state['hpmc/convex_polygon/N'] = numpy.ascontiguousarray(self.state['hpmc/convex_polygon/N'], dtype=numpy.uint32);
self.state['hpmc/convex_polygon/N'] = self.state['hpmc/convex_polygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polygon/N'])
self.state['hpmc/convex_polygon/vertices'] = numpy.ascontiguousarray(self.state['hpmc/convex_polygon/vertices'], dtype=numpy.float32);
self.state['hpmc/convex_polygon/vertices'] = self.state['hpmc/convex_polygon/vertices'].reshape([sumN, 2])
if 'hpmc/convex_spheropolygon/N' in self.state:
self.state['hpmc/convex_spheropolygon/N'] = numpy.ascontiguousarray(self.state['hpmc/convex_spheropolygon/N'], dtype=numpy.uint32);
self.state['hpmc/convex_spheropolygon/N'] = self.state['hpmc/convex_spheropolygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolygon/N'])
self.state['hpmc/convex_spheropolygon/sweep_radius'] =
|
numpy.ascontiguousarray(self.state['hpmc/convex_spheropolygon/sweep_radius'], dtype=numpy.float32)
|
numpy.ascontiguousarray
|
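The validate() methods above all follow the same pattern: coerce whatever the user supplied (lists, tuples, arrays of other types) into contiguous numpy arrays of the schema's dtype and reshape them to the expected [N, k] layout, leaving attributes that are still None untouched. A minimal sketch, assuming this module is importable as gsd.hoomd (as in older gsd releases where the top-level container is named Snapshot):

import gsd.hoomd

snap = gsd.hoomd.Snapshot()
snap.particles.N = 2
snap.particles.types = ['A']
snap.particles.typeid = [0, 0]                    # plain list input
snap.particles.position = [[0, 0, 0], [1, 0, 0]]  # coerced on validate()
snap.validate()
assert snap.particles.position.dtype == 'float32'
assert snap.particles.position.shape == (2, 3)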
# -*- coding: utf-8 -*-
"""
Created on Thursday May 26 11:23:00 2016
@author: <NAME>, <NAME>, <NAME>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from os import path, remove # File Path formatting
from warnings import warn
import numpy as np # For array operations
from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat file
import h5py
from sidpy.sid import Translator
from sidpy.hdf.hdf_utils import write_simple_attrs
from pyUSID.io.write_utils import INDICES_DTYPE, Dimension
from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset
from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, \
generatePlotGroups, createSpecVals, maxReadPixels, nf32
class BEodfRelaxationTranslator(Translator):
"""
Translates old Relaxation data into the new H5 format. This is for the files generated from
the old BEPSDAQ program utilizing two cards simultaneously.
At present, this version of the translator only works for out-of-field measurements.
It will not work for in-field. This should be fixed at a later date.
"""
def __init__(self, max_mem_mb=1024):
super(BEodfRelaxationTranslator, self).__init__(max_mem_mb)
self.FFT_BE_wave = None
self.h5_file = None
self.ds_main = None
self.mean_resp = None
self.max_resp = None
self.min_resp = None
def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=False):
"""
Basic method that translates .dat data file(s) to a single .h5 file
Inputs:
file_path -- Absolute file path for one of the data files.
It is assumed that this file is of the OLD data format.
Outputs:
Nothing
"""
file_path = path.abspath(file_path)
(folder_path, basename) = path.split(file_path)
(basename, path_dict) = self._parse_file_path(file_path)
h5_path = path.join(folder_path, basename + '.h5')
if path.exists(h5_path):
remove(h5_path)
self.h5_file = h5py.File(h5_path, 'w')
isBEPS = True
parm_dict = self.__getParmsFromOldMat(path_dict['old_mat_parms'])
ignored_plt_grps = ['in-field'] # Here we assume that there is no in-field.
# If in-field data is captured then the translator would have to be modified.
# Technically, we could do away with this if statement, as isBEPS is always true for this translation
if isBEPS:
parm_dict['data_type'] = 'BEPSData'
std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'
if not std_expt:
warn('This translator does not handle user defined voltage spectroscopy')
return
spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])
# Check file sizes:
if 'read_real' in path_dict.keys():
real_size = path.getsize(path_dict['read_real'])
imag_size = path.getsize(path_dict['read_imag'])
else:
real_size = path.getsize(path_dict['write_real'])
imag_size = path.getsize(path_dict['write_imag'])
if real_size != imag_size:
raise ValueError("Real and imaginary file sizes DON'T match!. Ending")
num_rows = int(parm_dict['grid_num_rows'])
num_cols = int(parm_dict['grid_num_cols'])
num_pix = num_rows * num_cols
tot_bins = real_size / (num_pix * 4) # Finding bins by simple division of entire datasize
# Check for case where only a single pixel is missing.
check_bins = real_size / ((num_pix - 1) * 4)
if tot_bins % 1 and check_bins % 1:
warn('Aborting! Some parameter appears to have changed in-between')
return
elif not tot_bins % 1:
# Everything's ok
pass
elif not check_bins % 1:
tot_bins = check_bins
warn('Warning: A pixel seems to be missing from the data. File will be padded with zeros.')
tot_bins = int(tot_bins)
(bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec) = self.__readOldMatBEvecs(path_dict['old_mat_parms'])
"""
Because this is the old data format and there is a discrepancy in the number of bins (they seem to be 2 less
than the actual number), we need to re-calculate it based on the available data. This is done below.
"""
band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
bin_freqs = np.linspace(st_f, en_f, len(bin_inds), dtype=np.float32)
# Forcing standardized datatypes:
bin_inds =
|
np.int32(bin_inds)
|
numpy.int32
|
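The band-edge arithmetic in the prompt above is simple enough to check by hand. A standalone sketch with made-up parameter values (the real ones come from the old .mat parameter file):

import numpy as np

be_center = 310e3    # BE_center_frequency_[Hz], assumed value
be_width = 20e3      # BE_band_width_[Hz], assumed value
edge_trim = 0.1      # BE_band_edge_trim, assumed value
n_bins = 87          # stand-in for len(bin_inds)

band_width = be_width * (0.5 - edge_trim)            # 8 kHz
bin_freqs = np.linspace(be_center - band_width,
                        be_center + band_width,
                        n_bins, dtype=np.float32)
print(bin_freqs[0], bin_freqs[-1])                   # 302000.0 318000.0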
import collections
import os
import pathlib
import sys
import unittest
import numpy as np
# There's no package set up so do something hacky to import.
sys.path.append(str(pathlib.Path(__file__).parent.absolute().parent))
from ucb_learner import UCBLearner
class TestUCBLearner(unittest.TestCase):
def test_converges(self):
learner = UCBLearner(["a", "b", "c"],
num_steps_per_update=5,
mean_reward_alpha=0.1,
q_alpha=0.01,
lmbda=0.25,
ucb_c=0.005,
verbose=True)
num_train_itr = 1000
num_batch_itr = 5
action = 0
means = np.array([0.001, -0.001, 0.0005])
sigmas =
|
np.array([0.01, 0.02, 0.003])
|
numpy.array
|
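The UCB test above only shows the learner's construction; the reward model implied by means and sigmas is an independent Gaussian per arm. A sketch of how such rewards could be simulated for the training loop (the learner's update call itself is not shown in this excerpt, so it is omitted here):

import numpy as np

rng = np.random.default_rng(0)
means = np.array([0.001, -0.001, 0.0005])
sigmas = np.array([0.01, 0.02, 0.003])

action = 0
batch_rewards = rng.normal(means[action], sigmas[action], size=5)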
#
# Test for the operator class
#
import pybamm
from tests import get_2p1d_mesh_for_testing, get_unit_2p1D_mesh_for_testing
import numpy as np
import unittest
class TestScikitFiniteElement(unittest.TestCase):
def test_not_implemented(self):
mesh = get_2p1d_mesh_for_testing()
spatial_method = pybamm.ScikitFiniteElement(mesh)
self.assertEqual(spatial_method.mesh, mesh)
with self.assertRaises(NotImplementedError):
spatial_method.gradient(None, None, None)
with self.assertRaises(NotImplementedError):
spatial_method.divergence(None, None, None)
with self.assertRaises(NotImplementedError):
spatial_method.indefinite_integral(None, None)
def test_discretise_equations(self):
# get mesh
mesh = get_2p1d_mesh_for_testing()
spatial_methods = {
"macroscale": pybamm.FiniteVolume,
"current collector": pybamm.ScikitFiniteElement,
}
disc = pybamm.Discretisation(mesh, spatial_methods)
# discretise some equations
var = pybamm.Variable("var", domain="current collector")
y = pybamm.SpatialVariable("y", ["current collector"])
z = pybamm.SpatialVariable("z", ["current collector"])
disc.set_variable_slices([var])
y_test = np.ones(mesh["current collector"][0].npts)
unit_source = pybamm.Broadcast(1, "current collector")
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Neumann"),
"positive tab": (pybamm.Scalar(0), "Neumann"),
}
}
for eqn in [
pybamm.laplacian(var),
pybamm.source(unit_source, var),
pybamm.laplacian(var) - pybamm.source(unit_source, var),
pybamm.source(var, var),
pybamm.laplacian(var) - pybamm.source(2 * var, var),
pybamm.laplacian(var) - pybamm.source(unit_source ** 2 + 1 / var, var),
pybamm.Integral(var, [y, z]) - 1,
pybamm.source(var, var, boundary=True),
pybamm.laplacian(var) - pybamm.source(unit_source, var, boundary=True),
pybamm.laplacian(var)
- pybamm.source(unit_source ** 2 + 1 / var, var, boundary=True),
pybamm.grad_squared(var),
]:
# Check that equation can be evaluated in each case
# Dirichlet
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Dirichlet"),
"positive tab": (pybamm.Scalar(1), "Dirichlet"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# Neumann
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Neumann"),
"positive tab": (pybamm.Scalar(1), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# One of each
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Neumann"),
"positive tab": (pybamm.Scalar(1), "Dirichlet"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# One of each
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Dirichlet"),
"positive tab": (pybamm.Scalar(1), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# check ValueError raised for non Dirichlet or Neumann BCs
eqn = pybamm.laplacian(var) - pybamm.source(unit_source, var)
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Dirichlet"),
"positive tab": (pybamm.Scalar(1), "Other BC"),
}
}
with self.assertRaises(ValueError):
eqn_disc = disc.process_symbol(eqn)
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Other BC"),
"positive tab": (pybamm.Scalar(1), "Neumann"),
}
}
with self.assertRaises(ValueError):
eqn_disc = disc.process_symbol(eqn)
# raise ModelError if no BCs provided
new_var = pybamm.Variable("new_var", domain="current collector")
disc.set_variable_slices([new_var])
eqn = pybamm.laplacian(new_var)
with self.assertRaises(pybamm.ModelError):
eqn_disc = disc.process_symbol(eqn)
# check GeometryError if using scikit-fem not in y or z
x = pybamm.SpatialVariable("x", ["current collector"])
with self.assertRaises(pybamm.GeometryError):
disc.process_symbol(x)
def test_manufactured_solution(self):
mesh = get_unit_2p1D_mesh_for_testing(ypts=32, zpts=32)
spatial_methods = {
"macroscale": pybamm.FiniteVolume,
"current collector": pybamm.ScikitFiniteElement,
}
disc = pybamm.Discretisation(mesh, spatial_methods)
# linear u = z (to test coordinates to degree of freedom mapping)
var = pybamm.Variable("var", domain="current collector")
disc.set_variable_slices([var])
var_disc = disc.process_symbol(var)
z_vertices = mesh["current collector"][0].coordinates[1, :]
np.testing.assert_array_almost_equal(
var_disc.evaluate(None, z_vertices), z_vertices[:, np.newaxis]
)
# linear u = 6*y (to test coordinates to degree of freedom mapping)
y_vertices = mesh["current collector"][0].coordinates[0, :]
np.testing.assert_array_almost_equal(
var_disc.evaluate(None, 6 * y_vertices), 6 * y_vertices[:, np.newaxis]
)
# mixed u = y*z (to test coordinates to degree of freedom mapping)
np.testing.assert_array_almost_equal(
var_disc.evaluate(None, y_vertices * z_vertices),
y_vertices[:, np.newaxis] * z_vertices[:, np.newaxis],
)
# laplace of u = sin(pi*z)
var = pybamm.Variable("var", domain="current collector")
eqn_zz = pybamm.laplacian(var)
# set boundary conditions ("negative tab" = bottom of unit square,
# "positive tab" = top of unit square, elsewhere normal derivative is zero)
disc.bcs = {
var.id: {
"negative tab": (pybamm.Scalar(0), "Dirichlet"),
"positive tab": (pybamm.Scalar(0), "Dirichlet"),
}
}
disc.set_variable_slices([var])
eqn_zz_disc = disc.process_symbol(eqn_zz)
z_vertices = mesh["current collector"][0].coordinates[1, :][:, np.newaxis]
u =
|
np.sin(np.pi * z_vertices)
|
numpy.sin
|
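The manufactured-solution test above leans on the identity d²/dz² sin(πz) = -π² sin(πz), so the discretised Laplacian applied to u = sin(πz) should return approximately -π²u on the interior. A quick finite-difference sanity check of that identity, independent of pybamm:

import numpy as np

z = np.linspace(0, 1, 2001)
u = np.sin(np.pi * z)
d2u = np.gradient(np.gradient(u, z), z)   # approximate second derivative
interior = slice(2, -2)                   # np.gradient is less accurate at the ends
assert np.allclose(d2u[interior], -np.pi**2 * u[interior], rtol=1e-3)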
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3],
|
np.array([1, 2])
|
numpy.array
|
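Most of the assertions above revolve around how SparseArray infers its dtype and fill_value. A small illustration written against the pandas 0.24-era API used in these tests (pd.SparseArray is deprecated in later releases in favour of pd.arrays.SparseArray):

import pandas as pd

arr = pd.SparseArray([0, 0, 1, 2, 0], fill_value=0)
print(arr.dtype)       # Sparse[int64, 0]
print(arr.sp_values)   # [1 2] -- only the non-fill values are stored
print(arr.fill_value)  # 0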
"""
base_lines.py - microscopy images class
update: 20191001 - modify gaussian fit function
"""
import os
import sys
from tqdm import tqdm_notebook
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
from scipy.signal import argrelmax
# local library
from .rfit import robust_line_fit
from .rfit import robust_inverseabs_fit
from .rfit import gaussian, gaussian2, gaussian3, line, inverseabs
from .base_filters import ImageFilter
from .lineobject import LineObject
from .lineobject import _smooth
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.1.0'
class ImageLine(ImageFilter):
""" Image based on channel experiments - lineprofile and migration angle detection """
def __init__(self, objects, method='tifffile', debug=False, **kwargs):
""" initialization """
super().__init__(objects, method=method, debug=debug, **kwargs)
""" TifFile class initialization """
# for graphic representation
self._ax1 = None
self._ax2 = None
# fitting info
self._peakdatafname = '.{}_peakinfo.csv'.format(self._meta['basename'])
if os.path.isfile(self._peakdatafname):
if self._debug:
print('... read peak information: {}'.format(self._peakdatafname))
self._peakdata = pd.read_csv(self._peakdatafname, index_col=0)
else:
self._peakdata = pd.DataFrame()
self._lineobject = []
self._kfit = []
self._baseline = 0.0
self._beaminfo = []
self._beamdirection = 'right'
# full width injection case
self._smoothing = 13
self._searchrange = 10
def __repr__(self):
""" representation """
msg = super().__repr__()
msg += '-'*50 + '\n'
msg += '... Wall positions: ({}, {})\n'.format(self._meta['wall1'], self._meta['wall2'])
msg += '... Array positions: ({}, {})\n'.format(self._meta['range1'], self._meta['range2'])
msg += '... Migration Angle: {:.4f} [deg]\n'.format(self._meta['mangle'])
msg += '... Frame Angle: {:.4f} [deg]\n'.format(self._meta['fangle'])
msg += '... Diffusion constant: {:.4f} [um2/s]\n'.format(self._meta['D'])
msg += '... Peclet number: {:.4f}\n'.format(self._meta['Pe'])
msg += '... Pressure: {:.4f} [bar]\n'.format(self._meta['p'])
msg += '... Velocity: {:.5f} [um/s]\n'.format(self._meta['u'])
msg += '... Particle size: {:.4f} [nm]\n'.format(self._meta['psize'])
return msg
def set_wallinfo(self, wallinfo=[0, 512, 0, 512], show=False):
""" manually set wall information """
if len(wallinfo) == 4:
self._meta.update_wall(wallinfo)
else:
print('... wallinfo = [wall1, wall2, range1, range2]')
return
def set_expInfo(self, magnification=None, velocity=-1, p=-1, fangle=0.0, psize=-1, ccd_length=16.0):
""" set experimental values """
if isinstance(magnification, str):
self._meta.update_mag(magnification)
self._meta.update_wall([0, 0, 0, 0])
if velocity > -1: self._meta['u'] = velocity
if p > -1: self._meta['p'] = p
if fangle != 0: self._meta['fangle'] = fangle
if psize > -1: self._meta['psize'] = psize
# TODO add bulk diffusion constant
# line profile
def getline_obj(self, frame=-1, coords=None, dtypes='orig'):
""" generate line object using coordinates """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
self._lineobject = _getline_obj(img, coords=coords)
return self._lineobject
def getline_x(self, frame=-1, y=-1, dtypes='orig', **kwargs):
""" get line profile along x axis """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
return self._getline_x(img, y=y, **kwargs)
def getline_y(self, frame=-1, x=-1, dtypes='orig'):
""" get line profile along y axis """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
return self._getline_y(img, x=x)
def _getline_x(self, img, y=-1, ignore_wall=False):
""" get line profile along x axis using coordinates """
if not ignore_wall:
xs, xf = self._meta['wall1'], self._meta['wall2']
else:
# fall back to the full image width when wall limits are ignored
xs, xf = 0, img.shape[1] - 1
return _getline_obj(img, coords=[xs, y, xf, y])
def _getline_y(self, img, x=-1, ignore_wall=False):
""" get line profile along y axis using coordinates """
if not ignore_wall:
ys, yf = self._meta['range1'], self._meta['range2']
else:
# fall back to the full image height when range limits are ignored
ys, yf = 0, img.shape[0] - 1
return _getline_obj(img, coords=[x, ys, x, yf])
def getlines_x_shadow(self, locations, raw=False):
""" get lines with background subtraction """
results = []
results_raw = []
for i in range(len(locations)):
line = _zprofile(self.tmean(), locations[i])
results_raw.append(np.copy(line))
line -= self._findShadowline(locations[i])
results.append(_smooth(line, window_len=self._smoothing))
if raw:
return (results, results_raw)
else:
return results
# fit line profile with various methods
def fitline_x(self, frame=-1, y=-1, method='peaks', **kwargs):
""" fitting line intensity profile along x axis """
line_obj = self.getline_x(frame=frame, dtypes='float', y=y)
self._lineobject = line_obj
return self._fitline(line_obj, method=method, **kwargs)
def fitline_y(self, frame=-1, x=-1, method='peaks', **kwargs):
""" fitting line intensity profile along y axis """
line_obj = self.getline_y(frame=frame, dtypes='float', x=x)
self._lineobject = line_obj
return self._fitline(line_obj, method=method, **kwargs)
def _fitline_x(self, img, y=-1, method='peaks', **kwargs):
line_obj = self._getline_x(img, y=y)
return self._fitline(line_obj, method=method, **kwargs)
def _fitline_y(self, img, x=-1, method='peaks', **kwargs):
line_obj = self._getline_y(img, x=x)
return self._fitline(line_obj, method=method, **kwargs)
def _fitline(self, line_obj, method='gaussian', **kwargs):
""" get line profile and find peak """
nu = kwargs.pop('nu', 100)
if method == 'gaussian':
if self._debug: print('... gaussian fit')
line_obj.fit_gaussian_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'gaussian2':
if self._debug: print('... double gaussian fit')
line_obj.fit_gaussian2_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'gaussian3':
if self._debug: print('... triple gaussian fit')
line_obj.fit_gaussian3_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'peaks':
if self._debug: print('... peak find fit')
line_obj.fit_peaks(**kwargs)
elif method == 'gcdf':
if self._debug: print('... gaussian cdf fit')
line_obj.fit_gcdf(nu=nu)
return line_obj
def fitlines_x(self, locs=-1, method='gaussian', update=False, **kwargs):
return self._fitlines_x(self.tmean(), locs=locs, method=method, update=update, **kwargs)
def _fitlines_x(self, img, locs=-1, method='gaussian', update=False, **kwargs):
""" get peak position datasheet at locs """
# set all y
if locs == -1:
locs = range(int(self._meta['range1']), int(self._meta['range2']))
# read from cache
if (not update) and os.path.isfile(self._peakdatafname):
if self._debug: print('... read from %s' % self._peakdatafname)
self._peakdata = pd.read_csv(self._peakdatafname, index_col=0)
else:
if self._debug: print('... create %s' % self._peakdatafname)
d = np.zeros((img.shape[1], 6)) - 1
self._peakdata = pd.DataFrame(d, index=range(img.shape[1]), columns=['loc', 'peak', 'delta', 'rdelta', 'l_inp', 'r_inp'])
# iterate over all range
count = 0
keep_debug = self._debug
self._debug = False
for i in tqdm_notebook(locs):
if update or (self._peakdata['loc'].loc[i] == -1):
line_obj = self._fitline(self._getline_x(img, y=i), method=method, **kwargs)
self._peakdata['loc'].loc[i] = i
try:
self._peakdata['peak'].loc[i] = line_obj._peaks[0]
self._peakdata['delta'].loc[i] = line_obj._peaks[0] - line_obj._left_inp[0]
self._peakdata['rdelta'].loc[i] = line_obj._right_inp[0] - line_obj._peaks[0]
self._peakdata['l_inp'].loc[i] = line_obj._left_inp[0]
self._peakdata['r_inp'].loc[i] = line_obj._right_inp[0]
except: pass
count += 1
# save if updated
if count > 0:
if self._debug: print('... save to %s' % self._peakdatafname)
self._peakdata.to_csv(self._peakdatafname)
self._debug = keep_debug
return self._peakdata
# show line profiles
def _show_lineobj(self, line_obj, msg='', save=False, **kwargs):
""" show image and line profile """
x0, y0, x1, y1 = line_obj.get_coords()
_, z = line_obj.get()
plt.clf()
fig = plt.figure(figsize=(11, 5))
# plot image with colormap bar
self._ax1 = fig.add_subplot(121)
self._meta._zrange = [z.min(), z.max()]
self._showimage(line_obj._img, autorange=False, wall=True, ax=self._ax1)
self._ax1.plot([x0, x1], [y0, y1], 'ro-')
self._ax1.annotate(str(line_obj._loc), xy=(x1+3, y1), va='top', ha='left', color='red')
# plot line profile
self._ax2 = plt.axes([0.62, 0.32, 0.35, 0.55])
line_obj.plot(msg=msg, ax=self._ax2, **kwargs)
if save:
savename = self._meta['basename'] + '_rfit_l{}.pdf'.format(line_obj._loc)
plt.savefig(savename, dpi=200)
if self._debug: print('... save to {}'.format(savename))
def showline_x(self, frame=-1, y=-1, dtypes='orig', msg='', **kwargs):
self._lineobject = self.getline_x(frame=frame, y=y, dtypes=dtypes)
self._show_lineobj(self._lineobject, msg=msg, **kwargs)
def showfit_x(self, frame=-1, y=-1, method='gaussian', msg='', **kwargs):
self._lineobject = self.fitline_x(frame=frame, y=y, method=method)
self._show_lineobj(self._lineobject, msg=msg, **kwargs)
def showfit_peaks(self, ranges=[], method='gaussian', ax=None):
""" show peakdata with image """
if len(self._peakdata) == 0:
self.fitlines_x(method='gaussian')
if len(ranges) == 2:
self._meta['range1'] = ranges[0]
self._meta['range2'] = ranges[1]
if ax is None:
plt.clf()
fig = plt.figure(figsize=(10, 5))
ax = plt.gcf().gca()
# plot image
self._showimage(self.tmean(), frameNumber=False, ax=ax, wall=True)
# plot transition
tmp = self._peakdata.iloc[self._meta['range1']: self._meta['range2']]
x = tmp['loc']
y = tmp['peak']
dy1 = tmp['l_inp']
dy2 = tmp['r_inp']
ax.plot(dy1, x, '.', color='gray', markersize=1, alpha=0.8, label='')
ax.plot(dy2, x, '.', color='gray', markersize=1, alpha=0.8, label=r'$\sigma$')
ax.plot(y, x, '.', color='red', markersize=1, label='peak', alpha=0.8)
ax.legend(loc='best')
def showfit_angles(self, ranges=[], method='gaussian', show=True, save=True, **kwargs):
""" show peak positions and calculate angle """
# calculate migration angle
if len(ranges) == 2:
self._meta['range1'] = ranges[0]
self._meta['range2'] = ranges[1]
if len(self._peakdata) == 0:
self.fitlines_x(method=method)
tmp = self._peakdata.iloc[self._meta['range1']: self._meta['range2']]
x = np.asarray(tmp['loc'])
y = np.asarray(tmp['peak'].ffill())
if self._debug: print('... initial: {}, {}'.format(y.ptp()/x.ptp(), y[0]))
kr = robust_line_fit(x, y, nu=100.0, debug=self._debug, initial=[y.ptp()/x.ptp(), y[0]], **kwargs)
if self._debug: print('... final: {}, {}'.format(kr.x[0], kr.x[1]))
yr = line(kr.x, x)
self._meta['mangle'] = np.arctan(kr.x[0]) * 180.0 / np.pi
self._meta.save()
if show:
# plot peak positions
plt.clf()
fig = plt.figure(figsize=(11, 5))
ax1 = fig.add_subplot(121)
self.showfit_peaks(ranges=ranges, ax=ax1)
ax2 = plt.axes([0.62, 0.32, 0.35, 0.55])
# plot peak position lines
ax2.plot(x, y)
ax2.plot(x, yr, '--', label='fit')
ax2.set_xlabel('locations [pixel]')
ax2.set_ylabel('peak positions [pixel]')
msg = 'Shift: {:8.4f} over {:3d} Slope: {:12.4f}\nAngle: {:12.4f} [deg] y0: {:12.4f}'.format(yr.ptp(), self._meta['range2'] - self._meta['range1'], kr.x[0], self._meta['mangle'], kr.x[1])
ax2.text(0.1, -0.4, msg, ha='left', transform=ax2.transAxes)
ax2.legend(loc='best')
if show and save:
savename = self._meta['basename'] + '_angle.pdf'
if self._debug: print('... save to %s' % savename)
plt.savefig(savename, dpi=300)
plt.show()
def showfit_sigmas(self, ranges=[], show=True, save=True, colname="delta", **kwargs):
""" show sigma and calculate diffusion coefficient """
if len(ranges) == 2:
self._meta['range1'] = ranges[0]
self._meta['range2'] = ranges[1]
if len(self._peakdata) == 0:
self.fitlines_x()
tmp = self._peakdata.iloc[self._meta['range1']: self._meta['range2']]
x = np.asarray(tmp['loc'])
y = np.asarray(tmp[colname].ffill())
if self._debug: print('... initial: {}, {}'.format(y.ptp()/x.ptp(), y[0]))
kr = robust_line_fit(x, y**2, nu=300.0, debug=self._debug, initial=[y.ptp()/x.ptp(), y[0]], **kwargs)
if self._debug: print('... final: {}, {}'.format(kr.x[0], kr.x[1]))
yr = line(kr.x, x)
self._meta['Pe'] = 2.0 * self._meta['channelWidth'] / kr.x[0]
self._meta['D'] = self._meta['u'] * self._meta['arrayWidth'] / self._meta['Pe'] # um^2/s
self._meta.save()
if show:
plt.clf()
fig = plt.figure(figsize=(11, 5))
# plot peak positions
ax1 = fig.add_subplot(121)
self.showfit_peaks(ranges=ranges, ax=ax1)
# plot peak position lines
ax2 = plt.axes([0.62, 0.32, 0.35, 0.55])
ax2.plot(x, y**2, label=r'$\sigma^2$')
ax2.plot(x, yr, '--', label='fit')
ax2.set_xlabel(r'locations [pixel]')
ax2.set_ylabel(r'sigma^2 [pixel^2]')
msg = 'D: {:8.4f} [um2/s] Peclet Number: {:8.4f}\nSlope: {:13.4f} y0: {:12.4f}'.format(self._meta['D'], self._meta['Pe'], kr.x[0], kr.x[1])
ax2.text(0.1, -0.4, msg, ha='left', transform=ax2.transAxes)
ax2.legend(loc='best')
if show and save:
savename = self._meta['basename'] + '_sigma.pdf'
if self._debug: print('... save to %s' % savename)
plt.savefig(savename, dpi=300)
plt.show()
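# (editorial note) showfit_sigmas: the robust line fit above models sigma^2(x)
# as slope*x + intercept; the Peclet number is then Pe = 2*channelWidth/slope
# and the diffusion constant D = u*arrayWidth/Pe in um^2/s, with channelWidth,
# arrayWidth and u taken from the experiment metadata (self._meta).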
# show multiple line profiles
def showlines(self, lines=-1, dir='y', log=False, window=[0, 0], save=False, fit=False, smooth=False):
""" plot line profiles at multiple locations """
if lines == -1:
if dir == 'y':
lines = range(0, self._width, 50)
else:
lines = range(0, self._height, 50)
tmp = self.tmean()
fig = plt.gcf()
fig.set_size_inches(12, 6)
ax1 = fig.add_subplot(121)
self._im = ax1.imshow(tmp, clim=self._range, cmap=self._cmapname, origin='upper')
ax1.axis('image')
ax1.yaxis.set_major_locator(ticker.MultipleLocator(50))
ax2 = fig.add_subplot(122)
if log:
ax2.set_yscale('log')
# print("... Locations ",lines)
# set window
if window[1] == 0:
wl = 0
if dir == 'x':
wr = self._width
else:
wr = self._height
else:
wl = window[0]
wr = window[1]
if dir == 'x':
ax1.hlines(window, 0, self._width, color='gray', alpha=0.5, linestyle='dashed')
else:
ax1.vlines(window, 0, self._height, color='gray', alpha=0.5, linestyle='dashed')
print("... Window: %i, %i" % (wl, wr))
res_line = np.zeros((len(lines), wr-wl))
ax2.set_xlim(wl, wr)
ax2.set_ylabel('Normalized Intensity')
cmap = plt.get_cmap(self._cmapname)
if dir == 'x':
ax1.vlines(lines, wl, wr, color='yellow', alpha=0.5, linestyle='dashed')
ax2.set_xlabel('y (pixel)')
i = 0
for l in lines:
color = cmap(float(i) / len(lines))
if smooth:
ax2.plot(range(wl, wr + 1), _smooth(tmp[wl:wr, l]), label=l, color=color)
res_line[i, :] = _smooth(tmp[wl:wr, l])[:-1]
else:
ax2.plot(range(wl, wr), tmp[wl:wr, l], label=l, color=color)
res_line[i, :] = tmp[wl:wr, l]
ax1.annotate(l, xy=(l, wl), va='bottom', color='white')
print("... Loc: %i Peak: %i Value: %f" % (l, np.argmax(tmp[wl:wr, l]), np.max(tmp[wl:wr, l])))
if fit:
out = _fitGaussian(np.arange(wl, wr), tmp[wl:wr, l], baseline=self._baseline)
ax2.plot(range(wl, wr), out.best_fit + self._baseline, '--', color=color)
msg = "%.2f,%.2f" % (out.best_values['center'], out.best_values['sigma'])
fx = np.argmax(tmp[:, l])
fy = np.max(tmp[:, l]) + self._baseline
ax2.annotate(msg, xy=(fx, fy), va='baseline', ha=self._beamdirection)
self._beaminfo.append([l, out.best_values['center'], out.best_values['sigma']**2])
i += 1
elif dir == 'y':
ax1.hlines(lines, wl, wr, color='yellow', alpha=0.5, linestyle='dashed')
ax2.set_xlabel('x (pixel)')
i = 0
for l in lines:
color = cmap(float(i) / len(lines))
if smooth:
ax2.plot(range(wl, wr + 1), _smooth(tmp[l, wl:wr]), label=l, color=color)
res_line[i, :] = _smooth(tmp[l, wl:wr])[:-1]
else:
ax2.plot(range(wl, wr), tmp[l, wl:wr], label=l, color=color)
res_line[i, :] = tmp[l, wl:wr]
ax1.annotate(l, xy=(wl, l), ha='right', color='white')
print("... Loc: %i Peak: %i Value: %f" % (l, np.argmax(tmp[l, wl:wr]), np.max(tmp[l, wl:wr])))
if fit:
out = _fitGaussian(np.arange(wl, wr), tmp[l, wl:wr], baseline=self._baseline)
ax2.plot(range(wl, wr), out.best_fit + self._baseline, '--', color=color)
msg = "%.2f,%.2f" % (out.best_values['center'], out.best_values['sigma'])
fx = np.argmax(tmp[l, :])
fy = np.max(tmp[l, :]) + self._baseline
ax2.annotate(msg, xy=(fx, fy), va='baseline', ha=self._beamdirection)
self._beaminfo.append([l, out.best_values['center'], out.best_values['sigma']**2])
i += 1
else:
return False
plt.tight_layout()
plt.legend(loc='best')
if save:
linestr = "_" + dir + "l".join(['_' + str(l) for l in lines])
filename = self._fname[:-4] + linestr + '.pdf'
plt.savefig(filename, dpi=100)
plt.show()
def showlines_allx(self, bg=0.0, bgstd=0.0, window=None):
""" show line profile along x axis """
_plotAxis(self.tmean(), 0, background=bg, backstd=bgstd, window=window)
def showlines_ally(self, bg=0.0, bgstd=0.0, window=None):
""" show line profile along y axis """
_plotAxis(self.tmean(), 1, background=bg, backstd=bgstd, window=window)
# experimental data
def detect_channel(self, window=11, compareRatio=0.2, minimumIntensity=0.1, angle=0.0, show=True):
""" find channel wall positions """
if angle > 0:
img = _rotate_bound(self.tmean('float'), angle)
else:
img = self.tmean(dtypes='float')
result = _find_channel(img, window=window, compareRatio=compareRatio, minimumIntensity=minimumIntensity)
self._meta.update_wall(result)
if show:
ax = plt.gcf().gca()
self._showimage(img, simple=False, ax=ax, wall=True)
return result
def detect_angles(self, frame=-1, method1='canny', method2=None, min_length=100, show=True):
""" find angle of camera based on line properties """
# preprocess image
self.reverse(frame=frame)
img = self.filters(frame=frame, method=method1)
if method2 is not None:
img = self.threshold(frame=frame, method=method2, show=False)
# detect lines
res = _detect_angles(img, min_length=min_length, show=show)
# reverse filtered image
self.reverse(frame=frame)
return res
# adjust line profiles
def _find_shadowline_x(self, img, y=-1, xf=-1, show=True):
""" find shadow line using inverse abs function """
wallinfo = self._wallinfo
if len(wallinfo) < 2:
raise ValueError('... need wall information: {}'.format(wallinfo))
x, z = self._getline_x(img, y=y, ignore_wall=True).get()
xf = len(x) if xf == -1 else xf
# adjust background
xmin1 = np.argmin(_smooth(z[:wallinfo[0]], window_len=7))
xmin2 = np.argmin(_smooth(z[wallinfo[1]:xf], window_len=7)) + wallinfo[1]
zmin1, zmin2 = z[xmin1], z[xmin2]
background = (zmin2 - zmin1)/(xmin2 - xmin1) * (x - xmin1) + zmin1
# peak_position = np.argmax(xline)
# if peak_position < self._peak[0] or peak_position > self._peak[1]:
# return np.zeros_like(xline)
# fit with right side of wall
zRight = z[wallinfo[1]:xf] - background[wallinfo[1]:xf]
xRight = x[wallinfo[1]:xf]
kr = robust_inverseabs_fit(xRight, zRight, initial=[zRight.max(), xRight[0], 1.0], verb=self._debug)
result = inverseabs(kr.x, x) + background
if show:
plt.plot(x, z, label='raw')
plt.plot(x, result, label='shadow')
plt.plot(x, background, label='background')
msg = 'y: {} back: {:.4f}, {:.4f} \nk: {:.2f} {:.2f} {:.2f}'.format(y, zmin1, zmin2, kr.x[0], kr.x[1], kr.x[2])
plt.annotate(msg, xy=(0, result.max()))
plt.vlines(wallinfo[:2], z.min(), z.max(), linestyles='dashed', alpha=0.5)
plt.legend(loc='best')
return (result, background)
def _inflectionpoints(self, xdata, verb=False):
""" find inflection points in lineprofile """
x_smooth = _smooth(xdata, window_len=self._smoothing)
dx = np.gradient(x_smooth)
dx_smooth = _smooth(dx, window_len=self._smoothing)
wall1 = self._wall1 + self._wallpos
wall2 = self._wall2 + self._wallpos
infmax = np.argmax(dx_smooth[wall1:wall2]) + wall1 - 1
(localmaxs, ) = argrelmax(dx_smooth[wall1:wall2], order=self._searchrange)
if verb:
print('... smoothing: %i, order: %i' % (self._smoothing, self._searchrange))
print('... find %i local maximums' % len(localmaxs))
# check wall
if np.abs(infmax - wall2) < 0.1*self._arrayWidth:
# 10% of array width
infmax = _find_before(localmaxs + wall1, infmax)
return (localmaxs+wall1, infmax)
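    # A minimal, self-contained sketch of the inflection-point search used above:
    # smooth the profile, differentiate it, and look for local maxima of the slope
    # with scipy.signal.argrelmax. Kept as comments so nothing executes at
    # class-definition time; the window length and order are arbitrary example values.
    #
    #   import numpy as np
    #   from scipy.signal import argrelmax
    #   profile = np.sin(np.linspace(0, 3 * np.pi, 200)) + 0.05 * np.random.randn(200)
    #   kernel = np.ones(11) / 11.0
    #   smoothed = np.convolve(profile, kernel, mode='same')
    #   slope = np.convolve(np.gradient(smoothed), kernel, mode='same')
    #   (local_maxima,) = argrelmax(slope, order=10)      # analogous to localmaxs
    #   strongest = np.argmax(slope)                      # analogous to infmax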
def plot_lines_3d(self, frame=-1, locations=[]):
""" plot line profile in 3d """
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
if len(locations) == 0:
locations = range(0, self._height, 10)
lines = []
for i in locations:
lines.append(self.getline_x(frame=-1, y=i).get()[1])
if len(self._wallinfo) > 1:
xs = range(self._wallinfo[0], self._wallinfo[1])
else:
xs = range(0, self._width)
verts = []
for i in range(len(locations)):
verts.append(list(zip(xs, lines[i])))
poly = PolyCollection(verts)
poly.set_alpha(0.7)
ax.add_collection3d(poly, zs=locations, zdir='y')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel('Intensity')
ax.set_xlim3d(np.min(xs), np.max(xs))
ax.set_ylim3d(np.max(locations), np.min(locations))
ax.set_zlim3d(0, np.max(lines))
plt.show()
def _getline_obj(img, coords=None, debug=False):
""" get line profile from (x0, y0) to (x1, y1) """
    if coords is not None:
        x0, y0, x1, y1 = coords
        if coords[1] == -1:
            y1 = y0 = img.shape[0]//2
        if coords[0] == -1:
            x1 = x0 = img.shape[1]//2
    else:
        x0, y0, x1, y1 = 0, 0, img.shape[1], img.shape[0]
return LineObject(img, x0, y0, x1, y1, debug=debug)
def _detect_angles(threshold_image, min_length=100, max_line_gap=10, threshold=50, show=True, debug=False):
""" check rotation of image - detect edges and lines and calculate angle """
if 'cv2' not in dir(): import cv2
lines = cv2.HoughLinesP(threshold_image, 0.1, np.pi / 720.0, threshold, None, min_length, max_line_gap)
if debug: print(lines)
res = []
if show:
fig = plt.figure(figsize=(11, 5))
plt.imshow(threshold_image)
for line in lines:
x1, y1, x2, y2 = line[0]
theta = math.atan2(x1-x2, y1-y2) * 180.0/math.pi
x0 = (x1 + x2)*0.5
y0 = (y1 + y2)*0.5
if show:
plt.plot((x1, x2), (y1, y2))
plt.annotate('{:.2f}'.format(theta), xy=(x0, y0), va='top', color='white')
res.append(theta)
if show:
plt.xlim(0, threshold_image.shape[1])
plt.ylim(threshold_image.shape[0], 0)
plt.show()
return np.median(
|
np.array(res)
|
numpy.array
|
import sys
import numpy as np
import tensorflow as tf
import argparse
import matplotlib.pyplot as plt
from sygnals import Signal, generate_sampling
def display_time_series(x, y):
plt.plot(x, y)
plt.show()
def get_windowed_data(window_size, step, data):
start = 0
end = start + window_size
x = []
y = []
while end < data.shape[0]:
x.append(data[start:end])
y.append(data[end])
start += step
end = start + window_size
return np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)
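# Illustrative sketch (not part of the original script): how get_windowed_data above and
# the LSTM reshape below are typically combined. The helper name and the window/step
# values are arbitrary examples.
def _example_windowing():
    data = np.sin(np.linspace(0, 20 * np.pi, 500))
    x, y = get_windowed_data(window_size=20, step=1, data=data)
    # x has shape (480, 20): sliding windows; y has shape (480,): the next sample after each window
    x_lstm = np.reshape(x, (x.shape[0], x.shape[1], 1))  # (samples, timesteps, features)
    return x_lstm, y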
def reshape_input_for_lstm(x):
    # NUMBER OF ELEMENTS, WINDOW SIZE, INPUT DIMENSION
return
|
np.reshape(x, (x.shape[0], x.shape[1], 1))
|
numpy.reshape
|
# -*- coding: utf-8 -*-
import numpy as np
from platformx.plat_tensorflow.tools.processor.np_utils import ops
import random
import colorsys
import cv2
import config
import os
from PIL import Image, ImageDraw
def read_coco_labels():
path = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
f = open(path)
class_names = []
for l in f.readlines():
        l = l.strip()  # strip the trailing newline '\n'
class_names.append(l)
f.close()
# print("class_names:", class_names)
return class_names
def load_coco_names():
file_name = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
names = {}
with open(file_name) as f:
for id, name in enumerate(f):
names[id] = name
# print("names:", names)
return names
class_names = read_coco_labels()
def decode(model_output):
"""
yolov2 decode
:param model_output: darknet19 网络输出的特征图
:param output_sizes: darknet19网络输出的特征图大小,默认是 13*13(默认输入416*416,下采样32)
:param num_class:
:param anchors:
:return:
"""
# output_sizes=(13, 13)
    output_sizes = config.cfg.PREPROCESS.HEIGHT // 32, config.cfg.PREPROCESS.WIDTH // 32  # the feature map is the input image downsampled by 32
num_class = config.cfg.POSTPROCESSOR.NUM_CLASSES
# if num_class is None:
# num_class = len(class_names)
anchors = config.cfg.POSTPROCESSOR.ANCHORS
anchors = np.array(anchors)
H, W = output_sizes
    num_anchors = len(anchors)  # the anchors here are set in the config file
    print("num_anchors:", num_anchors)
    # anchors = tf.constant(anchors, dtype=tf.float32)  # convert the anchors into a tf constant list
    # 13*13*num_anchors*(num_class+5); the first dimension adapts to the batch size
print("model_output:", model_output.shape)
detection_result = np.reshape(model_output, [-1, H * W, num_anchors, num_class + 5])
print("detection_result:", detection_result.shape)
    # convert the darknet19 output into offsets, confidence, and class probabilities
    xy_offset = ops.sigmoid(detection_result[:, :, :, 0:2])  # center offset relative to the cell's top-left corner, normalized to 0-1 by sigmoid
    wh_offset = np.exp(detection_result[:, :, :, 2:4])  # width/height scale relative to the anchor, decoded with the exponential
    obj_probs = ops.sigmoid(detection_result[:, :, :, 4])  # objectness confidence, normalized to 0-1 by sigmoid
    class_probs = ops.softmax(detection_result[:, :, :, 5:])  # the network regresses raw scores; softmax turns them into class probabilities
    # build the xy coordinate of the top-left corner of every feature-map cell
    height_index = range(H)  # range(0,13)
    width_index = range(W)  # range(0,13)
    # becomes x_cell=[[0,1,...,12],...,[0,1,...,12]] and y_cell=[[0,0,...,0],[1,...,1],...,[12,...,12]]
x_cell, y_cell = np.meshgrid(height_index, width_index)
    x_cell = np.reshape(x_cell, [1, -1, 1])  # matches the [H*W, num_anchors, num_class+5] layout above
print("x_cell:", x_cell.shape)
y_cell = np.reshape(y_cell, [1, -1, 1])
print("y_cell:", y_cell.shape)
# decode
bbox_x = (x_cell + xy_offset[:, :, :, 0]) / W
bbox_y = (y_cell + xy_offset[:, :, :, 1]) / H
bbox_w = (anchors[:, 0] * wh_offset[:, :, :, 0]) / W
bbox_h = (anchors[:, 1] * wh_offset[:, :, :, 1]) / H
    # center+size box (x,y,w,h) -> xmin = x - w/2 -> corner box (xmin,ymin,xmax,ymax)
bboxes = np.stack([bbox_x - bbox_w / 2, bbox_y - bbox_h / 2,
bbox_x + bbox_w / 2, bbox_y + bbox_h / 2], axis=3)
bboxes = np.reshape(bboxes, bboxes.shape)
print("yolov2 decode bboxes:", bboxes.shape)
print("yolov2 decode obj_probs:", obj_probs.shape)
print("yolov2 decode class_probs:", class_probs.shape)
return bboxes, obj_probs, class_probs
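# Illustrative sketch (not from the original file): the box-decoding arithmetic above for a
# single toy cell, written in plain numpy instead of the ops helpers. All numbers are
# arbitrary example values.
def _example_decode_single_cell():
    cell_x, cell_y, grid_w, grid_h = 3, 5, 13, 13  # cell indices and feature-map size
    anchor_w, anchor_h = 1.5, 2.0                  # one anchor prior, in grid units
    tx, ty, tw, th = 0.2, -0.1, 0.3, 0.1           # raw network outputs for that cell
    bx = (cell_x + 1.0 / (1.0 + np.exp(-tx))) / grid_w  # sigmoid offset -> center x in 0-1
    by = (cell_y + 1.0 / (1.0 + np.exp(-ty))) / grid_h  # sigmoid offset -> center y in 0-1
    bw = anchor_w * np.exp(tw) / grid_w                 # exp-decoded width in 0-1
    bh = anchor_h * np.exp(th) / grid_h                 # exp-decoded height in 0-1
    return bx - bw / 2, by - bh / 2, bx + bw / 2, by + bh / 2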
def _get_image_path():
img_dir = config.cfg.PREPROCESS.IMG_LIST
file_list = os.listdir(img_dir)
image_path = os.path.join(img_dir, file_list[0])
return image_path
def _get_image():
image_path = _get_image_path()
image = cv2.imread(image_path)
return image
def _get_image_PIL():
image_path = _get_image_path()
image = Image.open(image_path)
return image
# [2] Filter the decoded bounding boxes -- NMS (post-processing)
def postprocess(bboxes, obj_probs, class_probs):
threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD
image = _get_image()
image_shape = image.shape[:2]
print("image_shape ", image_shape)
if image_shape is None:
image_shape = (416, 416)
    # bboxes: one row per box in the image; the 4 columns are (xmin, ymin, xmax, ymax)
    bboxes = np.reshape(bboxes, [-1, 4])
    # scale every box back to its real position in the image
bboxes[:, 0:1] *= float(image_shape[1]) # xmin*width
bboxes[:, 1:2] *= float(image_shape[0]) # ymin*height
bboxes[:, 2:3] *= float(image_shape[1]) # xmax*width
bboxes[:, 3:4] *= float(image_shape[0]) # ymax*height
bboxes = bboxes.astype(np.int32)
    # (1) cut the box: clip any part of a box that falls outside the image (0,0)-(415,415)
bbox_min_max = [0, 0, image_shape[1] - 1, image_shape[0] - 1]
bboxes = bboxes_cut(bbox_min_max, bboxes)
    # confidence * max class probability = class confidence score
obj_probs = np.reshape(obj_probs, [-1])
class_probs = np.reshape(class_probs, [len(obj_probs), -1])
    class_max_index = np.argmax(class_probs, axis=1)  # index of the most probable class
class_probs = class_probs[np.arange(len(obj_probs)), class_max_index]
scores = obj_probs * class_probs
    # keep only the boxes whose class confidence score exceeds the threshold
keep_index = scores > threshold
class_max_index = class_max_index[keep_index]
scores = scores[keep_index]
bboxes = bboxes[keep_index]
    # (2) sort and keep top_k (400 by default)
class_max_index, scores, bboxes = bboxes_sort(class_max_index, scores, bboxes)
    # (3) NMS
class_max_index, scores, bboxes = bboxes_nms(class_max_index, scores, bboxes)
draw_detection(image, bboxes, scores, class_max_index, class_names)
return bboxes, scores, class_max_index
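# Hedged sketch (this is not the original bboxes_nms, which is defined elsewhere in this
# module): a minimal greedy IoU-based NMS of the kind the call above presumably performs.
# The helper name and the 0.5 IoU threshold are example choices.
def _example_greedy_nms(bboxes, scores, iou_threshold=0.5):
    order = np.argsort(scores)[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        # intersection of the best box with all remaining boxes
        xx1 = np.maximum(bboxes[i, 0], bboxes[rest, 0])
        yy1 = np.maximum(bboxes[i, 1], bboxes[rest, 1])
        xx2 = np.minimum(bboxes[i, 2], bboxes[rest, 2])
        yy2 = np.minimum(bboxes[i, 3], bboxes[rest, 3])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        area_i = (bboxes[i, 2] - bboxes[i, 0]) * (bboxes[i, 3] - bboxes[i, 1])
        area_r = (bboxes[rest, 2] - bboxes[rest, 0]) * (bboxes[rest, 3] - bboxes[rest, 1])
        iou = inter / (area_i + area_r - inter + 1e-9)
        order = rest[iou < iou_threshold]  # drop boxes that overlap the kept one too much
    return np.array(keep, dtype=int)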
# [3] Draw the filtered bounding boxes
def draw_detection(im, bboxes, scores, cls_inds, labels, thr=0.3):
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / float(len(labels)), 1., 1.) for x in range(len(labels))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
# draw image
imgcv = np.copy(im)
h, w, _ = imgcv.shape
for i, box in enumerate(bboxes):
if scores[i] < thr:
continue
cls_indx = cls_inds[i]
thick = int((h + w) / 300)
cv2.rectangle(imgcv, (box[0], box[1]), (box[2], box[3]), colors[cls_indx], thick)
mess = '%s: %.3f' % (labels[cls_indx], scores[i])
if box[1] < 20:
text_loc = (box[0] + 2, box[1] + 15)
else:
text_loc = (box[0], box[1] - 10)
        # cv2.rectangle(imgcv, (box[0], box[1]-20), ((box[0]+box[2])//3+120, box[1]-8), (125, 125, 125), -1)  # background for putText
cv2.putText(imgcv, mess, text_loc, cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * h, (255, 255, 255), thick // 3)
cv2.imwrite("yolov2_detect_result.jpg", imgcv)
print('YOLO_v2 detection has done!')
######################## For [2]: filtering the decoded bounding boxes #########################################
# (1) cut the box: clip any part of a box that falls outside the image (0,0)-(415,415)
def bboxes_cut(bbox_min_max, bboxes):
bboxes = np.copy(bboxes)
bboxes = np.transpose(bboxes)
bbox_min_max = np.transpose(bbox_min_max)
# cut the box
bboxes[0] =
|
np.maximum(bboxes[0], bbox_min_max[0])
|
numpy.maximum
|
import os
import cv2
import numpy as np
import codecs
import json
import xml.etree.ElementTree as et
# tree = et.parse("W:\\2021-12-07-01.xml")
# root = tree.getroot()
# for cam in root:
# ...
# ------------------------------------------------------------------------
def changeCamName(camname):
"""
Change camera names to match actual image data.
:param camname: camera name with "bottom", "top" or "colour"
:return:
"""
realcamname = camname.replace("bottom", "primary")
realcamname = realcamname.replace("top", "secondary")
return realcamname.replace("colour", "texture")
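# Illustrative usage (not in the original script): the renaming maps XML camera names onto
# the names used by the image files. The helper name and "pod1" prefix are arbitrary examples.
def _example_cam_names():
    assert changeCamName("pod1bottom") == "pod1primary"
    assert changeCamName("pod1top") == "pod1secondary"
    assert changeCamName("pod1colour") == "pod1texture"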
# ------------------------------------------------------------------------
def calibrate(objpoints, imgpoints, img, xml_cam):
"""
Calibrate camera with a set of calibration images. Parameters from:
https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d
:param objpoints:
:param imgpoints:
:param img:
:return:
"""
imgpoints = np.asarray(imgpoints, dtype=np.float32)
assert imgpoints.shape[0] == objpoints.shape[0]
assert img is not None
# initial guess for intrinsic matrix and distCoeffs
intrmatrix = np.array([[6700.0, 0.0, 800.0], [0.0, 6700.0, 600.0], [0.0, 0.0, 1.0]], dtype=np.float32)
# intrmatrix = np.array([float(x[1]) for x in xml_cam[0].getchildren()[0].items()], dtype=np.float32).reshape((3,3))
# distCoeffs = np.array([float(x[1]) for x in xml_cam[0].getchildren()[1].items()] + [0.0], dtype=np.float32)
distCoeffs = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[::-1], intrmatrix, distCoeffs,
flags= cv2.CALIB_ZERO_TANGENT_DIST |
cv2.CALIB_USE_INTRINSIC_GUESS |
cv2.CALIB_FIX_K1 | cv2.CALIB_FIX_K2
| cv2.CALIB_FIX_K3)
print(f"mtx: {mtx}")
print(f"dist: {dist}")
print(f"rvecs: {rvecs[1]}")
print(f"tvecs: {tvecs[1]}")
rmat = np.asarray([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]], dtype=np.float64)
cv2.Rodrigues(rvecs[0], rmat)
print(f"rmat: {rmat}")
if ret:
return {"intrinsic": mtx.tolist(), "rotation": rmat.tolist(),
"translation": tvecs[0].tolist(), "distortion": dist.tolist()}
# ------------------------------------------------------------------------
# 3d object points are known: 10x10 circle grid target, 2cm offsets. 3D origin is at the target's center crosshair.
objpoints = [] # 3d point in real world space
for y in range(9, -10, -2):
x = [x for x in range(-9, 10, 2)]
objpoints.append(list([list(a) for a in zip(x, [y] * 10, [0] * 10)]))
objpoints = [i for sub in objpoints for i in sub]
print(f"objpoints:\n{objpoints}")
objpoints = [objpoints] * 18
objpoints = np.asarray(objpoints, dtype=np.float32)
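# Sanity-check sketch (not in the original script): the array built above should hold 18
# copies of a 10x10 grid of (x, y, 0) points with 2 cm spacing, centred on the origin.
assert objpoints.shape == (18, 100, 3)
assert objpoints.dtype == np.float32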
# Blob detector for better circle center detection
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 1
params.minCircularity = 0.05
params.minConvexity = 0.50
blobdetector = cv2.SimpleBlobDetector_create(params)
calibdict = {}
imgpoints = [] # 2d points in image plane.
img = None
prevcamname = "pod1primary"
path = "C:/Users/Henkka/Projects/invrend-fpc/data/calibration/combined/extracted"
# path = "C:/Users/Henkka/Projects/invrend-fpc/data/calibration/2021-07-01"
# path = r"\\rmd.remedy.fi\Capture\System\RAW\Calibrations\2021-12-07"
images = os.listdir(path)
# DI xmls
tree = et.parse(r"C:\Users\Henkka\Projects\invrend-fpc\data\cube\20220310\2021-12-07-01.dicx")
xml_cams = tree.getroot()[3]
# different threshold values to try to account for reflections in the calibration target
thresholds = [200, 190, 180, 170, 160, 150, 140]
# for root, dirs, files in os.walk(path):
for fname in images:
camname = fname.split("_")[0]
# print(f"cam: {fname}")
# assume images are processed in camera order
if camname != prevcamname:
# all images from one camera have been processed
realcamname = changeCamName(prevcamname)
print("Calibrating...")
calibdict[realcamname] = calibrate(objpoints, imgpoints, img, [x for x in xml_cams if x.get('name') == camname+"_0001"][0])
imgpoints = []
# read image as grayscale
# invert, blur, and threshold filter for easier circle detection
img = cv2.imread(f"{path}/{fname}", flags=cv2.IMREAD_GRAYSCALE)
img = cv2.bitwise_not(img)
kernel = np.ones((3, 3), np.float32) / 25
preimg = cv2.filter2D(img, -1, kernel)
# Find the circle centers
# TODO: one could also do blobDetector.detect() and drawKeypoints() before findCirclesGrid() for easier detection
for thres in thresholds:
ret, img = cv2.threshold(img, thres, 255, cv2.THRESH_BINARY)
cv2.imshow('thresh', img)
cv2.waitKey(100)
ret, centers = cv2.findCirclesGrid(img,
|
np.asarray([10, 10])
|
numpy.asarray
|
#!/usr/bin/env python
# Copyright (c) 2017 The Board of Trustees of the University of Illinois
# All rights reserved.
#
# Developed by: <NAME>, <NAME>, <NAME>
# NCSA Gravity Group
# National Center for Supercomputing Applications
# University of Illinois at Urbana-Champaign
# http://gravity.ncsa.illinois.edu/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimers.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
#
# Neither the names of the National Center for Supercomputing Applications,
# University of Illinois at Urbana-Champaign, nor the names of its
# contributors may be used to endorse or promote products derived from this
# Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
# Based off of SimulationTools Mathematica Package
# http://www.simulationtools.org/
import numpy as np
import glob
import os
import h5py
import string
import math
import sys
import warnings
import scipy.optimize
import scipy.interpolate
#-----Function Definitions-----#
#Function used in getting psi4 from simulation
def joinDsets(dsets):
"""joints multiple datasets which each have a
time like first column, eg iteration number of
time. Removes overlapping segments, keeping the
last segment.
dsets = iterable of 2d array like objects with data"""
# joins multiple datasets of which the first column is assumed to be "time"
if(not dsets):
return None
length = 0
for d in dsets:
length += len(d)
newshape = list(dsets[0].shape)
newshape[0] = length
dset = np.empty(shape=newshape, dtype=dsets[0].dtype)
usedlength = 0
for d in dsets:
insertpointidx = np.where(dset[0:usedlength,0] >= d[0,0])
if(insertpointidx[0].size):
insertpoint = insertpointidx[0][0]
else:
insertpoint = usedlength
newlength = insertpoint+len(d)
dset[insertpoint:newlength] = d
usedlength = newlength
return dset[0:usedlength]
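# Illustrative sketch (not part of the original script): joinDsets drops the overlap between
# restart segments, keeping the later one. The helper name and values are arbitrary examples.
def _example_joinDsets():
    seg1 = np.array([[0., 1.], [1., 2.], [2., 3.]])  # t = 0, 1, 2
    seg2 = np.array([[2., 30.], [3., 40.]])          # restart overlapping at t = 2
    joined = joinDsets([seg1, seg2])
    # joined keeps t = 0, 1 from seg1 and t = 2, 3 from seg2
    return joined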
#Function used in getting psi4 from simulation
def loadHDF5Series(nameglob, series):
"""load HDF5 timeseries data and concatenate the content of multiple files
nameglob = a shell glob that matches all files to be loaded,
files are sorted alphabetically
series = HDF5 dataset name of dataset to load from files"""
dsets = list()
for fn in sorted(glob.glob(nameglob)):
        fh = h5py.File(fn, "r")
dsets.append(fh[series])
return joinDsets(dsets)
#Convert radial to tortoise coordinates
def RadialToTortoise(r, M):
"""
Convert the radial coordinate to the tortoise coordinate
r = radial coordinate
M = ADMMass used to convert coordinate
return = tortoise coordinate value
"""
return r + 2. * M * math.log( r / (2. * M) - 1.)
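# Worked example (not part of the original script): for an extraction radius r = 100 and
# M = 1, the tortoise coordinate is r* = r + 2 M ln(r/(2M) - 1) = 100 + 2 ln(49), so
# RadialToTortoise(100.0, 1.0) is approximately 107.78.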
#Convert modified psi4 to strain
def psi4ToStrain(mp_psi4, f0):
"""
Convert the input mp_psi4 data to the strain of the gravitational wave
mp_psi4 = Weyl scalar result from simulation
f0 = cutoff frequency
return = strain (h) of the gravitational wave
"""
#TODO: Check for uniform spacing in time
t0 = mp_psi4[:, 0]
list_len = len(t0)
complexPsi = np.zeros(list_len, dtype=np.complex_)
complexPsi = mp_psi4[:, 1]+1.j*mp_psi4[:, 2]
freq, psif = myFourierTransform(t0, complexPsi)
dhf = ffi(freq, psif, f0)
hf = ffi(freq, dhf, f0)
time, h = myFourierTransformInverse(freq, hf, t0[0])
hTable = np.column_stack((time, h))
return hTable
#Fixed frequency integration
# See https://arxiv.org/abs/1508.07250 for method
def ffi(freq, data, f0):
"""
Integrates the data according to the input frequency and cutoff frequency
freq = fourier transform frequency
data = input on which ffi is performed
f0 = cutoff frequency
"""
f1 = f0/(2*math.pi)
fs = freq
gs = data
mask1 = (np.sign((fs/f1) - 1) + 1)/2
mask2 = (np.sign((-fs/f1) - 1) + 1)/2
mask = 1 - (1 - mask1) * (1 - mask2)
fs2 = mask * fs + (1-mask) * f1 * np.sign(fs - np.finfo(float).eps)
new_gs = gs/(2*math.pi*1.j*fs2)
return new_gs
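# Illustrative sketch (not part of the original script): fixed-frequency integration divides
# by 2*pi*i*f but clamps |f| below f0/(2*pi), so low-frequency noise is not amplified.
# Integrating a pure tone recovers the analytic antiderivative. The helper name and the
# tone/cutoff values are arbitrary examples.
def _example_ffi():
    t = np.linspace(0.0, 100.0, 4096, endpoint=False)
    signal = np.exp(2j * np.pi * 0.2 * t)             # complex tone at 0.2 Hz
    spectrum = np.fft.fft(signal, norm="ortho")
    freq = np.fft.fftfreq(t.size, d=t[1] - t[0])
    integrated = np.fft.ifft(ffi(freq, spectrum, f0=0.1), norm="ortho")
    # integrated ~ signal / (2j * pi * 0.2), up to boundary effects
    return integrated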
#Fourier Transform
def myFourierTransform(t0, complexPsi):
"""
Transforms the complexPsi data to frequency space
t0 = time data points
complexPsi = data points of Psi to be transformed
"""
psif = np.fft.fft(complexPsi, norm="ortho")
l = len(complexPsi)
n = int(math.floor(l/2))
newpsif = psif[l-n:]
newpsif = np.append(newpsif, psif[:l-n])
T = np.amin(np.diff(t0))*l
    freq = np.arange(-n, l-n)/T
return freq, newpsif
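# Note (not part of the original script): the reordering above is equivalent to
# np.fft.fftshift, i.e. the negative-frequency half of the spectrum is moved to the front so
# that freq runs monotonically from -n/T to (l-n-1)/T.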
#Inverse Fourier Transform
def myFourierTransformInverse(freq, hf, t0):
l = len(hf)
n = int(math.floor(l/2))
newhf = hf[n:]
newhf = np.append(newhf, hf[:n])
amp = np.fft.ifft(newhf, norm="ortho")
df = np.amin(np.diff(freq))
    time = t0 + np.arange(0, l)/(df*l)
return time, amp
def angular_momentum(x, q, m, chi1, chi2, LInitNR):
eta = q/(1.+q)**2.
m1 = (1.+(1.-4.*eta)**0.5)/2.
m2 = m - m1
S1 = m1**2. * chi1
S2 = m2**2. * chi2
Sl = S1+S2
Sigmal = S2/m2 - S1/m1
DeltaM = m1 - m2
mu = eta
nu = eta
    GammaE = 0.5772156649
e4 = -(123671./5760.)+(9037.* math.pi**2.)/1536.+(896.*GammaE)/15.+(-(498449./3456.)+(3157.*math.pi**2.)/576.)*nu+(301. * nu**2.)/1728.+(77.*nu**3.)/31104.+(1792. *math.log(2.))/15.
e5 = -55.13
j4 = -(5./7.)*e4+64./35.
    j5 = -(2./3.)*e5-4988./945.-656./135. * eta
    a1 = -2.18522
    a2 = 1.05185
    a3 = -2.43395
    a4 = 0.400665
    a5 = -5.9991
CapitalDelta = (1.-4.*eta)**0.5
l = (eta/x**(1./2.)*(
1. +
x*(3./2. + 1./6.*eta) +
x**2. *(27./8. - 19./8.*eta + 1./24.*eta**2.) +
x**3. *(135./16. + (-6889./144. + 41./24. * math.pi**2.)*eta + 31./24.*eta**2. + 7./1296.*eta**3.) +
x**4. *((2835./128.) + eta*j4 - (64.*eta*math.log(x)/3.))+
x**5. *((15309./256.) + eta*j5 + ((9976./105.) + (1312.*eta/15.))*eta*math.log(x))+
x**(3./2.)*(-(35./6.)*Sl - 5./2.*DeltaM* Sigmal) +
x**(5./2.)*((-(77./8.) + 427./72.*eta)*Sl + DeltaM* (-(21./8.) + 35./12.*eta)*Sigmal) +
x**(7./2.)*((-(405./16.) + 1101./16.*eta - 29./16.*eta**2.)*Sl + DeltaM*(-(81./16.) + 117./4.*eta - 15./16.*eta**2.)*Sigmal) +
(1./2. + (m1 - m2)/2. - eta)* chi1**2. * x**2. +
(1./2. + (m2 - m1)/2. - eta)* chi2**2. * x**2. +
2.*eta*chi1*chi2*x**2. +
((13.*chi1**2.)/9. +
(13.*CapitalDelta*chi1**2.)/9. -
(55.*nu*chi1**2.)/9. -
29./9.*CapitalDelta*nu*chi1**2. +
(14.*nu**2. *chi1**2.)/9. +
(7.*nu*chi1*chi2)/3. +
17./18.* nu**2. * chi1 * chi2 +
(13.* chi2**2.)/9. -
(13.*CapitalDelta*chi2**2.)/9. -
(55.*nu*chi2**2.)/9. +
29./9.*CapitalDelta*nu*chi2**2. +
(14.*nu**2. * chi2**2.)/9.)
* x**3.))
return l - LInitNR
#Get cutoff frequency
def getCutoffFrequency(sim_name):
"""
Determine cutoff frequency of simulation
sim_name = string of simulation
return = cutoff frequency
"""
filename = main_dir+"/output-0000/%s.par" % (sim_name)
with open(filename) as file:
contents = file.readlines()
for line in contents:
line_elems = line.split(" ")
if(line_elems[0] == "TwoPunctures::par_b"):
par_b = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::center_offset[0]"):
center_offset = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::par_P_plus[1]"):
pyp = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::par_P_minus[1]"):
pym = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::target_M_plus"):
m1 = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::target_M_minus"):
m2 = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::par_S_plus[2]"):
S1 = float(line_elems[-1])
if(line_elems[0] == "TwoPunctures::par_S_minus[2]"):
S2 = float(line_elems[-1])
xp = par_b + center_offset
xm = -1*par_b + center_offset
LInitNR = xp*pyp + xm*pym
M = m1+m2
q = m1/m2
chi1 = S1/math.pow(m1, 2.)
chi2 = S2/math.pow(m2, 2.)
# .014 is the initial guess for cutoff frequency
omOrbPN = scipy.optimize.fsolve(angular_momentum, .014, (q, M, chi1, chi2, LInitNR))[0]
omOrbPN = omOrbPN**(3./2.)
omGWPN = 2. * omOrbPN
omCutoff = 0.75 * omGWPN
return omCutoff
#Get Energy
def get_energy(sim):
"""
    Save the radiated energy
sim = string of simulation
"""
python_strain = np.loadtxt("./Extrapolated_Strain/"+sim+"/"+sim+"_radially_extrapolated_strain_l2_m2.dat")
val = np.zeros(len(python_strain))
val = val.astype(np.complex_)
cur_max_time = python_strain[0][0]
cur_max_amp = abs(pow(python_strain[0][1], 2))
for i in python_strain[:]:
cur_time = i[0]
cur_amp = abs(pow(i[1], 2))
if(cur_amp>cur_max_amp):
cur_max_amp = cur_amp
cur_max_time = cur_time
max_idx = 0
for i in range(0, len(python_strain[:])):
if(python_strain[i][1] > python_strain[max_idx][1]):
max_idx = i
paths = glob.glob("./Extrapolated_Strain/"+sim+"/"+sim+"_radially_extrapolated_strain_l[2-4]_m*.dat")
for path in paths:
python_strain = np.loadtxt(path)
t = python_strain[:, 0]
t = t.astype(np.complex_)
h = python_strain[:, 1] + 1j * python_strain[:, 2]
dh = np.zeros(len(t), dtype=np.complex_)
for i in range(0, len(t)-1):
dh[i] = ((h[i+1] - h[i])/(t[i+1] - t[i]))
dh[len(t)-1] = dh[len(t)-2]
dh_conj = np.conj(dh)
prod = np.multiply(dh, dh_conj)
local_val = np.zeros(len(t))
local_val = local_val.astype(np.complex_)
for i in range(0, len(t)):
local_val[i] = np.trapz(prod[:i], x=(t[:i]))
val += local_val
val *= 1/(16 * math.pi)
np.savetxt("./Extrapolated_Strain/"+sim+"/"+sim+"_radially_extrapolated_energy.dat", val)
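# Note (not part of the original script): the loop above accumulates the radiated energy as
#   E(t) = (1/(16*pi)) * sum_{l,m} integral_0^t |dh_{lm}/dt'|^2 dt'
# using a finite-difference derivative for dh/dt and np.trapz for the time integral.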
#Get angular momentum
def get_angular_momentum(sim):
    """
    Save the radiated angular momentum
sim = string of simulation
"""
python_strain = np.loadtxt("./Extrapolated_Strain/"+sim+"/"+sim+"_radially_extrapolated_strain_l2_m2.dat")
val = np.zeros(len(python_strain))
val = val.astype(np.complex_)
cur_max_time = python_strain[0][0]
cur_max_amp = abs(pow(python_strain[0][1], 2))
for i in python_strain[:]:
cur_time = i[0]
cur_amp = abs(pow(i[1], 2))
if(cur_amp>cur_max_amp):
cur_max_amp = cur_amp
cur_max_time = cur_time
max_idx = 0
for i in range(0, len(python_strain[:])):
if(python_strain[i][1] > python_strain[max_idx][1]):
max_idx = i
paths = glob.glob("./Extrapolated_Strain/"+sim+"/"+sim+"_radially_extrapolated_strain_l[2-4]_m*.dat")
for path in paths:
python_strain = np.loadtxt(path)
t = python_strain[:, 0]
t = t.astype(np.complex_)
h = python_strain[:, 1] + 1j * python_strain[:, 2]
dh = np.zeros(len(t), dtype=np.complex_)
for i in range(0, len(t)-1):
dh[i] = ((h[i+1] - h[i])/(t[i+1] - t[i]))
dh[len(t)-1] = dh[len(t)-2]
dh_conj = np.conj(dh)
prod =
|
np.multiply(h, dh_conj)
|
numpy.multiply
|
PythonMax = max
from baseClasses import OOArray
from FuncDesigner.multiarray import multiarray
from ooFun import oofun
from constraints import Constraint
from numpy import isscalar, asscalar, ndarray, atleast_1d, asanyarray, array
import numpy as np
from FDmisc import FuncDesignerException
class ooarray(OOArray):
__array_priority__ = 25 # !!! it should exceed oofun.__array_priority__ !!!
_is_array_of_oovars = False
def __new__(self, *args, **kwargs):
#assert len(kwargs) == 0
tmp = args[0] if len(args) == 1 else args
obj = array(tmp, object).view(self)
#if obj.ndim != 1: raise FuncDesignerException('only 1-d ooarrays are implemented now')
#if obj.dtype != object:obj = np.asfarray(obj) #TODO: FIXME !
obj._id = oofun._id
obj.name = 'unnamed_ooarray_%d' % obj._id
oofun._id += 1
return obj
# def __init__(self, *args, **kw):
# self._id = oofun._id
# self.name = 'unnamed_ooarray_%d' % self._id
# oofun._id += 1
__hash__ = lambda self: self._id
def __len__(self):
return self.size
expected_kwargs = set(('tol', 'name'))
def __call__(self, *args, **kwargs):
#if self.dtype != object: return self.view(ndarray)
# TODO: give different names for each element while assigning name to ooarray
expected_kwargs = self.expected_kwargs
#if not set(kwargs.keys()).issubset(expected_kwargs):
#raise FuncDesignerException('Unexpected kwargs: should be in '+str(expected_kwargs)+' got: '+str(kwargs.keys()))
for elem in expected_kwargs:
if elem in kwargs:
setattr(self, elem, kwargs[elem])
if len(args) > 1: raise FuncDesignerException('No more than single argument is expected')
if len(args) == 0:
if len(kwargs) == 0: raise FuncDesignerException('You should provide at least one argument')
#return self
if len(args) != 0 and isinstance(args[0], str):
self.name = args[0]
for i, elem in enumerate(self.view(ndarray)):
if isinstance(elem, oofun):
elem(self.name + '_' + str(i))
args = args[1:]
if len(args) == 0:
return self
#tmp = asarray([asscalar(asarray(self[i](*args, **kwargs))) if isinstance(self[i], oofun) else self[i] for i in range(self.size)])
if self.size == 1 and type(self.item()) == oofun:
return self.item()(*args, **kwargs)
# TODO: get rid of self in args[0]
if self._is_array_of_oovars and isinstance(args[0], dict) and self in args[0] and len(args) == 1 and len(kwargs) == 0:
return args[0][self]
Tmp = [self[i](*args, **kwargs) if isinstance(self[i], oofun) else self[i] for i in range(self.size)]
tmp = asanyarray(Tmp)
if np.any([isinstance(elem, multiarray) for elem in Tmp]):
tmp = tmp.T.view(multiarray)
if tmp.ndim == 2 or tmp.dtype != object:
return tmp
else:
#tmp = tmp.flatten()
return ooarray(tmp)
def expression(self, *args, **kw):
return str([elem.expression(*args, **kw) if isinstance(elem, oofun) else str(elem) for elem in self.view(ndarray)])
def __getattr__(self, attr):
if attr == 'dep':
r = set.union(*[elem.dep for elem in self.view(ndarray) if isinstance(elem, (oofun, ooarray))])
self.dep = r
return r
elif attr == 'expr':
return str([elem.expr if isinstance(elem, oofun) else str(elem) for elem in self.view(ndarray)])
else:
raise AttributeError('incorrect attribute of ooarray')
def getOrder(self, *args, **kw):
return PythonMax([0] + [elem.getOrder(*args, **kw) for elem in self.view(ndarray) if isinstance(elem, (oofun, ooarray))])
def __mul__(self, other):
if self.size == 1:
return ooarray(asscalar(self)*other)
elif isscalar(other):
# TODO: mb return mere ooarray(self.view(ndarray)*other) or other.view(ndarray)
return ooarray(self.view(ndarray)*other if self.dtype != object else [self[i]*other for i in range(self.size)])
elif isinstance(other, oofun):
hasSize = 'size' in dir(other)
if not hasSize:
# print('''
# FuncDesigner warning:
# to perform the operation
# (ooarray multiplication on oofun)
# oofun size should be known.
# Assuming oofun size is 1,
# the value is ascribed to the oofun attributes.
# Handling of the issue is intended to be
# enhanced in future.''')
other.size = 1
#raise FuncDesignerException('to perform the operation oofun size should be known')
if other.size == 1:
if any([isinstance(elem, oofun) for elem in atleast_1d(self)]):
#if self.dtype == object:
s = atleast_1d(self)
return ooarray([s[i]*other for i in range(self.size)])
else:
return ooarray(self*other)
else: # other.size > 1
# and self.size != 1
s, o = atleast_1d(self), atleast_1d(other)
return ooarray([s[i]*o[i] for i in range(self.size)])
elif isinstance(other, ndarray):
# TODO: mb return mere ooarray(self.view(ndarray)*other)?or other.view(ndarray)
return ooarray(self*asscalar(other) if other.size == 1 else [self[i]*other[i] for i in range(other.size)])
elif type(other) in (list, tuple):
r = self * array(other)
return r
else:
raise FuncDesignerException('bug in multiplication')
def __div__(self, other):
if self.size == 1:
return asscalar(self)/other
elif isscalar(other) or (isinstance(other, ndarray) and other.size in (1, self.size)):
return self * (1.0/other)
elif isinstance(other, oofun):
if self.dtype != object:
return self.view(ndarray) / other
else:
s = atleast_1d(self)
return ooarray([s[i] / other for i in range(self.size)])
elif isinstance(other, ooarray):
if self.dtype != object:
return self.view(ndarray) / other.view(ndarray)
else:
# TODO: mb return mere ooarray(self.view(ndarray) / other)? or other.view(ndarray)
s, o = atleast_1d(self), atleast_1d(other)
return ooarray([s[i] / o[i] for i in range(self.size)])
else:
raise FuncDesignerException('unimplemented yet')
__truediv__ = __div__
__floordiv__ = __div__
def __rdiv__(self, other):
if self.size == 1:
return other / asscalar(self)
return ooarray([1.0 / elem for elem in self.view(ndarray)]) * other
__rtruediv__ = __rdiv__
def __add__(self, other):
if isinstance(other, list):
other = ooarray(other)
if isscalar(other) or (isinstance(other, ndarray) and other.size in (1, self.size)):
r = ooarray(self.view(ndarray) + other)
elif isinstance(other, oofun):
if self.dtype != object:
r = self.view(ndarray) + other
else:
s = atleast_1d(self)
r = ooarray([s[i] + other for i in range(self.size)])
elif isinstance(other, ndarray):
if self.dtype != object:
r = self.view(ndarray) + other.view(ndarray)
elif self.size == 1:
r = other + asscalar(self)
else:
# TODO: mb return mere ooarray(self.view(ndarray) + other) or ooarray(self.view(ndarray) + other.view(ndarray))?
r = ooarray([self[i] + other[i] for i in range(self.size)])
else:
raise FuncDesignerException('unimplemented yet')
if isinstance(r, ndarray) and r.size == 1:
r = asscalar(r)
return r
# # TODO: check why it doesn't work with oofuns
# def __radd__(self, other):
# return self + other
#
# def __rmul__(self, other):
# return self * other
__radd__ = __add__
__rmul__ = __mul__
# TODO : fix it
# def __rdiv__(self, other):
# return self * other
def __pow__(self, other):
if isinstance(other, ndarray) and other.size > 1 and self.size > 1:
return ooarray([self[i]**other[i] for i in range(self.size)])
Self = atleast_1d(self.view(ndarray))
if any(isinstance(elem, (ooarray, oofun)) for elem in Self):
#if self.dtype == object:
return ooarray([elem**other for elem in Self])
# TODO: is this part of code trigger any time?
return self.view(ndarray)**other
def __rpow__(self, other):
if
|
isscalar(other)
|
numpy.isscalar
|
"""
Model class to be used together with an existing/"physical" model to yield a full propagation
model.
Will also be combined with case specific parameters.
"""
import scipy.sparse as sps
import time
import numpy as np
import porepy as pp
import logging
from typing import Dict, Any
logger = logging.getLogger(__name__)
class TensilePropagation(pp.ConformingFracturePropagation):
"""
One more round of cleaning remains for this and related classes!
EK: On my reading, the only active function in this class is _candidate_faces(),
which is a simplification of the corresponding method in the superclass.
If correct, I suggest we try to integrate the present function as an option
in the superclass, and drop this extra class.
"""
def _sorted_propagation_faces(self, g_l: pp.Grid, d_l: Dict) -> np.ndarray:
parameters_l = d_l[pp.PARAMETERS][self.mechanics_parameter_key]
faces = parameters_l["propagate_faces"].nonzero()[0]
faces = faces[g_l.tags["tip_faces"][faces]]
K_equivalent = d_l[pp.PARAMETERS][self.mechanics_parameter_key][
"SIFs_equivalent"
]
ind = np.argsort(K_equivalent[faces])
faces = np.atleast_1d(faces[ind][::-1])
return faces
def _pick_propagation_face(
self,
g_h: pp.Grid,
g_l: pp.Grid,
data_h: Dict,
data_l: Dict,
data_edge: Dict,
face_l,
neighbor_threshold: int = 0,
force_neighbors: bool = False,
) -> None:
"""
Pick out which matrix face to split for a fracture faces tagged as propagating
using the precomputed propagation angle.
Workflow:
Check that the face_l is permissible
Identify the corresponding edges_h (= nodes if self.Nd==2)
The edges' faces_h are candidates for propagation
Pick the candidate based on the propagation angle
Parameters
----------
g_h : pp.Grid
Higer-dimensional grid.
g_l : pp.Grid
Lower-dimensional grid.
data_h : Dict
Data dictionary corresponding to g_h.
data_l : Dict
Data dictionary corresponding to g_l.
data_edge : Dict
Data dictionary corresponding to the edge formed by g_h and g_l.
Returns
-------
None
DESCRIPTION.
Stores the matrix "propagation_face_map" identifying pairs of
lower- and higherdimensional faces. During grid updates, the former will receive
a new neighbour cell and the latter will be split.
"""
nd = self.Nd
# EK: I am almost sure this method is not used, and can be deleted.
# Leave a breakpoint here, and take action if ever hit it.
# NOTE: If we hit it, the signature of this method is likely wrong (at least it
# is different from the corresponding method in the parent class), so we should
# revise the implementation.
print("The method was used after all. Remove breakpoint, do QC")
breakpoint()
face_l: np.ndarray = face_l[g_l.tags["tip_faces"][face_l]]
if face_l.size == 0:
face_faces = sps.csr_matrix((g_l.num_faces, g_h.num_faces))
data_edge["propagation_face_map"]: sps.spmatrix = face_faces
return
fracture_faces_h = g_h.tags["fracture_faces"].nonzero()[0]
tip_faces_l = g_l.tags["tip_faces"].nonzero()[0]
tip_edges_h = tip_faces_l_to_edges_h(g_l, tip_faces_l, g_h)
tip_edges_h.sort(axis=0)
fracture_edges_h = np.empty((g_l.dim, 0), dtype=int)
for frac_face_h in g_h.tags["fracture_faces"].nonzero()[0]:
for frac_e_h in np.sort(edges_of_face(g_h, frac_face_h), axis=0).T:
frac_e_h = frac_e_h.reshape((g_l.dim, 1))
is_found = np.isin(fracture_edges_h, frac_e_h)
is_found = np.any(np.all(is_found))
if not is_found or fracture_edges_h.size == 0:
fracture_edges_h = np.hstack((fracture_edges_h, frac_e_h))
edge_h = tip_faces_l_to_edges_h(g_l, face_l, g_h)
fracture_nodes_h = np.unique(
g_h.face_nodes[:, g_h.tags["fracture_faces"]].nonzero()[0]
)
faces_h_to_split = np.empty(0, dtype=int)
faces_l_to_split = np.empty(0, dtype=int)
candidate_faces_h, faces_l_loc = self._candidate_faces(
g_h,
edge_h,
g_l,
face_l,
tip_edges_h,
fracture_edges_h,
fracture_faces_h,
neighbor_threshold,
force_neighbors,
)
if force_neighbors:
face_h = candidate_faces_h
else:
faces_l_loc = np.empty(0, dtype=int)
## Pick the right candidate:
# Direction of h-dim face centers from the tip
tip_coords = np.reshape(g_l.face_centers[:nd, face_l], (nd, 1))
face_center_vecs = g_h.face_centers[:nd, candidate_faces_h] - tip_coords
face_center_vecs = face_center_vecs / np.linalg.norm(
face_center_vecs, axis=0
)
# Propagation vector, with sign assuring a positive orientation
# of the basis
propagation_vector = self._propagation_vector(g_l, data_l, face_l)
# Pick the candidate closest to the propagation point,
# i.e. smallest angle between propagation vector and face center vector
distances = pp.geometry.distances.point_pointset(
propagation_vector, face_center_vecs
)
ind = np.argsort(distances)
# There might be no candidate faces left after imposition of restriction
# of permissible candidates
if candidate_faces_h.size > 0:
face_h = candidate_faces_h[ind[0]]
edges_of_new_face = edges_of_face(g_h, face_h)
edges_of_new_face.sort(axis=0)
faces_l_loc = np.empty(0, dtype=int)
for edge in edges_of_new_face.T: # sort!
# Remove from tip edges if it was a tip, add if not
ind = np.all(np.isin(tip_edges_h, edge), axis=0)
if np.any(ind):
tip_edges_h = tip_edges_h[:, ~ind]
face_l_loc = tip_edge_h_to_face_l(g_l, g_h, edge)
if (
face_l_loc.size > 0
): # the else is a tip_edge_h arisen in this propagation step, and does not correspond to a tip to be opened
faces_l_loc = np.hstack((faces_l_loc, face_l_loc))
else:
tip_edges_h = np.hstack(
(tip_edges_h, edge.reshape((g_l.dim, 1)))
)
fracture_edges_h = np.hstack(
(fracture_edges_h, edge.reshape((g_l.dim, 1)))
)
n_neigh = faces_l_loc.size
if n_neigh > neighbor_threshold:
faces_h_to_split = np.hstack((faces_h_to_split, np.tile(face_h, n_neigh)))
faces_l_to_split = np.hstack((faces_l_to_split, faces_l_loc))
fracture_faces_h = np.hstack((fracture_faces_h, face_h))
face_faces = sps.csr_matrix(
(np.ones(faces_l_to_split.shape), (faces_l_to_split, faces_h_to_split)),
shape=(g_l.num_faces, g_h.num_faces),
)
data_edge["propagation_face_map"] = face_faces
def _candidate_faces(
self, g_h: pp.Grid, edge_h, g_l: pp.Grid, face_l: np.ndarray
) -> np.ndarray:
"""For a given edge (understood to be a fracture tip) in g_h, find the
candidate faces that may be ready for a split.
IMPLEMENTATION NOTE: This method is different from the identically named method
in the parent class ConformingFracturePropagation in that fewer checks are done
on the candidate faces. The present method is assumed to be used in a tensile
fracturing regime, where the propagating fracture stays planar, and where the
grid contains faces that fit this propagating geometry. In comparison, the method
in the parent class aims at non-planar fractures, and thus needs to do much more
checks to try to keep a reasonable fracture geometry also after propagation.
"""
def faces_of_edge(g: pp.Grid, e: np.ndarray) -> np.ndarray:
"""
Obtain indices of all faces sharing an edge.
Parameters
----------
g : pp.Grid
e : np.ndarray
The edge.
Returns
-------
faces : np.ndarray
Faces.
"""
if g.dim == 1:
faces = e
elif g.dim == 2:
faces = g.face_nodes[e].nonzero()[1]
elif g.dim == 3:
f_0 = g.face_nodes[e[0]].nonzero()[1]
f_1 = g.face_nodes[e[1]].nonzero()[1]
faces = np.intersect1d(f_0, f_1)
else:
raise ValueError("Grid dimension should be 1, 2 or 3")
return faces
# Find all the edge's neighboring faces
candidate_faces = faces_of_edge(g_h, edge_h)
# Exclude faces that are on a fracture
are_fracture = g_h.tags["fracture_faces"][candidate_faces]
candidate_faces = candidate_faces[np.logical_not(are_fracture)]
return candidate_faces
class THMPropagationModel(TensilePropagation):
def __init__(self, params):
super().__init__(params)
pp.THM.__init__(self, params)
# Set additional case specific fields
self.set_fields(params)
## THM + propagation specific methods
def _initialize_new_variable_values(
self, g: pp.Grid, d: Dict[str, Any], var: str, dofs: Dict[str, int]
) -> np.ndarray:
"""
Overwrite the corresponding method in superclasses: The pressure variable is
initialized to the atmospheric pressure. Apart from this, all other variables
are initialized to zero.
Parameters
----------
g : pp.Grid
Grid.
d : Dict
Data dictionary.
var : str
Name of variable.
dofs : int
Number of DOFs per cell (or face/node).
Returns
-------
vals : np.ndarray
Values for the new DOFs.
"""
cell_dof = dofs.get("cells")
n_new = d["cell_index_map"].shape[0] - d["cell_index_map"].shape[1]
if var == self.scalar_variable: # type: ignore
vals = (
np.ones(n_new * cell_dof) * pp.ATMOSPHERIC_PRESSURE / self.scalar_scale # type: ignore
)
else:
vals = np.zeros(n_new * cell_dof)
return vals
def _map_variables(self, solution: np.ndarray) -> np.ndarray:
"""
In addition to super's mapping an initialization of all primary variables,
map the face values (darcy_fluxes and stored boundary conditions) and
quantities to be exported.
Parameters
----------
solution : np.ndarray
Solution vector from before propagation.
Returns
-------
new_solution : np.ndarray
Mapped solution vector with initialized new DOFs.
"""
# Map solution, and initialize for newly defined dofs
new_solution = super()._map_variables(solution)
self._map_face_values()
return new_solution
def _map_face_values(self) -> None:
"""
Maps the following face values:
old_bc_values, used by DivU
darcy_fluxes, used by Upwind
Returns
-------
None.
"""
# g_h Darcy fluxes are first copied to both the split faces, then mapped
# to the mortar grid and finally removed from d_h.
# In d_l, we initialize zero fluxes on the new faces, since there was
# no flux across fracture tips previous to propagation.
t_key = self.temperature_parameter_key
keys = (
self.mechanics_parameter_key,
self.mechanics_temperature_parameter_key,
)
gb = self.gb
for g, d in gb:
face_map: sps.spmatrix = d["face_index_map"]
mapping = sps.kron(face_map, sps.eye(self.Nd))
# Map darcy fluxes
d[pp.PARAMETERS][t_key]["darcy_flux"] = (
face_map * d[pp.PARAMETERS][t_key]["darcy_flux"]
)
if g.dim == self.Nd:
# Duplicate darcy_fluxes for new faces ("other" side of new fracture)
new_faces = d["new_faces"]
old_faces = d["split_faces"]
d[pp.PARAMETERS][t_key]["darcy_flux"][new_faces] = -d[pp.PARAMETERS][
t_key
]["darcy_flux"][old_faces]
# Map bc values
for key in keys:
old_vals = d[pp.PARAMETERS][key]["bc_values"]
new_vals = mapping * old_vals
new_ind = pp.fvutils.expand_indices_nd(d["new_faces"], self.Nd)
if new_ind.size > 0:
old_ind = pp.fvutils.expand_indices_nd(
d["split_faces"], self.Nd
)
new_vals[new_ind] = old_vals[old_ind]
d[pp.STATE][key]["bc_values"] = new_vals
for e, d in gb.edges():
cell_map: sps.spmatrix = d["cell_index_map"]
mg: pp.MortarGrid = d["mortar_grid"]
d[pp.PARAMETERS][t_key]["darcy_flux"] = (
cell_map * d[pp.PARAMETERS][t_key]["darcy_flux"]
)
g_l, g_h = gb.nodes_of_edge(e)
d_h = gb.node_props(g_h)
new_ind = self._new_dof_inds(cell_map)
fluxes_h: np.ndarray = d_h[pp.PARAMETERS][t_key]["darcy_flux"]
new_mortar_fluxes = mg.primary_to_mortar_int() * fluxes_h
d[pp.PARAMETERS][t_key]["darcy_flux"] += new_mortar_fluxes
g = self._nd_grid()
d = gb.node_props(g)
d[pp.PARAMETERS][t_key]["darcy_flux"][g.tags["fracture_faces"]] = 0
def before_newton_loop(self):
self.convergence_status = False
self._iteration = 0
def update_discretizations(self):
# For the moment, do a full rediscretization. A more targeted approach
# should be possible.
self._minimal_update_discretization()
def before_newton_iteration(self) -> None:
"""Rediscretize non-linear terms.
QUESTION: Should the parent be updated?
"""
# First update parameters, then discretize all terms except those treated
# by mpfa and mpsa in the highest dimension.
# NOTE: We may end up unnecessarily rediscretizing a few terms, but the cost
# of this is insignificant.
self._iteration += 1
## First update parameters.
# The Darcy fluxes were updated right after the previous Newton iteration
# or in self.prepare_for_simulation(), thus no need to update these here.
# Update apertures and specific volumes (e.g. compute from displacement jumps).
# Store as iterate information.
self.update_all_apertures(to_iterate=True)
# Update parameters.
# Depending on the implementation of set_parameters, this can for instance
# update permeability as a function of aperture. Similarly, various other
# quantities can be updated.
self.set_parameters()
###
# With updated parameters (including Darcy fluxes), we can now discretize
# non-linear terms.
# Discretize everything except terms relating to poro-elasticity and
# diffusion (that is, discretize everything not handled by mpfa or mpsa).
# NOTE: Accumulation terms in self.Nd could also have been excluded.
term_list = [
"!mpsa",
"!stabilization",
"!div_u",
"!grad_p",
"!diffusion",
]
filt = pp.assembler_filters.ListFilter(term_list=term_list)
# NOTE: No grid filter here, in pratice, all terms on lower-dimensional grids
# (apart from diffusion) are discretized here, so is everything on the mortars
self.assembler.discretize(filt=filt)
# Discretize diffusion terms on lower-dimensional grids.
for dim in range(self.Nd):
grid_list = self.gb.grids_of_dimension(dim)
if len(grid_list) == 0:
continue
filt = pp.assembler_filters.ListFilter(
grid_list=grid_list,
term_list=["diffusion"],
)
self.assembler.discretize(filt=filt)
def after_propagation_loop(self):
"""
TODO: Purge.
Returns
-------
None.
"""
ValueError("should not call this")
def after_newton_iteration(self, solution: np.ndarray) -> None:
super().after_newton_iteration(solution)
# Update Darcy fluxes based on the newly converged pressure solution.
# NOTE: For consistency between the discretization and solution, this is
# done before updates to permeability or geometry (by fracture propagation).
self.compute_fluxes()
def after_newton_convergence(self, solution, errors, iteration_counter):
"""Propagate fractures if relevant. Update variables and parameters
according to the newly calculated solution.
"""
gb = self.gb
# We export the converged solution *before* propagation:
self.update_all_apertures(to_iterate=True)
self.export_step()
# NOTE: Darcy fluxes were updated in self.after_newton_iteration().
# The fluxes are mapped to the new geometry (and fluxes are assigned for
# newly formed faces) by the below call to self._map_variables().
# Propagate fractures:
# i) Identify which faces to open in g_h
# ii) Split faces in g_h
# iii) Update g_l and the mortar grid. Update projections.
self.evaluate_propagation()
if self.propagated_fracture:
# Update parameters and discretization
for g, d in gb:
if g.dim < self.Nd - 1:
# Should be really careful in this situation. Fingers crossed.
continue
# Transfer information on new faces and cells from the format used
# by self.evaluate_propagation to the format needed for update of
# discretizations (see Discretization.update_discretization()).
# TODO: This needs more documentation.
new_faces = d.get("new_faces", np.array([], dtype=np.int))
split_faces = d.get("split_faces", np.array([], dtype=np.int))
modified_faces = np.hstack((new_faces, split_faces))
update_info = {
"map_cells": d["cell_index_map"],
"map_faces": d["face_index_map"],
"modified_cells": d.get("new_cells", np.array([], dtype=np.int)),
"modified_faces": d.get("new_faces", modified_faces),
}
# d["update_discretization"] = update_info
# Map variables after fracture propagation. Also initialize variables
# for newly formed cells, faces and nodes.
# Also map darcy fluxes and time-dependent boundary values (advection
# and the div_u term in poro-elasticity).
new_solution = self._map_variables(solution)
# Update apertures: Both state (time step) and iterate.
self.update_all_apertures(to_iterate=False)
self.update_all_apertures(to_iterate=True)
# Set new parameters.
self.set_parameters()
# For now, update discretizations will do a full rediscretization
# TODO: Replace this with a targeted rediscretization.
# We may want to use some of the code below (after return), but not all of
# it.
self._minimal_update_discretization()
else:
# No updates to the solution
new_solution = solution
# Finally, use super's method to do updates not directly related to
# fracture propgation
super().after_newton_convergence(new_solution, errors, iteration_counter)
self.adjust_time_step()
# Done!
return
def _minimal_update_discretization(self):
# NOTE: Below here is an attempt at local updates of the discretization
# matrices. For now, these are replaced by a full discretization at the
# begining of each time step.
# EK: Discretization is a pain, because of the flux term.
# The advective term needs an updated (expanded faces) flux term,
# to compute this, we first need to expand discretization of the
# pressure diffusion terms.
# It should be possible to do something smarter here, perhaps compute
# fluxes before splitting, then transfer numbers and populate with other
# values. Or something else.
gb = self.gb
t_0 = time.time()
g_max = gb.grids_of_dimension(gb.dim_max())[0]
grid_list = gb.grids_of_dimension(gb.dim_max() - 1).tolist()
grid_list.append(g_max)
data = gb.node_props(g_max)[pp.DISCRETIZATION_MATRICES]
flow = {}
for key in data["flow"]:
flow[key] = data["flow"][key].copy()
mech = {}
for key in data["mechanics"]:
mech[key] = data["mechanics"][key].copy()
self.discretize_biot(update_after_geometry_change=False)
for e, _ in gb.edges_of_node(g_max):
grid_list.append((e[0], e[1], e))
filt = pp.assembler_filters.ListFilter(
variable_list=[self.scalar_variable, self.mortar_scalar_variable],
term_list=[self.scalar_coupling_term],
grid_list=grid_list,
)
self.assembler.discretize(filt=filt)
grid_list = gb.grids_of_dimension(gb.dim_max() - 1).tolist()
filt = pp.assembler_filters.ListFilter(
term_list=["diffusion", "mass", "source"],
variable_list=[self.scalar_variable],
grid_list=grid_list,
)
# self.assembler.update_discretization(filt=filt)
self.assembler.discretize(filt=filt)
# Now that both variables and discretizations for the flux term have been
# updated, we can compute the fluxes on the new grid.
# self.compute_fluxes()
# Update biot. Should be cheap.
self.copy_biot_discretizations()
# No need to update source term
# Then the temperature discretizations. These are updated, to avoid full mpfa
# in g_max
temperature_terms = ["source", "diffusion", "mass", self.advection_term]
filt = pp.assembler_filters.ListFilter(
grid_list=[self._nd_grid()],
variable_list=[self.temperature_variable],
term_list=temperature_terms,
)
# self.assembler.update_discretization(filt=filt)
self.assembler.discretize(filt=filt)
# Pressure-temperature coupling terms
coupling_terms = [self.s2t_coupling_term, self.t2s_coupling_term]
filt = pp.assembler_filters.ListFilter(
grid_list=[self._nd_grid()],
variable_list=[self.temperature_variable, self.scalar_variable],
term_list=coupling_terms,
)
self.assembler.discretize(filt=filt)
# Build a list of all edges, and all couplings
edge_list = []
for e, _ in self.gb.edges():
edge_list.append(e)
edge_list.append((e[0], e[1], e))
if len(edge_list) > 0:
filt = pp.assembler_filters.ListFilter(grid_list=edge_list)
self.assembler.discretize(filt=filt)
# Finally, discretize terms on the lower-dimensional grids. This can be done
# in the traditional way, as there is no Biot discretization here.
for dim in range(0, self.Nd):
grid_list = self.gb.grids_of_dimension(dim)
if len(grid_list) > 0:
filt = pp.assembler_filters.ListFilter(grid_list=grid_list)
self.assembler.discretize(filt=filt)
logger.info("Rediscretized in {} s.".format(time.time() - t_0))
## Methods specific to this project, but common to (some of) the examples
def set_fields(self, params):
"""
Set various fields to be used in the model.
"""
# We operate on the temperature difference T-T_0, with T in Kelvin
self.T_0_Kelvin = 500
self.background_temp_C = pp.KELKIN_to_CELSIUS(self.T_0_Kelvin)
# Scaling coefficients
self.scalar_scale = 1e7
self.temperature_scale = 1e0
self.file_name = self.params["file_name"]
self.folder_name = self.params["folder_name"]
self.export_fields = [
"u_exp",
"p_exp",
"T_exp",
"traction_exp",
"aperture_exp",
"fluxes_exp",
"cell_centers",
]
# Geometry
def create_grid(self) -> None:
"""
Method that creates the GridBucket of a 2d or 3d domain.
The geometry is defined through the method self._fractures() and the
domain sizes stored in the dictionary self.box.
This method sets self.gb and self.Nd.
"""
# Define fractures
self._fractures()
x = self.box["xmax"] - self.box["xmin"]
y = self.box["ymax"] - self.box["ymin"]
nx = self.params.get("nx", 10)
ny = self.params.get("ny", nx)
ncells = [nx, ny]
dims = [x, y]
if "zmax" in self.box:
ncells.append(self.params.get("nz", nx))
dims.append(self.box["zmax"] - self.box["zmin"])
gb = pp.meshing.cart_grid(self.fracs, ncells, physdims=dims)
pp.contact_conditions.set_projections(gb)
self.gb = gb
self.Nd = self.gb.dim_max()
# Tag the wells
self._tag_well_cells()
self.n_frac = len(gb.grids_of_dimension(self.Nd - 1))
# Numerics
def assign_discretizations(self) -> None:
"""
For long time steps, scaling the diffusive interface fluxes in the non-default
way turns out to actually be beneficial for the condition number.
"""
# Call parent class for disrcetizations.
super().assign_discretizations()
for e, d in self.gb.edges():
d[pp.COUPLING_DISCRETIZATION][self.temperature_coupling_term][e][
1
].kinv_scaling = False
d[pp.COUPLING_DISCRETIZATION][self.scalar_coupling_term][e][
1
].kinv_scaling = True
def assemble_and_solve_linear_system(self, tol):
if getattr(self, "report_A", True):
A, b = self.assembler.assemble_matrix_rhs(add_matrices=False)
for key in A.keys():
logger.debug("{:.2e} {}".format(np.max(np.abs(A[key])), key))
A, b = self.assembler.assemble_matrix_rhs()
prepare_umfpack = self.params.get("prepare_umfpack", False)
if prepare_umfpack:
A.indices = A.indices.astype(np.int64)
A.indptr = A.indptr.astype(np.int64)
logger.debug("Max element in A {0:.2e}".format(np.max(np.abs(A))))
logger.info(
"Max {0:.2e} and min {1:.2e} A sum.".format(
np.max(np.sum(np.abs(A), axis=1)), np.min(np.sum(np.abs(A), axis=1))
)
)
t_0 = time.time()
x = sps.linalg.spsolve(A, b)
logger.info("Solved in {} s.".format(time.time() - t_0))
return x
def check_convergence(self, solution, prev_solution, init_solution, nl_params=None):
g_max = self._nd_grid()
uh_dof = self.assembler.dof_ind(g_max, self.displacement_variable)
p_dof = np.array([], dtype=int)
T_dof = np.array([], dtype=int)
contact_dof = np.array([], dtype=int)
for g, _ in self.gb:
p_dof = np.hstack((p_dof, self.assembler.dof_ind(g, self.scalar_variable)))
T_dof = np.hstack(
(T_dof, self.assembler.dof_ind(g, self.temperature_variable))
)
if g.dim == self.Nd - 1:
contact_dof = np.hstack(
(
contact_dof,
self.assembler.dof_ind(g, self.contact_traction_variable),
)
)
# Also find indices for the contact variables
uj_dof = np.array([], dtype=int)
for e, _ in self.gb.edges():
if e[0].dim == self.Nd:
uj_dof = np.hstack(
(
uj_dof,
self.assembler.dof_ind(e, self.mortar_displacement_variable),
)
)
# Pick out the solution from current, previous iterates, as well as the
# initial guess.
def differences(dofs):
sol_now = solution[dofs]
sol_prev = prev_solution[dofs]
sol_init = init_solution[dofs]
diff_iterates = np.sqrt(np.sum((sol_now - sol_prev) ** 2)) / sol_now.size
diff_init = np.sqrt(np.sum((sol_now - sol_init) ** 2)) / sol_now.size
norm = np.sqrt(np.sum(sol_now ** 2)) / sol_now.size
return diff_iterates, diff_init, norm
iterate_diff_T, init_diff_T, norm_T = differences(T_dof)
iterate_diff_p, init_diff_p, norm_p = differences(p_dof)
iterate_diff_uh, init_diff_uh, norm_uh = differences(uh_dof)
iterate_diff_uj, init_diff_uj, norm_uj = differences(uj_dof)
tol_convergence = nl_params["nl_convergence_tol"]
# Not sure how to use the divergence criterion
# tol_divergence = nl_params["nl_divergence_tol"]
diverged = False
# Check absolute convergence criterion
def convergence(val, ref, atol, rtol=None):
if rtol is None:
rtol = atol
if val < atol:
return True, val
error = val / ref
return error < rtol, error
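# The helper above applies an absolute test first (val < atol) and otherwise a relative
# test against the reference norm (val / ref < rtol).
# Illustration: convergence(1e-12, 5.0, 1e-10) -> (True, 1e-12), whereas
# convergence(1e-3, 5.0, 1e-10) -> (False, 2e-4).
# The temperature residual below is checked against a tolerance 100 times looser than
# the one used for displacement and pressure.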
scaled_convergence = 100 * tol_convergence
converged_uh, error_uh = convergence(iterate_diff_uh, norm_uh, tol_convergence)
converged_T, error_T = convergence(iterate_diff_T, norm_T, scaled_convergence)
converged_p, error_p = convergence(iterate_diff_p, norm_p, tol_convergence)
converged_uj, error_uj = convergence(iterate_diff_uj, norm_uj, tol_convergence)
converged = (
converged_uj
# and converged_contact
and converged_uh
and converged_T
and converged_p
)
logger.info(
"Errors: displacement jump {:.2e}, matrix displacement {:.2e}, temperature {:.2e} and pressure {:.2e}".format(
error_uj, error_uh, error_T, error_p
)
)
logger.info(
"Difference: displacement jump {:.2e}, matrix displacement {:.2e}, temperature {:.2e} and pressure {:.2e}".format(
iterate_diff_uj, iterate_diff_uh, iterate_diff_T, iterate_diff_p
)
)
return error_uh, converged, diverged
def adjust_time_step(self):
"""
Adjust the time step so that smaller time steps are used when the driving forces
are changed. Also make sure to exactly reach the start and end time for
each phase.
"""
# Default is to just increase the time step somewhat
self.time_step = getattr(self, "time_step_factor", 1.0) * self.time_step
# We also want to make sure that we reach the end of each simulation phase
for dt, lim in zip(self.phase_time_steps, self.phase_limits):
diff = self.time - lim
if diff < 0 and -diff <= self.time_step:
self.time_step = -diff
if np.isclose(self.time, lim):
self.time_step = dt
# And that the time step doesn't grow too large after the equilibration phase
if self.time > 0:
self.time_step = min(self.time_step, self.max_time_step)
def compute_fluxes(self):
"""
Compute fluxes.
For 3d, the fluxes are damped after the fourth iteration.
"""
use_smoothing = self.Nd == 3
gb = self.gb
for g, d in gb:
pa = d[pp.PARAMETERS][self.temperature_parameter_key]
if self._iteration > 1:
pa["darcy_flux_1"] = pa["darcy_flux"].copy()
for e, d in gb.edges():
pa = d[pp.PARAMETERS][self.temperature_parameter_key]
if self._iteration > 1:
pa["darcy_flux_1"] = pa["darcy_flux"].copy()
super().compute_fluxes()
if not use_smoothing or self._iteration < 5:
return
a, b = 1, 1
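# With equal weights a = b = 1, the update below is a plain average of the current and
# previous flux iterates, i.e. an under-relaxation that damps oscillations between
# nonlinear iterations.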
node_update, edge_update = 0, 0
for g, d in gb:
pa = d[pp.PARAMETERS][self.temperature_parameter_key]
v1 = pa["darcy_flux_1"]
v2 = pa["darcy_flux"]
v_new = (a * v2 + b * v1) / (a + b)
pa["darcy_flux"] = v_new
node_update += np.sqrt(
np.sum(np.power(v2 - v_new, 2)) / np.sum(np.power(v2, 2))
)
for e, d in gb.edges():
pa = d[pp.PARAMETERS][self.temperature_parameter_key]
v1 = pa["darcy_flux_1"]
v2 = pa["darcy_flux"]
v_new = (a * v2 + b * v1) / (a + b)
pa["darcy_flux"] = v_new
edge_update += np.sqrt(
np.sum(np.power(v2 - v_new, 2)) / np.sum(np.power(v2, 2))
)
logger.info(
"Smoothed fluxes by {:.2e} and edge {:.2e} at time {:.2e}".format(
node_update, edge_update, self.time
)
)
# Initialization etc.
def initial_condition(self) -> None:
"""Initial values for the Darcy fluxes, p, T and u."""
for g, d in self.gb:
d[pp.PARAMETERS] = pp.Parameters()
d[pp.PARAMETERS].update_dictionaries(
[
self.mechanics_parameter_key,
self.mechanics_temperature_parameter_key,
self.scalar_parameter_key,
self.temperature_parameter_key,
]
)
self.update_all_apertures(to_iterate=False)
self.update_all_apertures()
super().initial_condition()
for g, d in self.gb:
u0 = self.initial_displacement(g)
d[pp.PARAMETERS][self.temperature_parameter_key].update(
{"darcy_flux": np.zeros(g.num_faces)}
)
p0 = self.initial_scalar(g)
T0 = self.initial_temperature(g)
state = {
self.scalar_variable: p0,
self.temperature_variable: T0,
}
iterate = {
self.scalar_variable: p0,
self.temperature_variable: T0,
self.displacement_variable: u0,
}
pp.set_state(d, state)
pp.set_iterate(d, iterate)
for e, d in self.gb.edges():
update = {self.mortar_displacement_variable: self.initial_displacement(e)}
pp.set_state(d, update)
pp.set_iterate(d, update)
def initial_scalar(self, g) -> np.ndarray:
"""Hydrostatic pressure depending on _depth, which is set to 0 in exII."""
depth = self._depth(g.cell_centers)
return self.hydrostatic_pressure(g, depth) / self.scalar_scale
def initial_temperature(self, g) -> np.ndarray:
"""Initial temperature is 0, but set to f(z) in exIV."""
return
|
np.zeros(g.num_cells)
|
numpy.zeros
|
from abc import ABC
import numpy as np
import pytest
from numpy.testing import assert_, assert_allclose
from cobyqa import minimize
class TestBase(ABC):
@staticmethod
def arwhead(x):
x = np.asarray(x)
fvx = (x[:-1] ** 2.0 + x[-1] ** 2.0) ** 2.0 - 4.0 * x[:-1] + 3.0
return np.sum(fvx)
@staticmethod
def perm0d(x):
x = np.asarray(x)
n = x.size
nrg = np.arange(1, n + 1)
fx = 0.0
for i in range(n):
fx += np.sum((nrg + 10.0) * (x ** i - 1.0 / nrg ** i)) ** 2.0
return fx
@staticmethod
def permd(x):
x = np.asarray(x)
n = x.size
nrg =
|
np.arange(1, n + 1)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 10:53:54 2018
@author: lhe39759
"""
import sys
sys.path.append(r'C:\Users\lhe39759\Documents\GitHub')
from SliceOPy import NetSlice, DataSlice
import keras
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage.filters as fi
from mpl_toolkits.mplot3d import Axes3D # This import has side effects required for the kwarg projection='3d' in the call to fig.add_subplot
def gkern2(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
# create nxn zeros
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen//2, kernlen//2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
gauss = fi.gaussian_filter(inp, nsig)/kernlen
max1 = np.amax(gauss)
gauss = gauss/max1
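# Note: the division by kernlen above is cancelled by this max-normalization,
# so the returned kernel is simply the Gaussian scaled to a peak value of 1.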
return gauss
def generateGaussianHill(xmin,xmax,ymin,ymax,spacer,sig):
gauss = gkern2(spacer,sig)
gauss = gauss + (np.random.random_sample(gauss.shape)*0.05)
x = np.arange(xmin, xmax, (np.abs(xmin)+np.abs(xmax))/spacer)
y = np.arange(ymin, ymax, (np.abs(ymin)+np.abs(ymax))/spacer)
X, Y = np.meshgrid(x, y)
features = []
for x1 in x:
for y1 in y:
item = []
item.append(x1)
item.append(y1)
features.append(np.array(item))
features = np.array(features)
labels = gauss.flatten()
return features, labels
def generateGaussianHillValley(xmin,xmax,ymin,ymax,spacer,sig):
gauss = np.append(gkern2(spacer,9),-1*gkern2(spacer,9),axis=0)
x = np.arange(xmin, xmax, (np.abs(xmin)+np.abs(xmax))/spacer)
y = np.arange(ymin, ymax, (np.abs(ymin)+np.abs(ymax))/(2*spacer))
X, Y = np.meshgrid(x, y)
features = []
for x1 in x:
for y1 in y:
item = []
item.append(x1)
item.append(y1)
features.append(np.array(item))
features = np.array(features)
labels = gauss.flatten()
return features, labels
def plotGaussian(labels,xmin,xmax,ymin,ymax,spacerx,spacery,label):
x =np.arange(xmin,xmax, (np.abs(xmin)+
|
np.abs(xmax)
|
numpy.abs
|
from ..grt import misc
from scipy.interpolate import griddata
from matplotlib.pyplot import figure
from pyhdf.SD import SD, SDC
from pyhdf.HDF import *
from pyhdf.VS import *
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
from scipy.spatial import cKDTree
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import seaborn as sns; sns.set()
from cartopy import config
import cartopy.crs as ccrs
##########################################################################################
def plot_modis_rgb_image(file):
fig_style_dict = {}
fig_style_dict['fig_type'] = 'analysis' #'analysis'
fig_style_dict['facecolor'] = '#E8E8E8'
fig_style_dict['label_font_size'] = 18
fig_style_dict['title_font_size'] = 22
fig_style_dict['heatmap_annot_font_size'] = 22
fig_style_dict['facecolor'] = 'white'
fig_style_dict['label_font_size'] = 12
fig_style_dict['title_font_size'] = 14
fig_style_dict['heatmap_annot_font_size'] = 14
#file = SD(myd021km_granule, SDC.READ)
selected_sds = file.select('EV_250_Aggr1km_RefSB')
selected_sds_attributes = selected_sds.attributes()
for key, value in selected_sds_attributes.items():
if key == 'reflectance_scales':
reflectance_scales_250_Aggr1km_RefSB = np.asarray(value)
if key == 'reflectance_offsets':
reflectance_offsets_250_Aggr1km_RefSB = np.asarray(value)
sds_data_250_Aggr1km_RefSB = selected_sds.get()
selected_sds = file.select('EV_500_Aggr1km_RefSB')
selected_sds_attributes = selected_sds.attributes()
for key, value in selected_sds_attributes.items():
if key == 'reflectance_scales':
reflectance_scales_500_Aggr1km_RefSB = np.asarray(value)
if key == 'reflectance_offsets':
reflectance_offsets_500_Aggr1km_RefSB = np.asarray(value)
sds_data_500_Aggr1km_RefSB = selected_sds.get()
print( reflectance_scales_500_Aggr1km_RefSB.shape)
data_shape = sds_data_250_Aggr1km_RefSB.shape
along_track = data_shape[1]
cross_trak = data_shape[2]
z = np.zeros((along_track, cross_trak,3))
z[:,:,0] = ( sds_data_250_Aggr1km_RefSB[0,:,:] - reflectance_offsets_250_Aggr1km_RefSB[0] ) * reflectance_scales_250_Aggr1km_RefSB[0]
z[:,:,1] = ( sds_data_500_Aggr1km_RefSB[1,:,:] - reflectance_offsets_500_Aggr1km_RefSB[1] ) * reflectance_scales_500_Aggr1km_RefSB[1]
z[:,:,2] = ( sds_data_500_Aggr1km_RefSB[0,:,:] - reflectance_offsets_500_Aggr1km_RefSB[0] ) * reflectance_scales_500_Aggr1km_RefSB[0]
x = np.array([0, 30, 60, 120, 190, 255], dtype=np.uint8)
y = np.array([0, 110, 160, 210, 240, 255], dtype=np.uint8)
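# scale_image applies a piecewise-linear brightness stretch: values between consecutive
# breakpoints x[i] and x[i+1] are mapped linearly onto y[i]..y[i+1], and anything at or
# above the last breakpoint saturates at 255.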
def scale_image(image, x, y):
scaled = np.zeros((along_track, cross_trak), dtype=np.uint8)
for i in range(len(x)-1):
x1 = x[i]
x2 = x[i+1]
y1 = y[i]
y2 = y[i+1]
m = (y2 - y1) / float(x2 - x1)
b = y2 - (m *x2)
mask = ((image >= x1) & (image < x2))
scaled = scaled + mask * np.asarray(m * image + b, dtype=np.uint8)
mask = image >= x2
scaled = scaled + (mask * 255)
return scaled
z_color_enh = np.zeros((along_track, cross_trak,3), dtype=np.uint8)
z_color_enh[:,:,0] = scale_image(misc.bytescale(z[:,:,0]), x, y)
z_color_enh[:,:,1] = scale_image(misc.bytescale(z[:,:,1]), x, y)
z_color_enh[:,:,2] = scale_image(misc.bytescale(z[:,:,2]), x, y)
fig = figure(num=None, figsize=(12, 10), dpi=80, facecolor=fig_style_dict['facecolor'], edgecolor='k')
ax = fig.add_subplot(111)
img = plt.imshow(np.fliplr(z_color_enh), interpolation='nearest', origin='lower')
l = [int(i) for i in np.linspace(0,cross_trak,6)]
plt.xticks(l, [i for i in reversed(l)], rotation=0, fontsize=11 )
l = [int(i) for i in np.linspace(0,along_track,9)]
plt.yticks(l, l, rotation=0, fontsize=11 )
plt.xticks(fontsize=11)
plt.yticks(fontsize=11)
plt.title('MODIS RGB Image', fontsize=14)
plt.grid(None)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.savefig("rgb.png", bbox_inches='tight', facecolor=fig.get_facecolor())
plt.show()
plt.close()
return z_color_enh
##########################################################################################
def plot_modis_rgb_image_with_orthographic_projection(myd021km, myd03_file):
z_color_enh = plot_modis_rgb_image(myd021km)
myd03_Latitude = myd03_file.select('Latitude')
myd03_Longitude = myd03_file.select('Longitude')
myd03_Latitude_data = myd03_Latitude.get()
myd03_Longitude_data = myd03_Longitude.get()
myd03_Latitude_data = np.fliplr(myd03_Latitude_data)
myd03_Longitude_data = np.fliplr(myd03_Longitude_data)
myd03_Latitude_shape = myd03_Latitude_data.shape
z = z_color_enh / 256.0
z = np.fliplr(z)
along_track = myd03_Latitude_shape[0]
cross_trak = myd03_Latitude_shape[1]
proj = ccrs.PlateCarree()
lat_long_grid = proj.transform_points(
x = myd03_Longitude_data,
y = myd03_Latitude_data,
src_crs = proj)
x_igrid = lat_long_grid[:,:,0] ## long
y_igrid = lat_long_grid[:,:,1] ## lat
xul =
|
np.min(myd03_Longitude_data)
|
numpy.min
|
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
import os
import sys
from transforms3d.quaternions import *
from transforms3d.euler import *
from transforms3d.axangles import *
import random
from tensorboardX import SummaryWriter
import scipy.io as sio
import IPython
import time
from torch import nn
from collections import deque
import tabulate
import torch.nn.functional as F
import cv2
import yaml
import torch
import ray
import core
import copy
import math
from easydict import EasyDict as edict
from pointnet2_ops.pointnet2_utils import furthest_point_sample, gather_operation
import psutil
import GPUtil
import itertools
import glob
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import cm
import matplotlib
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0.
self.avg = 0.
self.sum = 0.
self.count = 0.
self.sum_2 = 0.
self.count_2 = 0.
self.means = []
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sum_2 += val * n
self.count_2 += n
def set_mean(self):
self.means.append(self.sum_2 / self.count_2)
self.sum_2 = 0.
self.count_2 = 0.
def std(self):
return np.std(np.array(self.means) + 1e-4)
def __repr__(self):
return "{:.3f} ({:.3f})".format(self.val, self.avg)
def module_max_param(module):
def maybe_max(x):
return float(torch.abs(x).max()) if x is not None else 0
max_data = np.amax([(maybe_max(param.data))
for name, param in module.named_parameters()])
return max_data
def module_max_gradient(module):
def maybe_max(x):
return float(torch.abs(x).max()) if x is not None else 0
max_grad = np.amax(
[(maybe_max(param.grad)) for name, param in module.named_parameters()]
)
return max_grad
def normalize(v, axis=None, eps=1e-10):
"""L2 Normalize along specified axes."""
return v / max(np.linalg.norm(v, axis=axis, keepdims=True), eps)
def inv_lookat(eye, target=[0, 0, 0], up=[0, 1, 0]):
"""Generate LookAt matrix."""
eye = np.float32(eye)
forward = normalize(target - eye)
side = normalize(np.cross(forward, up))
up = np.cross(side, forward)
R = np.stack([side, up, -forward], axis=-1)
return R
def process_image_input(state):
state[:, :3] *= 255
if state.shape[1] >= 4:
state[:, 3] *= 5000
if state.shape[1] == 5:
state[:, -1][state[:, -1] == -1] = 50
return state.astype(np.uint16)
def process_image_output(sample):
sample = sample.astype(np.float32).copy()
n = len(sample)
if len(sample.shape) <= 2:
return sample
sample[:, :3] /= 255.0
if sample.shape[0] >= 4:
sample[:, 3] /= 5000
sample[:, -1] = sample[:, -1] != 0
return sample
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def get_valid_index(arr, index):
return arr[min(len(arr) - 1, index)]
def fc(batchNorm, in_planes, out_planes):
if batchNorm:
return nn.Sequential(
nn.Linear(in_planes, out_planes),
nn.BatchNorm1d(out_planes),
nn.LeakyReLU(0.1, inplace=True),
)
else:
return nn.Sequential(
nn.Linear(in_planes, out_planes), nn.LeakyReLU(0.1, inplace=True)
)
def deg2rad(deg):
if type(deg) is list:
return [x/180.0*np.pi for x in deg]
return deg/180.0*np.pi
def rad2deg(rad):
if type(rad) is list:
return [x/np.pi*180 for x in rad]
return rad/np.pi*180
def make_video_writer(name, window_width, window_height):
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
return cv2.VideoWriter(name, fourcc, 10.0, (window_width, window_height))
def projection_to_intrinsics(mat, width=224, height=224):
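# Converts a flattened 4x4 projection matrix (column-major, OpenGL-style) into a 3x3
# pinhole intrinsic matrix, placing the principal point at the image center.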
intrinsic_matrix = np.eye(3)
mat = np.array(mat).reshape([4, 4]).T
fv = width / 2 * mat[0, 0]
fu = height / 2 * mat[1, 1]
u0 = width / 2
v0 = height / 2
intrinsic_matrix[0, 0] = fu
intrinsic_matrix[1, 1] = fv
intrinsic_matrix[0, 2] = u0
intrinsic_matrix[1, 2] = v0
return intrinsic_matrix
def view_to_extrinsics(mat):
pose = np.linalg.inv(np.array(mat).reshape([4, 4]).T)
return np.linalg.inv(pose.dot(rotX(np.pi)))
def concat_state_action_channelwise(state, action):
"""
concate the action in the channel space
"""
action = action.unsqueeze(2)
state = torch.cat((state, action.expand(-1, -1, state.shape[2])), 1)
return state
def safemat2quat(mat):
quat = np.array([1,0,0,0])
try:
quat = mat2quat(mat)
except:
pass
quat[np.isnan(quat)] = 0
return quat
def migrate_model(in_model, out_model, surfix="latest", in_policy_name="BC", out_policy_name="BC"):
files = [
"actor_PandaYCBEnv_{}".format(surfix),
"state_feat_PandaYCBEnv_{}".format(surfix),
"traj_feat_PandaYCBEnv_{}".format(surfix),
"traj_sampler_PandaYCBEnv_{}".format(surfix),
"critic_PandaYCBEnv_{}".format(surfix),
]
config_file = glob.glob(in_model + '/*.yaml')
config_file = [f for f in config_file if 'bc' in f]
in_policy_name = "BC" if len(config_file) >= 1 else "DQN_HRL"
for file in files:
cmd = "cp {}/{}_{} {}/{}_{}".format(
in_model, in_policy_name, file, out_model, out_policy_name, file)
if os.path.exists('{}/{}_{}'.format(in_model, in_policy_name, file)):
os.system(cmd)
print(cmd)
def get_info(state, opt="img", IMG_SIZE=(112, 112)):
if opt == "img":
return (state[0][1][:3].T * 255).astype(np.uint8)
if opt == "intr":
cam_proj = np.array(state[-2][48:]).reshape([4, 4])
return projection_to_intrinsics(cam_proj, IMG_SIZE[0], IMG_SIZE[1])[:3, :3]
if opt == "point":
return state[0][0]
def make_gripper_pts(points, color=(1, 0, 0)):
line_index = [[0, 1], [1, 2], [1, 3], [3, 5], [2, 4]]
cur_gripper_pts = points.copy()
cur_gripper_pts[1] = (cur_gripper_pts[2] + cur_gripper_pts[3]) / 2.0
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(cur_gripper_pts)
line_set.lines = o3d.utility.Vector2iVector(line_index)
line_set.colors = o3d.utility.Vector3dVector(
[color for i in range(len(line_index))]
)
return line_set
def _cross_matrix(x):
"""
cross product matrix
"""
return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
def a2e(q):
p =
|
np.array([0, 0, 1])
|
numpy.array
|
"""obstacle_avoid_test controller."""
# You may need to import some classes of the controller module. Ex:
# from controller import Robot, LED, DistanceSensor
from controller import Supervisor
from odometry import Odometry
from data_collector import DataCollector
from predictor import Predictor
import matplotlib.pyplot as plt
import numpy as np
import math
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# hello = tf.constant("hello TensorFlow!")
# sess=tf.Session()
# print(sess.run(hello))
MAX_SPEED = 6
TIME_STEP = 8
WHEEL_RADIUS = 0.05
SAMPLING_PERIOD = 10
MAX_X = 2
MAX_Y = 1.5
ENCODER_UNIT = 159.23
INIT_X = 0.0
INIT_Y = 0.0
INIT_ANGLE = 0
PRED_STEPS = 450
correction_x = 0
correction_y = 0
correction_theta = 0
# create the Robot instance.
robot = Supervisor()
robot_sup = robot.getFromDef("e-puck")
robot_trans = robot_sup.getField("translation")
compass = robot.getCompass("compass")
motorLeft = robot.getMotor("left wheel motor")
motorRight = robot.getMotor("right wheel motor")
positionLeft = robot.getPositionSensor("left wheel sensor")
positionRight = robot.getPositionSensor("right wheel sensor")
predictor = Predictor()
timestep = int(robot.getBasicTimeStep())
x = []
y = []
theta = []
distance_sensors_info = []
x_odometry = []
y_odometry = []
theta_odometry = []
sensorNames = ['ds0', 'ds1', 'ds2', 'ds3', 'ds4', 'ds5', 'ds6', 'ds7']
x_pred = []
y_pred = []
theta_pred = []
data_collector = DataCollector()
def init():
compass.enable(timestep)
# motorLeft.setPosition(0.5/WHEEL_RADIUS)
# motorRight.setPosition(0.5/WHEEL_RADIUS)
motorLeft.setPosition(float('inf'))
motorRight.setPosition(float('inf'))
positionRight.enable(timestep)
positionLeft.enable(timestep)
def robot_to_xy(x, y):
return x+1, y+0.75
def xy_to_robot(x, y):
return x-1, y-0.75
def get_bearing_degrees():
north = compass.getValues()
rad = np.arctan2(north[0], north[2])
bearing = (rad) / np.pi * 180
if bearing < 0.0:
bearing += 360
bearing = 360 - bearing - 90
if bearing < 0.0:
bearing += 360
return bearing
def step():
return (robot.step(timestep) != -1)
def save_supervisor_coordinates():
# true robot position information
trans_info = robot_trans.getSFVec3f()
x_coordinate, y_coordinate = robot_to_xy(trans_info[2], trans_info[0])
x.append(x_coordinate)
y.append(y_coordinate)
theta.append((get_bearing_degrees()))
def save_odometry_coordinates(coordinate):
# convert robot coordinates into global coordinate system
x_odometry.append(1 + 2*INIT_X - coordinate.x + correction_x)
y_odometry.append(0.75 + 2*INIT_Y - coordinate.y + correction_y)
theta_odometry.append(convert_angle_to_xy_coordinates(coordinate.theta) + correction_theta)
def save_sensor_distances(distanceSensors):
distances = []
for distanceSensor in distanceSensors:
distance = distanceSensor.getValue()
# there is no real measurement.
if distance == 10:
distance = None
distances.append(distance)
distance_sensors_info.append(distances)
def get_sensor_distance():
# Read the sensors, like:
distanceSensors = []
for sensorName in sensorNames:
sensor = robot.getDistanceSensor(sensorName)
sensor.enable(timestep)
distanceSensors.append(sensor)
return distanceSensors
def calculate_velocity(distanceSensors):
# Process sensor data here
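# Add zero-mean Gaussian noise (sigma = 0.1) to the raw readings to emulate sensor noise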
sensorValues = [distanceSensor.getValue() + np.random.normal(0, 0.1) for distanceSensor in distanceSensors]
rightObstacle = sensorValues[0] < 0.15 or sensorValues[1] < 0.15
leftObstacle = sensorValues[6] < 0.15 or sensorValues[7] < 0.15
left_speed = .5 * MAX_SPEED
right_speed = .5 * MAX_SPEED
# avoid collision
if leftObstacle:
left_speed += .7 * MAX_SPEED
right_speed -= .7 * MAX_SPEED
elif rightObstacle:
left_speed -= .7 * MAX_SPEED
right_speed += .7 * MAX_SPEED
return left_speed, right_speed
def convert_angle_to_xy_coordinates(angle):
angle = angle*180/np.pi
angle = angle - 180
if angle < 0.0:
angle += 360
return angle
def plot():
# Enter here exit cleanup code.
plt.ylim([0, 1.5])
plt.xlim([0, 2])
plt.xlabel("x")
plt.ylabel("y")
plt.plot(x, y, label="real")
plt.plot(x_odometry, y_odometry, label="odometry")
plt.plot(x_pred, y_pred, 's', label="correction", marker='o')
plt.title("Robot position estimation")
plt.legend()
plt.savefig("results/position.eps", format='eps')
def correct_state(x, y, theta, sensors_data, delta = 10, omega = 3):
# corresponds to the E set
errors = []
# corresponds to the X set
predictions = []
xrange = [l/100 for l in range(max(0, int(x*100) - delta), min(MAX_X*100, int(x*100) + delta), 1)]
yrange = [l/100 for l in range(max(0, int(y*100) - delta), min(int(MAX_Y*100), int(y*100) + delta), 1)]
thetarange = [l for l in range(max(0, int(theta) - omega), min(360, int(theta) + omega), 1)]
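# Brute-force grid search: evaluate the predictor on every candidate pose within
# +/- delta centimeters in x and y and +/- omega degrees in heading, and keep the
# pose with the smallest log prediction error.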
print("XRANGE------------------")
print(x)
print(xrange)
print("YRANGE------------------")
print(y)
print(yrange)
print("THETARANGE------------------")
print("theta: ", theta)
print(thetarange)
for i in xrange:
for j in yrange:
for k in thetarange:
error, bad_data = predictor.predict(i, j, k, sensors_data)
if not bad_data:
predictions.append([i, j, k])
errors.append(math.log(error))
if len(errors) > 0:
ix = errors.index(min(errors))
return predictions[ix]
return -1
if __name__ == '__main__':
init()
step()
odometry = Odometry(ENCODER_UNIT * (positionLeft.getValue()),
ENCODER_UNIT * (positionRight.getValue()), INIT_X, INIT_Y, INIT_ANGLE)
count = 0
while(True):
odometry_info = odometry.track_step(ENCODER_UNIT * (positionLeft.getValue()),
ENCODER_UNIT * (positionRight.getValue()))
if not step():
# print('saving data')
data_collector.collect(x_odometry, y_odometry, theta_odometry, x, y, theta,
|
np.array(distance_sensors_info)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 21:57:41 2020
@author: inderpreet
plot the average deviations from different filterin thresholds
"""
import matplotlib.pyplot as plt
import numpy as np
import stats as S
from ici import iciData
plt.rcParams.update({'font.size': 26})
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
from typhon.retrieval.qrnn import set_backend, QRNN
set_backend("pytorch")
import stats
#%%
def read_qrnn(file, inChannels, target):
data = iciData(test_file,
inChannels, target,
batch_size = batchSize)
# read QRNN
# file = 'qrnn_ici_%s_%s_%s_single.nc'%(depth, width, target)
# print (file)
qrnn = QRNN.load(file)
y_pre, y_prior, y0, y, y_pos_mean = S.predict(data, qrnn, add_noise = True)
return y_pre, y_prior, y0, y, y_pos_mean
#%% input parameters
depth = 4
width = 128
quantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])
batchSize = 128
targets = ['I1V', 'I2V', 'I3V']
test_file = "TB_ICI_test.nc"
iq = np.argwhere(quantiles == 0.5)[0,0]
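# iq is the index of the median (0.5) quantile within the quantiles array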
filters =
|
np.arange(5, 0, -0.5)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
Title : Normalization Test Script
Script used for testing if normalization was done correctly
Created on Mon Aug 5 11:47:42 2019
@author: Tim
"""
#Sepecify these few things and the whole thing will test
number_random = 100
all_data_path =\
r"E:\PhD project\ozone\08212019_All_Data_Norm\All_Data.csv"
train_valid_path =\
r"E:\PhD project\ozone\08212019_All_Data_Norm\All_Data_norm.csv"
import numpy as np
import pandas as pd
from statistics import mode
#function that deletes unnecessary stuff
def clean_dataframe_list(df,delete_list):
header = list(df)
for i in range(0,len(delete_list)):
index = header.index(delete_list[i])
del df[header[index]]
del header[index]
return df
#load data in
df_data_all = pd.read_csv(all_data_path)
df_valid_train = pd.read_csv(train_valid_path)
header_all = list(df_data_all)
#delete the list of variables we dont care about
delete_list = ['date','Sensor']
df_valid_train = clean_dataframe_list(df_valid_train,delete_list)
header_vt = list(df_valid_train)
#get different random locations
shapes = df_valid_train.shape
#sets up storage
random_indexes =\
np.random.randint(0,shapes[0],number_random).reshape(-1,1)
fail_headers = []
pass_fail = np.zeros(number_random,dtype = bool)
value_save = np.zeros(number_random)
for i in range(0,number_random,1):
#ranodmely checks certain rows and finds a match
temp_train = df_valid_train.loc[random_indexes[i]]
header_temp = list(temp_train)
temp_store = []
for j in range(0,len(header_temp),1):
value_temp = np.array(temp_train[header_temp[j]])
array_check_temp =\
np.array(df_data_all[header_temp[j]])
error =
|
np.abs(value_temp-array_check_temp)
|
numpy.abs
|
import numpy as np
print(np.sum([1, 2, 3, 4, 5]))
print()
idades = np.random.randint(24, 50, size=30)
print(idades)
print()
# Mean: sum all the elements and divide by the number of elements
print(int(idades.mean()))
# Median: sort the list and take the middle value.
# If the list has an even number of elements, take the two middle ones, add them and divide by 2
print(np.median(idades))
print()
# Vectorized operations
idades1 =
|
np.random.randint(24, 50, size=31)
|
numpy.random.randint
|
# mgauss.py
"""
Multiple Gauss-function fits.
The multiple-Gauss function used in trace.py is specialized for IFU use, so here is a
general-purpose fitter that also takes care of fitting thick things with multiple
components.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy.signal import find_peaks, peak_widths, peak_prominences
from astropy.modeling.models import Gaussian1D, Const1D, Polynomial1D
from astropy.modeling.fitting import LevMarLSQFitter
from pyFU.utils import smooth, multiple_gauss_function
class MultipleGaussians (object) :
def __init__ (self) :
self._model = None
self._ipeaks = None
self._wpeaks = None
self._apeaks = None
self._fit = None
def find_peaks (self, x, y, max_peaks=5) :
"""
Find main peaks in y. The default max_peaks=5 is simply so that pyFU's standard ampl_format
can list 16 coefficients (3*5+1) as a single hex number.
"""
ipeaks = list(np.array(find_peaks(y)[0],dtype=int))
wpeaks = list(np.array(peak_widths (y, ipeaks, rel_height=np.exp(-0.5))[0]/2,dtype=int)) # SIGMAS
apeaks = list(peak_prominences (y, ipeaks)[0])
"""
print (ipeaks)
print (wpeaks)
print ('#, ipeak, xpeak, xwid-, xwid+')
for i in range(len(ipeaks)) :
print (i,ipeaks[i],x[ipeaks[i]],x[int(ipeaks[i]-wpeaks[i])],x[int(ipeaks[i]+wpeaks[i])])
print (apeaks)
"""
# Separate really wide peak into multiple peaks
imin = np.argmin(wpeaks)
imax =
|
np.argmax(wpeaks)
|
numpy.argmax
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import shutil
sys.path.append("../../pyUSID/")
from pyUSID.io import hdf_utils, write_utils, USIDataset
from tests.io import data_utils
if sys.version_info.major == 3:
unicode = str
class TestModel(unittest.TestCase):
def setUp(self):
data_utils.make_beps_file()
data_utils.make_sparse_sampling_file()
data_utils.make_incomplete_measurement_file()
data_utils.make_relaxation_file()
def tearDown(self):
for file_path in [data_utils.std_beps_path,
data_utils.sparse_sampling_path,
data_utils.incomplete_measurement_path,
data_utils.relaxation_path]:
data_utils.delete_existing_file(file_path)
class TestGetDimensionality(TestModel):
def test_legal_no_sort(self):
self.__helper_no_sort(hdf_dsets=True)
self.__helper_no_sort(hdf_dsets=False)
def __helper_no_sort(self, hdf_dsets=True):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/Position_Indices']]
expected_shapes = [[7, 2],
[7],
[5, 3]]
for h5_dset, exp_shape in zip(h5_dsets, expected_shapes):
if not hdf_dsets:
h5_dset = h5_dset[()]
self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset)))
def test_legal_w_sort(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/Position_Indices']]
expected_shapes = [[2, 7],
[7],
[3, 5]]
sort_orders = [[1, 0],
[0],
[1, 0]]
for h5_dset, s_oder, exp_shape in zip(h5_dsets, sort_orders, expected_shapes):
self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset, index_sort=s_oder)))
def test_not_hdf_dset(self):
for obj in [15, 'srds']:
with self.assertRaises(TypeError):
_ = hdf_utils.get_dimensionality(obj)
def test_invalid_sort(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_dset = h5_f['/Raw_Measurement/Spectroscopic_Indices']
with self.assertRaises(ValueError):
_ = hdf_utils.get_dimensionality(h5_dset, index_sort=[3, 4])
_ = hdf_utils.get_dimensionality(h5_dset, index_sort=['a', np.arange(5)])
class TestGetSortOrder(TestModel):
def test_invalid_types(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
for obj in ['fdfdfd', h5_f]:
with self.assertRaises(TypeError):
_ = hdf_utils.get_sort_order(obj)
def test_simple(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/Position_Indices']]
expected_order = [[0, 1], [0], [0, 1]]
for h5_dset, exp_order in zip(h5_dsets, expected_order):
self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(h5_dset)))
def test_reversed(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_dsets = [np.flipud(h5_f['/Raw_Measurement/Spectroscopic_Indices']),
h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
np.fliplr(h5_f['/Raw_Measurement/Position_Indices'])]
expected_order = [[1, 0], [0], [1, 0]]
for h5_dset, exp_order in zip(h5_dsets, expected_order):
self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(h5_dset)))
class TestGetUnitValues(TestModel):
def test_source_spec_all(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
expected = {}
for dim_name in ['Bias', 'Cycle']:
expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)
self.assertEqual(len(expected), len(ret_val))
for key, exp in expected.items():
self.assertTrue(np.allclose(exp, ret_val[key]))
def test_source_spec_all_explicit(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
expected = {}
for dim_name in ['Bias', 'Cycle']:
expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])
self.assertEqual(len(expected), len(ret_val))
for key, exp in expected.items():
self.assertTrue(np.allclose(exp, ret_val[key]))
def test_illegal_key(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
with self.assertRaises(KeyError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Does not exist'])
def test_illegal_dset(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_vals = h5_f['/Raw_Measurement/Ancillary']
with self.assertRaises(ValueError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])
def test_source_spec_single(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
expected = {'Bias': h5_f['/Raw_Measurement/Bias'][()]}
ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Bias')
self.assertEqual(len(expected), len(ret_val))
for key, exp in expected.items():
self.assertTrue(np.allclose(exp, ret_val[key]))
def test_source_pos_all(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Position_Indices']
h5_vals = h5_f['/Raw_Measurement/Position_Values']
expected = {}
for dim_name in ['X', 'Y']:
expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)
self.assertEqual(len(expected), len(ret_val))
for key, exp in expected.items():
self.assertTrue(np.allclose(exp, ret_val[key]))
def test_source_pos_single(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Position_Indices']
h5_vals = h5_f['/Raw_Measurement/Position_Values']
expected = {'Y': h5_f['/Raw_Measurement/Y'][()]}
ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Y')
self.assertEqual(len(expected), len(ret_val))
for key, exp in expected.items():
self.assertTrue(np.allclose(exp, ret_val[key]))
def test_all_dim_names_not_provided(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_inds = h5_f['/Raw_Measurement/Position_Indices'][()]
h5_vals = h5_f['/Raw_Measurement/Position_Values'][()]
with self.assertRaises(TypeError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])
def test_dependent_dim(self):
with h5py.File(data_utils.relaxation_path, mode='r') as h5_f:
h5_inds = h5_f['/Measurement_000/Channel_000/Spectroscopic_Indices']
h5_vals = h5_f['/Measurement_000/Channel_000/Spectroscopic_Values']
spec_dim_names = hdf_utils.get_attr(h5_inds, 'labels')
ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)
for dim_ind, dim_name in enumerate(spec_dim_names):
exp_val = hdf_utils.get_attr(h5_inds, 'unit_vals_dim_' + str(dim_ind))
act_val = ret_dict[dim_name]
self.assertTrue(np.allclose(exp_val, act_val))
def test_sparse_samp_no_attr(self):
# What should the user expect this function to do? throw an error.
# Without the attribute, this function will have no idea that it is looking at a sparse sampling case
# it will return the first and second columns of vals blindly
with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:
h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']
h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']
dim_names = hdf_utils.get_attr(h5_inds, 'labels')
ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)
for dim_ind, dim_name in enumerate(dim_names):
exp_val = h5_vals[:, dim_ind]
act_val = ret_dict[dim_name]
self.assertTrue(np.allclose(exp_val, act_val))
def test_sparse_samp_w_attr(self):
# What should the user expect this function to do? throw an error.
with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:
h5_inds = h5_f['/Measurement_000/Channel_001/Position_Indices']
h5_vals = h5_f['/Measurement_000/Channel_001/Position_Values']
with self.assertRaises(ValueError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])
def test_incomp_dim_no_attr(self):
# What should the user expect this function to do? throw an error.
# Given that the unit values for each tile are different, it should throw a ValueError for X.
# Even though we know Y is incomplete, the function would not notice if it only looked at X.
# However, now this function will automatically find unit values for ALL dimensions just to catch such scenarios
with h5py.File(data_utils.incomplete_measurement_path, mode='r') as h5_f:
h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']
h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']
with self.assertRaises(ValueError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals)
with self.assertRaises(ValueError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['X'])
with self.assertRaises(ValueError):
_ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])
class TestReshapeToNDims(TestModel):
def test_h5_already_sorted(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
h5_main = h5_f['/Raw_Measurement/source_main']
# Data is always slowest to fastest
# Anc dims arranged from fastest to slowest
# Expecting data dims to be arranged according to anc dims order
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=False,
lazy=False, verbose=True)
self.assertTrue(np.all([x == y for x, y in zip(labels, ['X', 'Y', 'Bias', 'Cycle'])]))
self.assertTrue(success)
nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
# Anc dims arranged from fastest to slowest
# Expecting data dims to be arranged according to slow to fast
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=True,
lazy=False, verbose=True)
self.assertTrue(success)
self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))
self.assertTrue(np.allclose(nd_slow_to_fast, n_dim))
def test_h5_manually_provided_anc_dsets_h5(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
exp_labs = ['X', 'Y', 'Bias', 'Cycle']
h5_main = h5_f['/Raw_Measurement/source_main']
h5_pos_inds = h5_f['/Raw_Measurement/Position_Indices']
h5_spec_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
# BOTH POS AND SPEC
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
h5_pos=h5_pos_inds,
h5_spec=h5_spec_inds,
get_labels=True,
sort_dims=False,
lazy=False, verbose=True)
self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
self.assertTrue(success)
self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
# ONLY POS:
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
h5_pos=h5_pos_inds,
h5_spec=None,
get_labels=True,
sort_dims=False,
lazy=False,
verbose=True)
self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
self.assertTrue(success)
self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
# ONLY SPEC
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
h5_pos=None,
h5_spec=h5_spec_inds,
get_labels=True,
sort_dims=False,
lazy=False,
verbose=True)
self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
self.assertTrue(success)
self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
def test_h5_not_main_dset(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/Ancillary']
h5_pos = h5_f['/Raw_Measurement/Position_Indices']
h5_spec = h5_f['/Raw_Measurement/Spectroscopic_Indices']
# Not main
with self.assertRaises(ValueError):
_ = hdf_utils.reshape_to_n_dims(h5_main)
# Not main, and it does not help that we are supplying incompatible ancillary datasets
with self.assertRaises(ValueError):
_ = hdf_utils.reshape_to_n_dims(h5_main, h5_pos=h5_pos, h5_spec=h5_spec)
# main, but we are supplying incompatible ancillary datasets
h5_main = h5_f['/Raw_Measurement/source_main-Fitter_000/results_main']
with self.assertRaises(ValueError):
_ = hdf_utils.reshape_to_n_dims(h5_main, h5_pos=h5_pos, h5_spec=h5_spec)
def build_main_anc_4d(self):
num_rows = 3
num_cols = 5
num_cycles = 2
num_cycle_pts = 7
# arrange as fast, slow
pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
np.repeat(np.arange(num_rows), num_cols))).T
# arrange as fast, slow
spec_inds = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
np.repeat(np.arange(num_cycles), num_cycle_pts)))
# Data is arranged from slowest to fastest
main_nd = np.zeros(shape=(num_rows, num_cols, num_cycles,
num_cycle_pts), dtype=np.uint8)
for row_ind in range(num_rows):
for col_ind in range(num_cols):
for cycle_ind in range(num_cycles):
# for bias_ind in range(num_cycle_pts):
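# Encode (row, col, cycle) into the thousands, hundreds and tens digits, with the bias
# index filling the ones digit, so reshaped data can be verified by inspection.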
val = 1E+3*row_ind + 1E+2*col_ind + 1E+1*cycle_ind + np.arange(num_cycle_pts)
main_nd[row_ind, col_ind, cycle_ind] = val
return main_nd, pos_inds, spec_inds
def base_comparison_4d(self, flip_pos_inds, flip_spec_inds, lazy_in=False,
lazy_out=False, verbose=False):
# Generated Data dims from slowest to fastest
exp_nd_s2f, pos_inds, spec_inds = self.build_main_anc_4d()
# nd (Y, X, Cycle, Bias)
main_2d = exp_nd_s2f.reshape(np.prod(exp_nd_s2f.shape[:2]),
np.prod(exp_nd_s2f.shape[2:]))
# Dimension names arranged from slowest to fastest
labs_s2f = ['Position Dimension 1', 'Position Dimension 0',
'Spectral Dimension 1', 'Spectral Dimension 0']
# Generated ancillary dimensions are arranged from fastest to slowest
# Unless any flipping is requested, as-is order should be fast to slow
as_is_nd_order = [1, 0, 3, 2]
# Unless any flipping is requested, s2f order is already in place
s2f_lab_order = [0, 1, 2, 3]
if flip_pos_inds:
# arranged as slow to fast
pos_inds = np.fliplr(pos_inds)
as_is_nd_order = as_is_nd_order[:2][::-1] + as_is_nd_order[2:]
s2f_lab_order = [1, 0] + s2f_lab_order[2:]
if flip_spec_inds:
# arranged as slow to fast
as_is_nd_order = as_is_nd_order[:2] + as_is_nd_order[2:][::-1]
s2f_lab_order = s2f_lab_order[:2] + [3, 2]
spec_inds = np.flipud(spec_inds)
if lazy_in:
main_2d = da.from_array(main_2d, chunks=main_2d.shape)
pos_inds = da.from_array(pos_inds, chunks=pos_inds.shape)
spec_inds = da.from_array(spec_inds, chunks=spec_inds.shape)
n_dim, suc, labs = hdf_utils.reshape_to_n_dims(main_2d,
h5_pos=pos_inds,
h5_spec=spec_inds, sort_dims=True,
get_labels=True,
lazy=lazy_out,
verbose=verbose)
if lazy_out:
self.assertIsInstance(n_dim, da.core.Array)
self.assertTrue(np.allclose(exp_nd_s2f, n_dim))
self.assertTrue(suc)
# labels were auto-generated and these will be flipped blindly
exp_labs = np.array(labs_s2f)[s2f_lab_order]
self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))
if verbose:
print('~~~~~~~~~~~~~~~~~~~~~~ UNSORTED ~~~~~~~~~~~~~~~~~~~~~~~~~')
n_dim, suc, labs = hdf_utils.reshape_to_n_dims(main_2d,
h5_pos=pos_inds,
h5_spec=spec_inds,
sort_dims=False,
get_labels=True,
lazy=lazy_out,
verbose=verbose)
if lazy_out:
self.assertIsInstance(n_dim, da.core.Array)
# Rearrange the dim labels and N-dim form from slow-to-fast to:
if verbose:
print('N-dim order will be permuted as: {}'.format(as_is_nd_order))
print('Labels will be permuted as: {}'.format([1, 0, 3, 2]))
exp_nd = exp_nd_s2f.transpose(tuple(as_is_nd_order))
"""
This is sort of confusing:
No matter how the pos / spec dims are ordered, the names will always
start as P0, P1, S0, S1
"""
exp_labs = np.array(labs_s2f)[[1, 0, 3, 2]]
if verbose:
print('Expected N-dim shape: {} and labels: {}'
''.format(exp_nd.shape, exp_labs))
self.assertTrue(np.allclose(exp_nd, n_dim))
self.assertTrue(suc)
self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))
def test_numpy_ordinary(self):
self.base_comparison_4d(False, False)
def test_dask_input(self):
self.base_comparison_4d(False, False, lazy_in=True, lazy_out=False)
def test_dask_output(self):
self.base_comparison_4d(False, False, lazy_in=False, lazy_out=True)
def test_dask_all(self):
self.base_comparison_4d(False, False, lazy_in=True, lazy_out=True)
def test_numpy_pos_inds_order_flipped(self):
self.base_comparison_4d(True, False)
def test_numpy_spec_inds_order_flipped(self):
# This is the same situation as in BEPS
self.base_comparison_4d(False, True)
def test_numpy_both_inds_order_flipped(self):
self.base_comparison_4d(True, True)
def test_dask_all_both_inds_order_flipped(self):
self.base_comparison_4d(True, True, lazy_in=True, lazy_out=True)
def build_main_anc_1_2d(self, is_2d=True, is_spec=False):
num_rows = 2
num_cols = 3
# arrange as fast, slow
pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
np.repeat(np.arange(num_rows), num_cols))).T
# Data is arranged from slowest to fastest
main_nd = np.random.randint(0, high=255, size=(num_rows, num_cols),
dtype=np.uint8)
if not is_2d:
pos_inds = np.expand_dims(np.arange(num_rows), axis=1)
main_nd = np.random.randint(0, high=255, size=num_rows,
dtype=np.uint8)
spec_inds= np.expand_dims([0], axis=0)
if is_spec:
return main_nd, spec_inds, pos_inds.T
return main_nd, pos_inds, spec_inds
def base_comparison_1_2d(self, is_2d, is_spec, flip_inds,
lazy_in=False, lazy_out=False):
# Data is always stored from fastest to slowest
# By default the ancillary dimensions are arranged from fastest to slowest
main_nd, pos_inds, spec_inds = self.build_main_anc_1_2d(is_2d=is_2d,
is_spec=is_spec)
main_2d = main_nd.reshape(-1, 1)
main_nd_w_sing = np.expand_dims(main_nd, axis=-1)
if is_spec:
main_2d = main_2d.T
main_nd_w_sing = np.expand_dims(main_nd, axis=0)
# nd (Y, X)
order = [1, 0, 2]
if is_spec:
order = [0, 2, 1]
if flip_inds:
# arranged as slow to fast
if is_spec:
spec_inds = np.flipud(spec_inds)
order = [0] + order[1:][::-1]
else:
pos_inds = np.fliplr(pos_inds)
order = order[:2][::-1] + [2]
print('2D: {}, Spec: {}, Flip: {}'.format(is_2d, is_spec, flip_inds))
print('Main data shapes ND: {}, 2D: {}'.format(main_nd.shape, main_2d.shape))
print(main_nd)
print(main_2d)
if lazy_in:
main_2d = da.from_array(main_2d, chunks=main_2d.shape)
n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,
h5_spec=spec_inds,
sort_dims=True,
get_labels=False,
lazy=lazy_out,
verbose=True)
if lazy_out:
self.assertIsInstance(n_dim, da.core.Array)
self.assertTrue(np.allclose(main_nd_w_sing, n_dim))
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,
h5_spec=spec_inds,
sort_dims=False,
get_labels=False,
lazy=lazy_out,
verbose=True)
if lazy_out:
self.assertIsInstance(n_dim, da.core.Array)
if is_2d:
main_nd_w_sing = main_nd_w_sing.transpose(order)
self.assertTrue(np.allclose(main_nd_w_sing, n_dim))
def test_numpy_ordinary_1d_pos(self):
self.base_comparison_1_2d(False, False, False)
def test_dask_in_ordinary_1d_pos(self):
self.base_comparison_1_2d(False, False, False,
lazy_in=True, lazy_out=False)
def test_dask_out_ordinary_1d_pos(self):
self.base_comparison_1_2d(False, False, False,
lazy_in=False, lazy_out=True)
def test_dask_all_ordinary_1d_pos(self):
self.base_comparison_1_2d(False, False, False,
lazy_in=True, lazy_out=True)
def test_numpy_ordinary_1d_spec(self):
self.base_comparison_1_2d(False, True, False)
def test_dask_in_ordinary_1d_spec(self):
self.base_comparison_1_2d(False, True, False,
lazy_in=True, lazy_out=False)
def test_dask_out_ordinary_1d_spec(self):
self.base_comparison_1_2d(False, True, False,
lazy_in=False, lazy_out=True)
def test_dask_all_ordinary_1d_spec(self):
self.base_comparison_1_2d(False, True, False,
lazy_in=True, lazy_out=True)
def test_numpy_ordinary_2d_pos(self):
self.base_comparison_1_2d(True, False, False)
def test_numpy_ordinary_2d_spec(self):
self.base_comparison_1_2d(True, True, False)
def test_h5_both_inds_flipped(self):
# Flipping both the spec and pos dimensions means that the order in which
# the data is stored is the same order in which dimensions are arranged
# In other words, sort should make no difference at all!
file_path = 'reshape_to_n_dim_sort_required.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_raw_grp = h5_f.create_group('Raw_Measurement')
main_nd, source_pos_data, source_spec_data = self.build_main_anc_4d()
# arrange as slow, fast instead of fast, slow
source_pos_data = np.fliplr(source_pos_data)
# make spectroscopic slow, fast instead of fast, slow
source_spec_data = np.flipud(source_spec_data)
source_dset_name = 'source_main'
# Arrange from slow to fast
pos_attrs = {'units': ['nm', 'um'], 'labels': ['Y', 'X']}
#def build_ind_val_dsets(name, inds, attrs, is_spec):
h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices', data=source_pos_data, dtype=np.uint16)
data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'], is_spec=False)
data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)
h5_pos_vals = h5_raw_grp.create_dataset('Position_Values', data=source_pos_data, dtype=np.float32)
data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'], is_spec=False)
data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)
source_main_data = main_nd.reshape(np.prod(main_nd.shape[:2]),
np.prod(main_nd.shape[2:]))
h5_source_main = h5_raw_grp.create_dataset(source_dset_name, data=source_main_data)
data_utils.write_safe_attrs(h5_source_main, {'units': 'A', 'quantity': 'Current'})
# Remember to set from slow to fastest
source_spec_attrs = {'units': ['', 'V'], 'labels': ['Cycle', 'Bias']}
h5_source_spec_inds = h5_raw_grp.create_dataset('Spectroscopic_Indices', data=source_spec_data,
dtype=np.uint16)
data_utils.write_aux_reg_ref(h5_source_spec_inds, source_spec_attrs['labels'], is_spec=True)
data_utils.write_string_list_as_attr(h5_source_spec_inds, source_spec_attrs)
h5_source_spec_vals = h5_raw_grp.create_dataset('Spectroscopic_Values', data=source_spec_data,
dtype=np.float32)
data_utils.write_aux_reg_ref(h5_source_spec_vals, source_spec_attrs['labels'], is_spec=True)
data_utils.write_string_list_as_attr(h5_source_spec_vals, source_spec_attrs)
# Now need to link as main!
for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds, h5_source_spec_vals]:
h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref
n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_source_main, get_labels=True, sort_dims=True,
lazy=False, verbose=False)
self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))
self.assertTrue(
|
np.allclose(main_nd, n_dim)
|
numpy.allclose
|
import tensorflow as tf
from random import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization, Activation
from tensorflow.keras import Input
from tensorflow.keras.initializers import RandomUniform, RandomNormal, Constant
import matplotlib.pyplot as plt
import cv2 as cv
from random import random
import numpy as np
class Model():
def __init__(self, input_shape = 2, n_hidden_layers = 8, n_neurons = 16, activation = 'tanh', init = RandomNormal(), *args):
self.model = Sequential()
self.model.add(Input(shape = (input_shape)))
initializer = init(*args)
self.model.add(Dense(units = n_neurons, kernel_initializer = initializer, use_bias=True, bias_initializer = 'glorot_uniform'))
self.model.add(Activation(activation))
for _ in range(n_hidden_layers):
self.model.add(Dense(units = n_neurons, kernel_initializer = initializer, use_bias=False))
self.model.add(Activation(activation))
self.model.add(Dense(units = 1, kernel_initializer = initializer, use_bias=False))
self.model.add(Activation('sigmoid'))
def predict(self, data):
return self.model.predict(data)
def generate_pixel_map(w, h, zoom):
body = cv.resize(cv.imread('body.jpg', 0), (w, h))
pixel_map = np.zeros(shape=(h, w, 3))
pixel_map = pixel_map.tolist()
a = 0
for i in range(h):
for j in range(w):
pixel_map[i][j][0]=a
a+=1
a = 0
"""for i in range((int)(w/2)):
for j in range(a, h-a):
pixel_map[j][i]=a
a+=1
a = 0
for i in range((int)(w/2)):
for j in range(a, h-a):
pixel_map[j][w-1-i]=a
a+=1
a = 0
for i in range((int)(h/2)):
for j in range(a, w-a):
pixel_map[h-1-i][j]=a
a+=1"""
pixel_map = np.array(pixel_map)
max_px = pixel_map.max()
pixel_map = pixel_map.tolist()
for i in range(h):
for j in range(w):
pixel_map[i][j][0] = (pixel_map[i][j][0]/max_px-0.5)*zoom
#pixel_map[i][j][1] = (pixel_map[i][j][1]/max_px-0.5)*zoom
pixel_map[i][j][1] = np.sqrt((i/h-0.5)**2+(j/w-0.5)**2)
if i>h/2:
pixel_map[i][j][2] = 0
else:
pixel_map[i][j][2] = ((1-(body[i][j]/255))-0.5)*pixel_map[i][j][0]
#pixel_map[i][j][2]=((i/h)-0.5)**2+((j/w)-0.5)**2
pixel_map =
|
np.array(pixel_map)
|
numpy.array
|
import os,sys
import numpy as np
import random
import multiprocessing
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
NCPU = 4
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
##################### input parameters ###############################
FILEDIR = sys.argv[1]
INDIR = sys.argv[2] # native npz dir
nb_epochs = int(sys.argv[3])
n2d_layers = int(sys.argv[4])
n2d_filters = int(sys.argv[5])
method = sys.argv[6]
# FILEDIR = "/dl/suhong/project/TemptrRosetta/DB/DB13989"
# INDIR = "/dl/yangjy/project/distance/npz" # native npz dir
# nb_epochs = 1
# n2d_layers = 11
# n2d_filters = 8
test_file = "%s/test_lst"%(FILEDIR)
all_file = "%s/list"%(FILEDIR)
TEMPDIR = "%s/temp_npz1"%(FILEDIR) # template npz dir
# test set
with open(test_file) as f:
test_ids = f.read().splitlines()
# all set: containing train and test list
with open(all_file) as f:
IDs = f.read().splitlines()
maxseq = 20000
minseq = 1
dmax = 20.0
dmin = 2.0
nbins = 36
kmin = 6
bins = np.linspace(dmin, dmax, nbins+1)
bins180 = np.linspace(0.0, np.pi, 13)
bins360 = np.linspace(-np.pi, np.pi, 25)
def npz_loader(ID, DIR = INDIR, DIR2 = TEMPDIR):
name = DIR + '/' + ID + '.npz'
name_temp = DIR2 + '/' + ID + '_T01' + '.npz'
npz =
|
np.load(name)
|
numpy.load
|
# from imports import *
import threading
import pandas as pd
# from plyfile import PlyData
from sklearn.decomposition import PCA
from multiprocessing import Process
import random
import os
from time import time
import numpy as np
import ezdxf
import open3d as o3d
from math import sqrt, floor
import laspy
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.polygon import Polygon
from shapely.geometry import Point, MultiPoint, LineString
from shapely import ops
from shapely.geometry.multilinestring import MultiLineString
def SaveRenderOptions(vis):
print("Saving camera parameters")
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
o3d.io.write_pinhole_camera_parameters("./data/camera.json", params)
return False
def MaskTrajectoryFile(vis):
trajectory_file = "./data/camera_trajectory.json"
mask_trajectory_file = "./data/camera_trajectory.json.mask"
if(os.path.exists(trajectory_file)):
os.rename(trajectory_file, mask_trajectory_file)
elif(os.path.exists(mask_trajectory_file)):
os.rename(mask_trajectory_file, trajectory_file)
return False
class TrajectoryRecorder():
def __init__(self):
self.trajectory = []
self.trajectory_file = "./data/camera_trajectory.json"
if(os.path.exists(self.trajectory_file)):
os.remove(self.trajectory_file)
def record(self, vis):
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
self.trajectory.append(params)
def save(self, vis):
trajectory = o3d.camera.PinholeCameraTrajectory()
trajectory.parameters = self.trajectory
o3d.io.write_pinhole_camera_trajectory(self.trajectory_file, trajectory)
def delete(self, vis):
self.trajectory = []
if(os.path.exists(self.trajectory_file)):
os.remove(self.trajectory_file)
def AppendCameraTrajectory(vis):
print("Append camera trajectory.")
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
trajectory = o3d.camera.PinholeCameraTrajectory()
if(os.path.exists("./data/camera_trajectory.json")):
trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json")
trajectory.parameters = trajectory.parameters + [params]
o3d.io.write_pinhole_camera_trajectory("./data/camera_trajectory.json", trajectory)
return False
def LoadRenderOptions(vis, returnVis = False):
# time.sleep(1) # sleep 1 second
paramsFile = "./data/camera.json"
if(not os.path.exists(paramsFile)):
return False
print("Loading camera parameters")
params = o3d.io.read_pinhole_camera_parameters(paramsFile)
vis.get_view_control().convert_from_pinhole_camera_parameters(params)
if(returnVis):
return vis
else:
return False
def AnimationCallBack(vis):
ctr = vis.get_view_control()
ctr.rotate(0.2, 0.0)
# ctr.scale(1/80)
return False
class PlayTrajectory():
def __init__(self):
assert(os.path.exists("./data/camera_trajectory.json"))
self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
self.i = 0
self.time = time()
def StepTrajectory(self, vis):
if(self.i < len(self.trajectory)): # and time() - self.time > 1):
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.i])
self.time = time()
self.i += 1
class DataTool:
def __init__(self, piece_size = 1000000, threads_allowed = 1000, pointSize = 5):
self.piece_size = piece_size
self.threads_allowed = threads_allowed
self.vis = None
self.bBox = None
self.pointCloud = None
self.displayCloud = None
self.pointSize = pointSize
def ReadPointCloudTxt(self, path, pcData):
t = time()
pcFile = open(path, 'r')
self.pointCloud = None
threads = [None] * self.threads_allowed
points_read = 0
thread_index = 0
while True:
if(threads[thread_index] is not None and threads[thread_index].is_alive()):
print("Wait for thread {}".format(thread_index), end=" \r")
threads[thread_index].join()
chunk = pcFile.readlines(self.piece_size)
if(len(chunk) < 1):
break
if(pcData.shape[0] <= points_read + len(chunk)):
if(pcData.shape[0] == 0):
pcData.resize((points_read + len(chunk))*2, axis=0)
else:
pcData.resize(pcData.shape[0]*2, axis=0)
# if(type(self.pointCloud) is np.ndarray):
# self.pointCloud = np.append(self.pointCloud, np.zeros(shape=(len(chunk), 7), dtype="float32"), axis=0)
# else:
# self.pointCloud = np.zeros(shape=(len(chunk), 7), dtype="float32")
threads[thread_index] = threading.Thread(target= self.__ReadPCChunkTxt, args=(chunk, points_read, pcData))
threads[thread_index].start()
points_read += len(chunk)
thread_index += 1
if(thread_index >= self.threads_allowed):
thread_index = 0
print("{0} points read".format(points_read), end='\r')
for i in range(self.threads_allowed):
if(threads[i] is not None):
print("Join thread {}".format(i), end=" \r")
threads[i].join()
pcData.resize(points_read, axis=0)
pcFile.close()
print("PC Finished reading {} points in {:.2f} min".format(pcData.shape[0], (time() - t)/60))
return self.pointCloud
def __ReadPCChunkTxt(self, chunk, start_index, pcData):
for i in range(len(chunk)):
if(chunk[i] != ""):
flts = chunk[i].replace('\n','').split()
# self.pointCloud[start_index + i] = np.array([float(flts[0]), float(flts[1]), float(flts[2]), float(flts[3]),
# float(flts[4]), float(flts[5]), float(flts[6])])
pcData[start_index + i] = np.array([float(flts[0]), float(flts[1]), float(flts[2]), float(flts[3]),
float(flts[4]), float(flts[5]), float(flts[6])])
del chunk
def ReadPointLabelsTxt(self, path):
t = time()
labelsFile = open(path, 'r')
labelsArr = labelsFile.read().split('\n')
if(labelsArr[-1] == ''):
del labelsArr[-1]
self.labels = np.array(labelsArr, dtype='int')
print("Finished reading {} labels in {:.2f} min".format(self.labels.shape[0], (time() - t)/60))
return self.labels
def ConvertToBin(self, path_to_pointcloud, path_to_pointlabels, output_path, extension = ".hdf5"):
if(os.path.isfile(output_path+extension)):
return
else:
print("Converting: ",output_path)
t = time()
pointcloud = np.array(pd.read_csv(path_to_pointcloud, sep=" ", dtype=np.float32, header=None), dtype=np.float32)
h5File = None
if(extension == ".hdf5"):
h5File = h5py.File(output_path+".hdf5", 'w')
h5File.create_dataset("pointcloud", data=pointcloud, dtype='float32', compression="lzf")
del pointcloud
if(path_to_pointlabels):
labels = np.array(pd.read_csv(path_to_pointlabels, dtype=np.int8, header=None))
if(extension == ".hdf5"):
h5File.create_dataset("labels", data=labels, dtype='int8', compression="lzf")
elif(extension == ".npy"):
pointcloud = np.concatenate((pointcloud, labels.astype(np.float32)), 1)
del labels
print("Done reading")
if(extension == ".hdf5"):
h5File.close()
elif(extension == ".npy"):
np.save(output_path, pointcloud, allow_pickle=False)
print("done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
def ConvertDatasets(self, folder, outputFolder):
pcFiles = [f for f in listdir(folder) if isfile(join(folder, f)) and f.endswith('.txt')]
os.makedirs(outputFolder, exist_ok=True)
for file in pcFiles:
name = file.replace('.txt', '')
if(not isfile(join(folder, name+'.labels'))):
self.ConvertToBin(join(folder, name+'.txt'), None, join(outputFolder, name))
else:
self.ConvertToBin(join(folder, name+'.txt'), join(folder, name+'.labels'), join(outputFolder, name))
def createWindow(self, windowName = "Pointcloud"):
# vis = o3d.visualization.Visualizer()
self.vis = o3d.visualization.VisualizerWithKeyCallback()
self.vis.create_window(windowName, 800, 800)
opt = self.vis.get_render_option()
# opt.line_width = 100
opt.point_size = self.pointSize
# opt.background_color = np.asarray([0, 0, 0])
def addPointCloud(self, pointCloud, downSample = False, color = None):
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(pointCloud)
if(color is not None):
pc.paint_uniform_color(np.asarray(color))
if(downSample):
pc = o3d.geometry.voxel_down_sample(pc, voxel_size=0.02)
self.vis.add_geometry(pc)
def setPointCloud(self, pointCloud, downSample = False, color = None):
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(pointCloud)
if(downSample):
pc = o3d.geometry.voxel_down_sample(pc, voxel_size=0.02)
if(self.pointCloud is None):
self.pointCloud = pc
else:
self.pointCloud.points = pc.points
if(color is not None):
self.pointCloud.paint_uniform_color(np.asarray(color))
def addBoundingBox(self, bBox, color = []):
self.addBbox(self.vis, bBox, color)
def addPolyline(self, points, color = []):
self.addLine(self.vis, points, color)
@staticmethod
def addLine(vis, points, color = []):
lines = []
for i in range(len(points)-1):
lines.append([i, i+1])
colors = [color for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(np.array(points))
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
vis.add_geometry(line_set)
@staticmethod
def addBbox(vis, bBox, color = []):
lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]]
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
if(len(color) == 0):
colors = [[1,0,0] for _ in range(len(lines))]
else:
colors = [color for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(np.array(box))
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
vis.add_geometry(line_set)
def setBoundingBox(self, bBox, color = None):
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
if(color is None):
colors = [[1,0,0] for _ in range(12)] #len(lines)
else:
colors = [color for _ in range(12)]
if(self.bBox is None):
lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]]
line_set = o3d.geometry.LineSet()
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.points = o3d.utility.Vector3dVector(np.array(box))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
self.bBox = line_set
else:
self.bBox.points = o3d.utility.Vector3dVector(np.array(box))
self.bBox.colors = o3d.utility.Vector3dVector(np.array(colors))
def setDisplayedCloud(self, bBox):
if(self.pointCloud is None or bBox is None):
return
points = np.asarray(self.pointCloud.points)
rows = np.where((points[:,0] >= bBox[0]) &
(points[:,0] <= bBox[1]) &
(points[:,1] >= bBox[2]) &
(points[:,1] <= bBox[3]) &
(points[:,2] >= bBox[4]) &
(points[:,2] <= bBox[5]) )
if(self.displayCloud is None):
self.displayCloud = o3d.geometry.PointCloud()
self.displayCloud.points = o3d.utility.Vector3dVector(points[rows])
def VisualizePointCloudAsync(self, dataset = [], dataColors = None, downSample = False, deleteZeros = False, bBoxes = None, lines = None, boxesColors = [], linesColors = [], windowName = None, animationFunction = None, loadCameraSettings = False, recordTrajectory = False):
p = Process(target=self.VisualizePointCloud, args=(dataset, dataColors, downSample, deleteZeros, bBoxes, lines, boxesColors, linesColors, windowName, animationFunction, loadCameraSettings, recordTrajectory))
p.start()
def VisualizePointCloud(self, dataset, dataColors = None, downSample = False, deleteZeros = False, bBoxes = None, lines = None, boxesColors = [], linesColors = [], windowName = None, animationFunction = None, loadCameraSettings = False, recordTrajectory = False):
# if(len(dataset) == 0):
# return
if(windowName is None):
pointCount = sum(0 if (data is None) else len(data) for data in dataset)
windowName = f"Point count: {pointCount}"
self.createWindow(windowName=windowName)
for i in range(len(dataset)):
if(dataset[i] is None):
continue
if(len(dataset[i]) == 0):
continue
dataset[i] = np.array(dataset[i])
if (deleteZeros):
if(len(dataset[i][0]) == 3):
indexes = np.where((dataset[i][:, 0] == 0.0) & (dataset[i][:, 1] == 0.0) & (dataset[i][:, 2] == 0.0))
else:
indexes = np.where((dataset[i][:, 0] == 0.0) & (dataset[i][:, 1] == 0.0) & (dataset[i][:, 2] == 0.0) & (dataset[i][:, 3] == 0.0))
dataset[i] = np.delete(dataset[i], indexes, axis=0)
print("Adding dataset {}/{} to visualization ".format(i+1, len(dataset)), end = '\r')
pc = o3d.geometry.PointCloud()
if(len(dataset[i][0]) == 3):
pc.points = o3d.utility.Vector3dVector(dataset[i])
else:
pc.points = o3d.utility.Vector3dVector(dataset[i][:,:3])
if(not (dataColors is None)):
if(not (dataColors[i] is None)):
if(len(dataColors[i]) == len(dataset[i]) and len(dataset[i]) != 3):
pc.colors = o3d.utility.Vector3dVector(np.asarray(dataColors[i]))
elif(len(dataColors) == len(dataset)):
pc.paint_uniform_color(np.asarray(dataColors[i]))
if(not (downSample is None) and downSample != False):
if(not isinstance(downSample, float)):
downSample = 0.02
pc = o3d.geometry.PointCloud.voxel_down_sample(pc, voxel_size=downSample)
self.vis.add_geometry(pc)
print("")
if(bBoxes is not None):
print("Adding {} bBoxes to visualization".format(len(bBoxes)), end = '\r')
for i in range(len(bBoxes)):
# print("Adding bBox {}/{} to visualization".format(i+1, len(bBoxes)), end = '\r')
color = []
if(len(boxesColors) > i and boxesColors[i] is not None):
color = boxesColors[i]
self.addBoundingBox(bBoxes[i], color)
if(not lines is None):
opt = self.vis.get_render_option()
opt.point_size = 2
for i in range(len(lines)):
print("Adding {}/{} line to visualization".format(i, len(lines)), end = '\r')
color = [1.0, 0.0, 0.0] if linesColors is None or len(linesColors)-1 < i else linesColors[i]
self.addPolyline(lines[i], color)
self.vis.register_key_callback(ord("s"), SaveRenderOptions)
self.vis.register_key_callback(ord("S"), SaveRenderOptions)
self.vis.register_key_callback(ord("l"), LoadRenderOptions)
self.vis.register_key_callback(ord("L"), LoadRenderOptions)
self.vis.register_key_callback(ord("m"), MaskTrajectoryFile)
self.vis.register_key_callback(ord("M"), MaskTrajectoryFile)
if recordTrajectory:
recorder = TrajectoryRecorder()
self.vis.register_key_callback(ord("a"), recorder.record)
self.vis.register_key_callback(ord("A"), recorder.record)
self.vis.register_key_callback(ord("r"), recorder.save)
self.vis.register_key_callback(ord("R"), recorder.save)
self.vis.register_key_callback(ord("d"), recorder.delete)
self.vis.register_key_callback(ord("D"), recorder.delete)
# paramFiles = "./data/camera.json"
# if(os.path.exists(paramFiles)):
# os.remove(paramFiles)
if not (animationFunction is None):
self.vis.register_animation_callback(animationFunction)
if(loadCameraSettings):
self.vis = LoadRenderOptions(self.vis, returnVis=True)
self.vis.run()
self.vis.destroy_window()
def DoBoxesQA(self, pointcloud = None, bBoxes = None, downSamplePC = False):
if(len(pointcloud) == 0 and len(self.pointCloud) == 0):
return
elif(len(pointcloud) != 0):
self.setPointCloud(pointcloud, downSamplePC)
acceptedBoxes = []
def darkMode(vis):
opt = vis.get_render_option()
opt.background_color = np.asarray([0, 0, 0])
return False
def acceptBox(vis):
print("Accept")
acceptedBoxes.append(bBoxes[self.boxIndex])
vis.close()
return False
def discardBox(vis):
print("Discard")
vis.close()
return False
key_to_callback = {}
key_to_callback[ord("Y")] = acceptBox
key_to_callback[ord("N")] = discardBox
key_to_callback[ord("D")] = darkMode
self.boxIndex = 0
for box in bBoxes:
self.setDisplayedCloud(box)
self.setBoundingBox(box)
o3d.visualization.draw_geometries_with_key_callbacks([self.displayCloud, self.bBox], key_to_callback, "QA", 800, 800)
self.boxIndex += 1
print("QA done")
return acceptedBoxes
def QAResults(self, dataFolder, boxesFolder, boxesExportFolder, override = True):
pcFiles = [f for f in listdir(boxesFolder) if isfile(join(boxesFolder, f)) and f.endswith('.txt')]
for file in pcFiles:
name = file.replace('.txt', '').replace('BBOXES_', '')
boxesFile = join(boxesFolder, 'BBOXES_'+name+'.txt')
dataFile = join(dataFolder, name+'.hdf5')
newBoxPath = join(boxesExportFolder, 'BBOXES_'+name+'.txt')
if(isfile(dataFile)):
if(override == False and isfile(newBoxPath)):
print("Already done: "+dataFile)
continue
print("QA: "+dataFile)
pc = self.ReadHDF5XYZ(dataFile)
boxes = ReadBoundingBoxes(boxesFile)
newBoxes = self.DoBoxesQA(pc, boxes, True)
SaveBoundingBoxes(newBoxPath, newBoxes)
def SaveHDF5(pointcloud, labels, output_path):
if(not output_path.endswith(".hdf5")):
output_path += ".hdf5"
print("Converting: ",output_path)
t = time()
h5File = h5py.File(output_path, 'w')
h5File.create_dataset("pointcloud", data=pointcloud, dtype='float32', compression="lzf")
h5File.create_dataset("labels", data=labels, dtype='int8', compression="lzf")
h5File.close()
print("done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
def ReadHDF5(path, with_labels = True):
print("Reading '{}'".format(path))
t=time()
h5File = h5py.File(path, 'r')
pointCloud = np.array(h5File["pointcloud"], dtype="float32")
if(with_labels):
labels = np.array(h5File["labels"], dtype="float32")
labels = np.expand_dims(labels, 1)
pointCloud = np.append(pointCloud, labels, axis=1)
del labels
print("Finished reading in {:.2f} min. Shape = {}".format((time() - t)/60, pointCloud.shape))
return pointCloud
def ReadXYZ(file, dataName = "pointcloud", verbose = False, readFormat=None):
if(verbose):
print("Reading pointcloud of '{}'".format(path))
t=time()
xyz = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
xyz = pc[:, :3]
h5File.close()
if(file.endswith(".npy") or readFormat == ".npy"):
pc = np.load(file)
xyz = pc[:, :3]
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
xyz = np.concatenate((np.expand_dims(lasFile.x,1), np.expand_dims(lasFile.y,1), np.expand_dims(lasFile.z,1)), 1)
# xyz *= np.array(lasFile.header.scale)
lasFile.close()
elif(file.endswith(".ply")):
plydata = PlyData.read(file)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
xyz = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1)), axis=1)
if(verbose):
print("Finished reading pointcloud in {:.2f} min. Shape = {}".format((time() - t)/60, xyz.shape))
return xyz
def ReadRGB(file, dataName = "pointcloud", verbose = False):
t=time()
rgb = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
rgb = pc[:, 4:7]
h5File.close()
if(file.endswith(".npy")):
pts = np.load(file)
rgb = pts[:, 3:6]
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
rgb = np.concatenate((np.expand_dims(lasFile.Red,1), np.expand_dims(lasFile.Green,1), np.expand_dims(lasFile.Blue,1)), 1)
rgb = rgb/65536 #[0,1]
lasFile.close()
print("Finished reading RGB values in {:.2f} min. Shape = {}".format((time() - t)/60, rgb.shape))
return rgb
def PointsInBlock(pts, pt, blocksize):
if(not isinstance(blocksize,list) and not isinstance(blocksize, np.ndarray)):
blocksize = [blocksize, blocksize]
mask_x = np.logical_and(pts[:,0]<pt[0]+blocksize[0]/2, pts[:,0]>pt[0]-blocksize[0]/2)
mask_y = np.logical_and(pts[:,1]<pt[1]+blocksize[1]/2, pts[:,1]>pt[1]-blocksize[1]/2)
pts = pts[np.where(mask_x & mask_y)[0]]
if(len(blocksize) == 3):
mask_z = np.logical_and(pts[:,2]<pt[2]+blocksize[2]/2, pts[:,2]>pt[2]-blocksize[2]/2)
return pts[np.where(mask_z)[0]]
else:
return pts
def PointsInRange(pts, pt, pointcloud_range, filter_height = True):
"""
pointcloud_range = [minX, mixY, minZ, maxX, maxY, maxZ]
X and Z are relative to pt
Z values are just clipped
minX, mixY, minZ - negative values
"""
if(len(pointcloud_range) == 6):
minX, minY, minZ, maxX, maxY, maxZ = pointcloud_range
else:
range_X, range_Y, range_Z, = pointcloud_range
minX = -range_X/2
minY = -range_Y/2
minZ = -range_Z/2
maxX = range_X/2
maxY = range_Y/2
maxZ = range_Z/2
mask_x = np.logical_and(pts[:,0]<pt[0]+maxX, pts[:,0]>pt[0]+minX)
mask_y = np.logical_and(pts[:,1]<pt[1]+maxY, pts[:,1]>pt[1]+minY)
pts = pts[np.where(mask_x & mask_y)[0]]
if(len(pts) == 0):
return pts
if(filter_height):
# mask_z = np.logical_and(pts[:,2]<pt[2]+maxZ, pts[:,2]>pt[2]+minZ)
mask_z = pts[:,2]<(min(pts[:,2])+(maxZ-minZ))
pts = pts[np.where(mask_z)[0]]
return pts
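# Hedged sketch (not part of the original file): PointsInRange keeps points whose x/y fall inside
# the window around pt and, with filter_height=True, keeps only z values within (maxZ - minZ) of
# the lowest surviving point.
def _demo_points_in_range():
pts = np.random.rand(1000, 3) * 10.0
center = np.array([5.0, 5.0, 0.0])
kept = PointsInRange(pts, center, [-1, -1, -1, 1, 1, 1])
print("kept", len(kept), "of", len(pts))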
def crop_lines(chunk_center, chunk_size, lines, min_z, max_z):
min_x = chunk_center[0]-chunk_size[0]/2
max_x = chunk_center[0]+chunk_size[0]/2
min_y = chunk_center[1]-chunk_size[1]/2
max_y = chunk_center[1]+chunk_size[1]/2
coords = (
(min_x, min_y, max_z),
(min_x, max_y, max_z),
(max_x, max_y, max_z),
(max_x, min_y, max_z),
(min_x, min_y, min_z),
(min_x, max_y, min_z),
(max_x, max_y, min_z),
(max_x, min_y, min_z),
)
polygon = Polygon(coords).convex_hull
# multi_line = MultiLineString([ops.clip_by_rect(LineString(line) for line in lines])
# cropped_lines = np.array(ops.clip_by_rect(multi_line, min_x, min_y, max_x, max_y).coords)
# cropped_lines = [np.array(line.coords) for line in multi_line.geoms]
cropped_lines = []
for line in lines:
# new_line = ops.clip_by_rect(LineString(line), min_x, min_y, max_x, max_y)
linestring = line if isinstance(line, LineString) else LineString(line)
new_line = polygon.intersection(linestring)
if(isinstance(new_line, MultiLineString)):
split_lines = [np.array(geom.coords) for geom in new_line.geoms]
cropped_lines.extend(split_lines)
elif(isinstance(new_line, GeometryCollection)):
for geom in new_line.geoms:
cropped_lines.append(np.array(geom.coords))
else:
cropped_lines.append(np.array(new_line.coords))
# DataTool().VisualizePointCloudAsync([xyz], lines = cropped_lines)
# DataTool().VisualizePointCloudAsync([xyz], lines = lines)
# return [line[np.logical_not(np.isnan(line).any(1))] for line in cropped_lines if len(line)>0]
return [line[1:-1] for line in cropped_lines if len(line)>0]
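# Hedged sketch (not part of the original file): cropping one synthetic polyline to a 2 x 2 chunk
# centred at the origin; only the interior vertices of each clipped piece are returned.
def _demo_crop_lines():
line = np.array([[-3.0, 0.0, 0.0], [-0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [3.0, 0.0, 0.0]])
out = crop_lines((0.0, 0.0), (2.0, 2.0), [line], min_z=-1.0, max_z=1.0)
print(len(out), [len(c) for c in out]) # one clipped piece containing the two interior vertices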
def SelectPointsFromBlock(pts, pt, blocksize, npoints = None):
selectedPts = PointsInBlock(pts, pt, blocksize)
if(len(selectedPts) == 0):
return np.zeros((0, 3))
if(npoints is None):
return selectedPts
else:
return selectedPts[np.random.choice(len(selectedPts), npoints, replace=True)]
def TemplatesInBlock(templates, pt, blocksize):
if(not isinstance(blocksize,list) and not isinstance(blocksize, np.ndarray)):
blocksize = [blocksize, blocksize]
mask_x = np.logical_and(templates[:,:,0]<pt[0]+blocksize[0]/2, templates[:,:,0]>pt[0]-blocksize[0]/2)
mask_y = np.logical_and(templates[:,:,1]<pt[1]+blocksize[1]/2, templates[:,:,1]>pt[1]-blocksize[1]/2)
mask = (mask_x & mask_y).any(axis=1)
templates = templates[np.where(mask)[0]]
if(len(blocksize) == 3):
mask_z = np.logical_and(templates[:,:,2]<pt[2]+blocksize[2]/2, templates[:,:,2]>pt[2]-blocksize[2]/2)
mask_z = mask_z.any(axis=1)
return templates[np.where(mask_z)[0]]
else:
return templates
def SelectTemplatesFromBlock(templates, pt, blocksize):
return TemplatesInBlock(templates, pt, blocksize)
def ReadPoints(fileList, curbLabel):
ptsList = []
curbPtsList = []
for file in fileList:
pts = np.load(os.path.join(Paths.Curbs.forDelineation, (file if file.endswith(".npy") else file+".npy")))
curbPtsIdx = np.where(pts[:,3] == curbLabel)
otherPts = np.where(pts[:,3] == 2)
curbPtsList.append(pts[curbPtsIdx][:,:3])
ptsList.append(pts[otherPts][:,:3])
return ptsList, curbPtsList
def ReadLinesPoint(fileList):
linePtsList = []
for file in fileList:
file = os.path.basename(file)
if not file.endswith(".npy"):
file += ".npy"
file = os.path.join(Paths.Curbs.denseLines, file)
linePtsList.append(np.load(file))
return linePtsList
def ReadXYZRGB(file):
xyz = ReadXYZ(file)
rgb = ReadRGB(file)
return xyz.astype(np.float32), rgb.astype(np.float32)
def ReadPolyLines(file):
dwg = ezdxf.readfile(file, errors = "ignore")
lines = []
for entity in dwg.entities:
# print(entity.dxftype())
points = []
if(entity.dxftype() == 'LINE'):
points = [entity.dxf.start.xyz, entity.dxf.end.xyz]
else:
for i, point in enumerate(entity.points()):
points.append(point.xyz)
# print(f"{i} : {x}, {y}, {z}")
lines.append(np.array(points))
return lines
def ReadLine(points_file, line_file):
xyz = ReadXYZ(points_file)
# rgb = ReadRGB(points_file)
line = np.load(line_file)
return xyz, None, line
def GroupTiles(xyz, centers, tile_size):
tiles = []
for pt in centers:
pts = PointsInBlock(xyz, pt, tile_size)
if(len(pts) > 0):
tiles.append(pts)
return tiles
def FillTiles(tiles, numberOfPoints):
return [pts[np.random.randint(0, len(pts), numberOfPoints)] for pts in tiles]
def CalculateTileBoxes(minZ, maxZ, centers, tile_size):
bboxes = [BoundingBoxFromVoxel([pt[0], pt[1], 0], tile_size, minZ, maxZ) for pt in centers]
return bboxes
def CalculateTileCenters(xyz, line, step):
points = np.concatenate([xyz, line], axis=0)[:,:2]
return np.unique(np.round(points[:,:2] / step) * step, axis=0)
def ReadCurbData(line_file, shift = True):
cloud_file = os.path.join(Paths.Curbs.forDelineation, os.path.basename(line_file))
if(not os.path.exists(cloud_file) or not os.path.exists(line_file)):
return None, None
xyz, _, line = ReadLine(cloud_file, line_file)
if(shift):
minXYZ = np.concatenate([xyz, line],axis=-2).min(axis=0)
xyz -= minXYZ
line -= minXYZ
# Prepare point cloud tiles
# tiles, centers = SplitCurbIntoTiles(xyz, line, tile_size)
# if(visualize):
# bboxes = CalculateTileBoxes(tiles, centers, tile_size)
# DataTool().VisualizePointCloudAsync(tiles, lines=[line], bBoxes=bboxes)
# org_pts_count = sum([len(tile) for tile in tiles])
# tiles = [tile[np.random.randint(len(tile), size=points_in_tile)] for tile in tiles]
# sampled_pts_count = sum([len(tile) for tile in tiles])
# Prepare curb line
# if(visualize):
# DataTool(pointSize=20).VisualizePointCloudAsync([line], dataColors=[[0,0,1]], lines=[line], bBoxes=bboxes)
# distances = []
# for i in range(len(line)-1):
# distances.append(Distance(line[i], line[i+1]))
# print(f"avg distance between points: {np.mean(distances)}")
return xyz, np.array(line)
def ReadLines(file_name):
files = [os.path.splitext(file)[0] for file in os.listdir(Paths.Curbs.forDelineation) if file.startswith(file_name) and os.path.splitext(file)[0][-1].isdigit()]
points = []
# colors = []
lines = []
for file in files:
pts_file = os.path.join(Paths.Curbs.forDelineation, file+".npy")
line_file = os.path.join(Paths.Curbs.denseLines, file+".npy")
xyz, _, line = ReadLine(pts_file, line_file)
points.append(xyz)
lines.append(line)
DataTool().VisualizePointCloudAsync(points, [np.random.uniform(0,1,(3,)) for _ in range(len(points))], lines=lines)
def NormalizeVector(vec):
vec = np.array(vec)
length = sqrt(np.sum(vec**2))
return vec / length
def Distance2D(a, b):
return Distance(a[:2], b[:2])
def Distance(a, b):
return np.sqrt(np.sum((a - b)**2, axis=-1))
def DensifyLines(lines, maxGap):
newLines = []
for points in lines:
newPoints = [points[0]]
for i in range(len(points)-1):
a = points[i]
b = points[i+1]
distance = Distance(a, b)
addPoints = []
if(distance > maxGap):
dirVec = NormalizeVector(b - a)
addPoints = [a + (dirVec * dist) for dist in np.linspace(0, distance, num=floor(distance / maxGap)+2, dtype=np.float64)[1:-1]]
addPoints.append(b)
newPoints = newPoints + addPoints
newLines.append(np.array(newPoints))
return newLines
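# Hedged sketch (not part of the original file): densifying a synthetic 1 m segment with maxGap=0.25
# yields the two endpoints plus four interior samples.
def _demo_densify_lines():
line = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
dense = DensifyLines([line], maxGap=0.25)[0]
print(len(dense)) # 6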
def ReadTemplates(fileList, nodes:int, densify = True):
templateList = []
separateCurbs = []
for file in fileList:
name = os.path.splitext(os.path.basename(file))[0]
curbs = []
curbIdx = 0
while True:
fileName = os.path.join(Paths.Curbs.denseLines, f"{name}_{curbIdx}.npy")
if(os.path.exists(fileName)):
curbs.append(np.load(fileName))
curbIdx += 1
else:
break
assert(len(curbs) > 0)
templates = np.zeros((0, nodes, 3))
for curb in curbs:
curb_templates = []
for i in range(len(curb[0])):
template = [curb[y][i] for y in range(nodes)]
curb_templates.append(template)
if(densify):
curb_templates = DensifyTemplates(curb_templates)
templates = np.concatenate([templates, np.array(curb_templates)], axis=0)
separateCurbs.append(curb_templates)
templateList.append(templates)
return templateList, separateCurbs
def DensifyTemplates(templates, maxGap = 0.01):
newTemplates = []
for i in range(len(templates)-1):
ta = templates[i]
tb = templates[i+1]
lines = [[ta[i], tb[i]] for i in range(len(ta))]
lines = DensifyLines(lines, maxGap)
# DataTool().VisualizePointCloudAsync(lines, windowName="a")
minLen = np.min([len(line) for line in lines])
lines = [line[np.linspace(0, len(line)-1, minLen, dtype=int)] for line in lines]
# DataTool().VisualizePointCloudAsync(lines, windowName="b")
for i in range(len(lines[0])):
template = [lines[y][i] for y in range(len(lines))]
newTemplates.append(template)
# DataTool().VisualizePointCloudAsync(lines = newTemplates, windowName="c")
return newTemplates
def ReadCurbs(file, selectOnly = None):
nodes = ["node0", "node1", "node2", "node3"]
if(not (selectOnly is None)):
nodes = [selectOnly]
allLines = []
for node in nodes:
dwg = ezdxf.readfile(os.path.join(Paths.Curbs.lines, file+"_"+node+".dxf"))
lines = []
for entity in dwg.entities:
points = []
if(entity.dxftype() == 'LINE'):
points = [entity.dxf.start.xyz, entity.dxf.end.xyz]
else:
for point in entity.points():
points.append(point.xyz)
lines.append(np.array(points))
allLines.append(lines)
curbs = []
for i in range(len(allLines[0])):
curb = np.array([lines[i] for lines in allLines])
curbs.append(curb)
return curbs
def ConstructCurbPatterns(curbs):
patterns = []
for curb in curbs:
for i in range(curb.shape[1]):
pattern = [curb[y][i] for y in range(curb.shape[0])]
patterns.append(np.array(pattern))
return patterns
def ConstructCurbLines(curbs):
lines = []
for curb in curbs:
for points in curb:
lines.append(points)
return lines
def ConstructLinesOutOfTemplates(templates):
lines = np.zeros((templates.shape[1], templates.shape[0], templates.shape[2]))
for i in range(len(lines)):
lines[i] = templates[:, i]
return lines
def WritePolyLines(lines, file):
dwg = ezdxf.new()
msp = dwg.modelspace()
for line in lines:
msp.add_lwpolyline(line)
dwg.saveas(file)
return lines
def sign(points, p2, p3):
return (points[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (points[:, 1] - p3[1])
def PointInTriangle(points, v1, v2, v3):
b1 = sign(points, v1, v2) < 0
b2 = sign(points, v2, v3) < 0
b3 = sign(points, v3, v1) < 0
return (b1 == b2) & (b2 == b3)
def PointsInRectangle(points, A, B, C, D):
# NOTE: this 5-argument variant is shadowed by the 3-argument PointsInRectangle defined just below,
# which is the version actually used by LabelWirePoints and LabelCurbPoints.
a = PointInTriangle(points, A, B, C)
b = PointInTriangle(points, A, D, C)
return np.where(a | b)[0]
def PointsInRectangle(points, start, end, edge):
A, B, C, D = CalculateBox(start, end, edge/2)
a = PointInTriangle(points, A, B, C)
b = PointInTriangle(points, A, D, C)
return np.where(a | b)[0]
def CalculateBox(start, end, edge):
dir = NormalizeVector(end - start)
right = np.cross(dir, (0,0,1))
left = np.cross(dir, (0,0,-1))
A = end + left * edge
B = end + right * edge
C = start + right * edge
D = start + left * edge
return A, B, C, D
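# Hedged sketch (not part of the original file): CalculateBox returns the four corners of a rectangle
# of half-width `edge` around the segment start -> end (here offset by +/-0.5 in y).
def _demo_calculate_box():
start = np.array([0.0, 0.0, 0.0])
end = np.array([2.0, 0.0, 0.0])
A, B, C, D = CalculateBox(start, end, 0.5)
print(A, B, C, D)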
def LabelWirePoints(points, lines, maxDistance = 2):
labels = np.zeros((len(points),), np.uint8)
goodLbl = []
for line in tqdm(lines):
start = line[0]
end = line[-1]
boxPtsIdx = PointsInRectangle(points, start, end, 5)
boxPts = points[boxPtsIdx]
for i in range(len(line)-1):
a = line[i]
b = line[i+1]
segmentPtsIdx = PointsInRectangle(boxPts, a, b, maxDistance)
segmentPts = boxPts[segmentPtsIdx]
segmentGoodPtsIdx = np.where((segmentPts[:, 2] >= (np.min([a[2], b[2]])-maxDistance/2)) & (segmentPts[:, 2] <= (np.max([a[2], b[2]])+maxDistance/2)))
labels[boxPtsIdx[segmentPtsIdx[segmentGoodPtsIdx]]] = 1
goodLbl += list(boxPtsIdx[segmentPtsIdx[segmentGoodPtsIdx]])
return labels
def CutPointsAroundLine(points, line, width, height):
a = np.logical_and(points[:, 0] >= np.min(line[:,0])-width, points[:, 0] <= np.max(line[:,0])+width)
b = np.logical_and(points[:, 1] >= np.min(line[:,1])-width, points[:, 1] <= np.max(line[:,1])+width)
c = np.logical_and(points[:, 2] >= np.min(line[:,2])-height, points[:, 2] <= np.max(line[:,2])+height)
idx = np.where(np.logical_and(np.logical_and(a,b), c))[0]
pts = points[idx]
return pts, idx
def LabelCurbPoints(labels, points, lines, width = 0.5, height = 0.7, class_number = 1, margin = 0):
if(isinstance(labels, np.ndarray)):
labels = [labels]
assert(isinstance(labels, list)) # multiple numpy arrays of labels with instance numbers
for id, wholeLine in tqdm(enumerate(lines)):
id += 1
tempIdx = list(range(len(wholeLine)))
size = 20
step = size-1 # one point overlap
for lineIdx in [tempIdx[i : i + size] for i in range(0, len(tempIdx), step)]:
line = wholeLine[lineIdx]
pts, idx = CutPointsAroundLine(points, line, width, height)
# for i in tqdm(range(len(line)-1)):
for i in range(len(line)-1):
a = line[i]
b = line[i+1]
if(margin > 0):
vec = NormalizeVector(a - b)*margin
a -= vec
b += vec
segmentPtsIdx = PointsInRectangle(pts, a, b, width)
segmentPts = pts[segmentPtsIdx]
# segmentPtsIdx = PointsInRectangle(points, a, b, width)
# segmentPts = points[segmentPtsIdx]
segmentGoodPtsIdx = np.where((segmentPts[:, 2] >= (np.min([a[2], b[2]])-height/2)) & (segmentPts[:, 2] <= (np.max([a[2], b[2]])+height/2)))
for i in range(len(labels)):
labels[i][idx[segmentPtsIdx[segmentGoodPtsIdx]]] = [class_number, id]
# for pt in tqdm(line):
for pt in line:
dist = np.sqrt(np.sum(np.power(pts - pt, 2), axis=1))
segmentGoodPtsIdx = np.where(dist <= width/2)[0]
for i in range(len(labels)):
labels[i][idx[segmentGoodPtsIdx]] = [class_number, id]
if(len(labels) == 1):
labels = labels[0]
return labels
# import scipy.spatial as spatial
from sklearn.neighbors import KDTree
def BuildPointTree(points):
print("Building KDTree...")
t = time()
tree = KDTree(points, leaf_size=10)
print("Done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
return tree
def LabelNearestPoints(labels, tree, lines, radius, class_number = 1):
print("Quering KDTree...")
for pt in tqdm(lines):
idx = tree.query_radius(np.asarray(pt[:3]).reshape(1, -1), radius)[0]
if(isinstance(labels, list)):
for i in range(len(labels)):
labels[i][idx] = class_number
else:
labels[idx] = class_number
return labels
def LabelPointsInDistance(labels, src_pts, lines, radius_lbl):
src_pts = src_pts[:,:3]
for pt in tqdm(lines):
dist = np.sqrt(np.sum(np.power(src_pts - pt, 2), axis=1))
for radius, lbl in radius_lbl:
idx = np.where(dist <= radius)[0]
if(isinstance(labels, list)):
for i in range(len(labels)):
labels[i][idx] = lbl
else:
labels[idx] = lbl
return labels
def CenterOfPoints(pts):
return (np.max(pts, axis=0) + np.min(pts, axis=0))/2
# return np.min(pts, axis=0)
def NormalizePoints(pts, centerPoint):
if(isinstance(pts, list)):
return [points - centerPoint for points in pts]
else:
return pts - centerPoint
def ReadLabels(file, verbose = False, readFormat=None):
if(verbose):
print("Reading labels of '{}'".format(file))
t=time()
lbl = None
if(file.endswith(".hdf5") or readFormat == ".hdf5"):
h5File = h5py.File(file, 'r')
lbl = np.array(h5File["labels"])
h5File.close()
elif(file.endswith(".las") or readFormat == ".las"):
import laspy
lasFile = laspy.file.File(file, mode = "r")
lbl = lasFile.Classification
lasFile.close()
elif(file.endswith(".labels") or file.endswith(".txt") or readFormat == ".txt" or readFormat == ".labels"):
lbl = np.array(pd.read_csv(file, dtype=np.int8, header=None))
elif(file.endswith(".ply") or readFormat == ".ply"):
plydata = PlyData.read(file)
lbl = plydata["vertex"].data["class"].astype(np.float32)
elif(file.endswith(".npy") or readFormat == ".npy"):
pc = np.load(file)
if(pc.shape[1] == 7):
lbl = pc[:, 6]
if(pc.shape[1] == 5):
lbl = pc[:, 4]
if(pc.shape[1] == 4):
lbl = pc[:, 3]
lbl = np.expand_dims(lbl, 1)
if(len(lbl.shape) == 1):
lbl = np.expand_dims(lbl, 1)
print("Finished reading labels in {:.2f} min. Shape = {}".format((time() - t)/60, lbl.shape))
return lbl
def ReadXYZL(file, lblFile = None, verbose = False):
if(verbose):
printline("Reading: '{}'".format(os.path.basename(file)))
t=time()
xyz = None
lbl = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
xyz = pc[:, :3]
if(lblFile is None):
lbl = h5File["labels"]
h5File.close()
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
xyz = np.concatenate((np.expand_dims(lasFile.x,1), np.expand_dims(lasFile.y,1), np.expand_dims(lasFile.z,1)), 1)
if(lblFile is None):
lbl = lasFile.Classification
lasFile.close()
elif(file.endswith(".ply")):
plydata = PlyData.read(file)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
xyz = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1)), axis=1)
lbl = plydata["vertex"].data["class"].astype(np.float32)
if(not (lblFile is None) and lblFile.endswith(".labels")):
lbl = ReadLabels(lblFile)
if(len(lbl.shape) == 1):
lbl = np.expand_dims(lbl, 1)
xyzl = np.concatenate((xyz, lbl), 1)
printline("Finished in {:.2f} min. Shape = {}".format((time() - t)/60, xyzl.shape))
return xyzl
def ReadHDF5Boxes(path):
h5File = h5py.File(path, 'r')
boxesPos = np.array(h5File["boxes"])
boundindBoxes = []
for vox in boxesPos:
boundindBoxes.append(BoundingBoxFromVoxel(Point(vox[0], vox[1], vox[2]), Const.voxelSize))
return boundindBoxes
class DataReader:
threads = []
dataset = []
def ReadFiles(self, files, pointsDataSet = "points", silent=True, positionData = False):
if(type(files) is not list):
files = [files]
points = []
labels = []
position = []
t=time()
count = 0
for f in files:
count+=1
h5File = h5py.File(f, 'r')
tempLabels = np.asarray(h5File["labels"], dtype="int8")
if(tempLabels.shape[1] == 1):
tempLabels = np.eye(Const.numOfCategories, dtype="int8")[tempLabels]
tempLabels = np.squeeze(tempLabels, axis=2)
if(len(points) == 0):
points = np.asarray(h5File[pointsDataSet], dtype="float32")
labels = tempLabels
if(positionData):
position = np.asarray(h5File["position"], dtype="float32")
else:
points = np.concatenate((points, np.asarray(h5File[pointsDataSet], dtype="float32")))
labels = np.concatenate((labels, tempLabels))
if(positionData):
position = np.concatenate((position, np.asarray(h5File["position"], dtype="float32")))
if(not silent):
print("Read file {}/{}. Voxels got: {}.".format(count, len(files), len(points)))
if(not silent):
elapsed = round(time() - t)
print("{} dataset read in {:.0f} min {:.0f} sec".format(len(files), (elapsed - (elapsed % 60))/60, elapsed % 60))
if(positionData):
return points, position, labels
else:
return points, labels
class Point:
def __init__(self, x, y, z, label = -1):
self.x = x
self.y = y
self.z = z
self.label = label
@staticmethod
def from_XYZL(XYZL):
return Point(XYZL[0], XYZL[1], XYZL[2], XYZL[3])
@staticmethod
def from_XYZ(XYZ):
return Point(XYZ[0], XYZ[1], XYZ[2])
def GetPointsInBoundingBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
rows = GetPointsIndexInBoundingBox(points, boundingBox)
return points[rows]
def CountPointsInBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
indices = GetPointsIndexInBoundingBox(points, boundingBox)
return len(indices[0])
def GetPointsIndexInBoundingBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
return np.where((points[:,0] >= boundingBox[0]) & (points[:,0] <= boundingBox[1]) &
(points[:,1] >= boundingBox[2]) & (points[:,1] <= boundingBox[3]) &
(points[:,2] >= boundingBox[4]) & (points[:,2] <= boundingBox[5]))
def BoundingBoxFromVoxel(vxlCntr, vxlEdge, minZ = 0, maxZ = 0):
if(not(type(vxlCntr) is Point)):
vxlCntr = Point(vxlCntr[0], vxlCntr[1], vxlCntr[2])
if type(vxlEdge) is int or type(vxlEdge) is float or type(vxlEdge) is np.float64:
subEdgeX = vxlEdge/2
subEdgeY = vxlEdge/2
subEdgeZ = vxlEdge/2
elif(len(vxlEdge) == 3):
subEdgeX = vxlEdge[0]/2
subEdgeY = vxlEdge[1]/2
subEdgeZ = vxlEdge[2]/2
minX = vxlCntr.x - subEdgeX
maxX = vxlCntr.x + subEdgeX
minY = vxlCntr.y - subEdgeY
maxY = vxlCntr.y + subEdgeY
if(minZ == 0):
minZ = vxlCntr.z - subEdgeZ
if(maxZ == 0):
maxZ = vxlCntr.z + subEdgeZ
return [minX, maxX, minY, maxY, minZ, maxZ]
def GetGlobalBoundingBox(points, discardZeros = False):
if(discardZeros):
points = np.array(points)
indexes = np.where((points[:, 0] == 0) & (points[:, 1] == 0) & (points[:, 2] == 0))[0]
points = np.delete(points, indexes, axis=0)
mins = np.amin(points, axis = 0)
maxs = np.amax(points, axis = 0)
return [mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]]
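# Hedged sketch (not part of the original file): the box is returned as [minX, maxX, minY, maxY, minZ, maxZ].
def _demo_global_bbox():
pts = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
print(GetGlobalBoundingBox(pts)) # [0.0, 1.0, 0.0, 2.0, 0.0, 3.0]
print(GetGlobalBoundingBox(pts, discardZeros=True)) # all-zero rows dropped first -> [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]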
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def LinearGradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initialize a list of the output colors with the starting color
RGB_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return RGB_list
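# Hedged sketch (not part of the original file): a three-step gradient from red to white and back to hex.
def _demo_linear_gradient():
colors = LinearGradient("#FF0000", "#FFFFFF", n=3)
print(colors) # [[255, 0, 0], [255, 127, 127], [255, 255, 255]]
print([RGB_to_hex(c) for c in colors]) # ['#ff0000', '#ff7f7f', '#ffffff']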
def SaveBoundingBoxes(file_path, bBoxes):
file = open(file_path,"w")
for box in bBoxes:
file.write(str(box[0])+" "+str(box[1])+" "+str(box[2])+" "+str(box[3])+" "+str(box[4])+" "+str(box[5])+"\n")
file.close()
def SaveVoxels(file_path, voxels):
file = open(file_path,"w")
for vox in voxels:
file.write(str(vox[0])+" "+str(vox[1])+" "+str(vox[2])+" "+str(vox[3])+" "+str(vox[4])+" "+str(vox[5])+" "+str(vox[6])+"\n")
file.close()
def ReadBoundingBoxes(file_path):
file = open(file_path,"r")
boundingBoxes = []
for line in file:
fl = line.split()
floats = []
for l in fl:
floats.append(float(l))
boundingBoxes.append(floats)
file.close()
return boundingBoxes
def DownsampleAndAddclass(points, classNum, voxelSize = -1):
if(voxelSize != -1):
pointCloud = o3d.geometry.PointCloud()
pointCloud.points = o3d.utility.Vector3dVector(points)
pointCloud = o3d.geometry.voxel_down_sample(pointCloud, voxel_size=voxelSize)
points = np.asarray(pointCloud.points)
labels = np.full((len(points), 1), classNum)
points = np.append(points, labels, axis = 1)
return points
def PrepPointCloud(dataIN, objectLabel, noObjectLabel, downSampleVoxel = -1, verbose = False):
dataTool = DataTool()
print("Reading: {}".format(dataIN))
worldPoints = ReadXYZ(dataIN)
pointLabels = ReadLabels(dataIN)
indexes = np.nonzero(pointLabels == Label.cars)
carPoints = worldPoints[indexes]
worldPoints = np.delete(worldPoints, indexes, axis=0)
carPoints = DownsampleAndAddclass(carPoints, objectLabel, downSampleVoxel)
worldPoints = DownsampleAndAddclass(worldPoints, noObjectLabel, downSampleVoxel)
pointCloud =
|
np.concatenate((carPoints, worldPoints))
|
numpy.concatenate
|
import numpy as np
from PIL import Image, ImageDraw
import math
import torch
def RandomBrush(
max_tries,
s,
min_num_vertex = 4,
max_num_vertex = 18,
mean_angle = 2*math.pi / 5,
angle_range = 2*math.pi / 15,
min_width = 12,
max_width = 48):
H, W = s, s
average_radius = math.sqrt(H*H+W*W) / 8
mask = Image.new('L', (W, H), 0)
for _ in range(np.random.randint(max_tries)):
num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
angle_min = mean_angle - np.random.uniform(0, angle_range)
angle_max = mean_angle + np.random.uniform(0, angle_range)
angles = []
vertex = []
for i in range(num_vertex):
if i % 2 == 0:
angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
else:
angles.append(np.random.uniform(angle_min, angle_max))
h, w = mask.size
vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
for i in range(num_vertex):
r = np.clip(
np.random.normal(loc=average_radius, scale=average_radius//2),
0, 2*average_radius)
new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
vertex.append((int(new_x), int(new_y)))
draw = ImageDraw.Draw(mask)
width = int(np.random.uniform(min_width, max_width))
draw.line(vertex, fill=1, width=width)
for v in vertex:
draw.ellipse((v[0] - width//2,
v[1] - width//2,
v[0] + width//2,
v[1] + width//2),
fill=1)
if np.random.random() > 0.5:
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
if np.random.random() > 0.5:
mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
mask = np.asarray(mask, np.uint8)
if np.random.random() > 0.5:
mask = np.flip(mask, 0)
if np.random.random() > 0.5:
mask = np.flip(mask, 1)
return mask
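# Hedged sketch (not part of the original file): RandomBrush returns an (s, s) uint8 mask with 1s along
# the randomly drawn strokes and 0s elsewhere.
def _demo_random_brush():
m = RandomBrush(max_tries=4, s=128)
print(m.shape, m.dtype, int(m.sum())) # (128, 128) uint8 <number of covered pixels>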
def RandomMask(s, hole_range=[0,1]):
coef = min(hole_range[0] + hole_range[1], 1.0)
while True:
mask = np.ones((s, s), np.uint8)
def Fill(max_size):
w, h = np.random.randint(max_size), np.random.randint(max_size)
ww, hh = w // 2, h // 2
x, y = np.random.randint(-ww, s - w + ww), np.random.randint(-hh, s - h + hh)
mask[max(y, 0): min(y + h, s), max(x, 0): min(x + w, s)] = 0
def MultiFill(max_tries, max_size):
for _ in range(
|
np.random.randint(max_tries)
|
numpy.random.randint
|
import os
import numpy as np
import time
from . import abHighResAlphaMatrix
from . import abCellsEstimator
from . import abCellsEstimatorParallel
from . import abWwiiiObstrFileSaver
from . import abWwiiiPropSchObstrFileSaver
from .abOptionManager import getOption, printOpts
from . import abEtopo1BathyLoader
from . import abGebcoBathyLoader
from . import abRectangularGridBuilder
from . import abCoastalCellDetector
from . import abTriangularMesh
from .abTriangularMeshGridBuilder import abTriangularMeshGridBuilder
################################################################
##### IMPLEMENTATION ON REGULAR GRIDS ##########################
################################################################
def regularGridSpecWW3(xmin=0, dx=0, nx=0, ymin=0, dy=0, ny=0, maskFilePath=''):
"""
regularGridSpecWWIII:
contains all the specifications necessary to the creation
of an abGrid object based on a regular grid.
The mask is a matrix ny x nx with value 1 on sea cells, and 0 on land cells.
Here it is loaded from the mask file produced by gridgen
"""
class specClass:
pass
rs = specClass()
rs.xmin, rs.ymin = xmin, ymin
rs.dx, rs.dy = dx, dy
rs.nx, rs.ny = nx, ny
# loading the mask from the wwiii mask file produced by gridgen
mask = np.zeros([ny, nx])
fl = open(maskFilePath)
ix = 0
for ln in fl:
if ix >= ny:
raise Exception('regularGridSpecWW3: wrong mask file: lat dimension does not match')
vlStrs = ln.strip(' \n').split()
vls = [int(s) for s in vlStrs]
mask[ix, :] = vls
ix += 1
rs.mask = mask
return rs
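# Hedged usage sketch (not part of the original file): the file name below is hypothetical; it must be a
# gridgen-style mask with ny rows of nx integers (1 = sea, 0 = land).
# spec = regularGridSpecWW3(xmin=-180, dx=1, nx=360, ymin=-80, dy=1, ny=160,
# maskFilePath='gridgen_mask.txt')
# spec.mask.shape -> (160, 360)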
def abEstimateAndSaveRegularEtopo1(dirs, freqs, gridName, regularGridSpec, etopo1FilePath, outputDirectory, nParWorker, abOptions = None):
"""
abEstimateAndSaveRegularEtopo1:
This method does:
- build an instance of _abGrid from the input regularGridSpec object (that represents
the logical structure of a latlon mesh, and can be generated with the regularGridSpecWW3 function)
- build an instance of highResolutionBathyMatrix from etopo1
- invoke _abEstimateAndSave like abEstimateAndSaveRegularEtopo1 does
"""
# instantiating the builder of the abGrid object for regular grids
r = regularGridSpec
xmin, ymin = r.xmin, r.ymin
dx, dy = r.dx, r.dy
nx, ny = r.nx, r.ny
mask = r.mask
regGridBld = abRectangularGridBuilder.abRectangularGridBuilder(xmin, ymin, dx, dy, nx, ny,
mask, nParWorker = nParWorker)
# building the high resolution matrix of alpha based on etopo1
llcrnr = getOption(abOptions, 'llcrnr', None)
urcrnr = getOption(abOptions, 'urcrnr', None)
zlim = -.1
print('loading etopo1 bathymetry ...')
x, y, z = abEtopo1BathyLoader.loadBathy(etopo1FilePath, llcrnr, urcrnr)
alphamtx = np.ones(z.shape, dtype=bool)
alphamtx[z > zlim] = 0
highResolutionBathyMatrix = abHighResAlphaMatrix.abHighResAlphaMatrix(x, y, alphamtx)
# creating the detector of the cells located along the coasts of big coastal bodies.
# These bodies are resolved correctly by the model, and do not need subscale modelling
coastalCellDetector = abCoastalCellDetector.abCoastalCellDetector(abOptions)
# creating the grid object (where each cell is represented as a polygon)
grid = regGridBld.buildGrid(highResolutionBathyMatrix, coastalCellDetector)
if grid.wrapAroundDateline:
highResolutionBathyMatrix.wrapAroundDateline()
_abEstimateAndSave(dirs, freqs, gridName, grid, highResolutionBathyMatrix, outputDirectory, nParWorker, abOptions)
def abEstimateAndSaveRegularGebco(dirs, freqs, gridName, regularGridSpec, etopo1FilePath, outputDirectory, nParWorker, abOptions = None):
"""
abEstimateAndSaveRegularGebco:
This method does:
- build an instance of _abGrid from the input regularGridSpec object (that represents
the logical structure of a latlon mesh, and can be generated with the regularGridSpecWW3 function)
- build an instance of highResolutionBathyMatrix from gebco
- invoke _abEstimateAndSave like abEstimateAndSaveRegularEtopo1 does
Use with care in large applications: with alphaBetaLab, GEBCO 2019 takes roughly 16 times more memory and time than ETOPO1.
"""
# instantiating the builder of the abGrid object for regular grids
r = regularGridSpec
xmin, ymin = r.xmin, r.ymin
dx, dy = r.dx, r.dy
nx, ny = r.nx, r.ny
mask = r.mask
regGridBld = abRectangularGridBuilder.abRectangularGridBuilder(xmin, ymin, dx, dy, nx, ny,
mask, nParWorker = nParWorker)
# building the high resolution matrix of alpha based on gebco
llcrnr = getOption(abOptions, 'llcrnr', None)
urcrnr = getOption(abOptions, 'urcrnr', None)
zlim = -.1
print('loading gebco bathymetry ...')
x, y, z = abGebcoBathyLoader.loadBathy(etopo1FilePath, llcrnr, urcrnr)
alphamtx = np.ones(z.shape, dtype=bool)
alphamtx[z > zlim] = 0
highResolutionBathyMatrix = abHighResAlphaMatrix.abHighResAlphaMatrix(x, y, alphamtx)
# creating the detector of the cells located along the coasts of big coastal bodies.
# These bodies are resolved correctly by the model, and do not need subscale modelling
coastalCellDetector = abCoastalCellDetector.abCoastalCellDetector(abOptions)
# creating the grid object (where each cell is represented as a polygon)
grid = regGridBld.buildGrid(highResolutionBathyMatrix, coastalCellDetector)
if grid.wrapAroundDateline:
highResolutionBathyMatrix.wrapAroundDateline()
_abEstimateAndSave(dirs, freqs, gridName, grid, highResolutionBathyMatrix, outputDirectory, nParWorker, abOptions)
################################################################
################################################################
################################################################
################################################################
##### IMPLEMENTATION ON TRIANGULAR MESHES ##################
################################################################
triMeshSpecFromGr3File = abTriangularMesh.loadFromGr3File
triMeshSpecFromMshFile = abTriangularMesh.loadFromMshFile
def abEstimateAndSaveTriangularEtopo1(dirs, freqs, gridName, triMeshSpec, etopo1FilePath, outputDirectory, nParWorker, abOptions = None):
"""
abEstimateAndSaveTriangularEtopo1:
This method does:
- build an instance of _abGrid from the input triMeshSpec object (that should represent
the logical structure of a triangular mesh, and should be loaded, for example, from a gmesh file)
- build an instance of highResolutionBathyMatrix from etopo1
- invoke _abEstimateAndSave like abEstimateAndSaveRegularEtopo1 does
"""
gridBld = abTriangularMeshGridBuilder(triMeshSpec, nParWorker = nParWorker)
grid = gridBld.buildGrid()
llcrnr = getOption(abOptions, 'llcrnr', None)
urcrnr = getOption(abOptions, 'urcrnr', None)
zlim = -.1
print('loading etopo1 bathymetry ...')
x, y, z = abEtopo1BathyLoader.loadBathy(etopo1FilePath, llcrnr, urcrnr)
alphamtx =
|
np.ones(z.shape)
|
numpy.ones
|
import skfuzzy as sf
import time
import numpy as np
from math import pi, log
class FuzzyPID:
def __init__(self, Pmax, Pmin, Imax, Imin, Dmax, Dmin):
self.Kpmax = Pmax
self.Kpmin = Pmin
self.Kimax = Imax
self.Kimin = Imin
self.Kdmax = Dmax
self.Kdmin = Dmin
self.sample_time = 0.0
self.current_time = time.time()
self.last_time = self.current_time
self.tfm = self.tfm_generator(-pi, pi)
self.dtfm = self.tfm_generator(-8, 8)
self.re = self.rule()
self.rde = self.re.T
self.rie = self.rule_ki()
self.a = self.rule_alpha()
self.b = self.a.T
self.clear()
def tfm_generator(self, xmin, xmax):
x = (xmax - xmin) / 2
NB = np.array([xmin, xmin, xmin + 1 / 3 * x], dtype=np.float32)
NM = np.array([xmin, xmin + 1 / 3 * x, xmin + 2 / 3 * x], dtype=np.float32)
NS = np.array([xmin + 1 / 3 * x, xmin + 2 / 3 * x, xmin + x], dtype=np.float32)
ZE = np.array([xmin + 2 / 3 * x, xmin + x, xmax - 2 / 3 * x], dtype=np.float32)
PS = np.array([xmin + x, xmax - 2 / 3 * x, xmax - x / 3], dtype=np.float32)
PM = np.array([xmax - 2 / 3 * x, xmax - x / 3, xmax], dtype=np.float32)
PB = np.array([xmax - 1 / 3 * x, xmax, xmax], dtype=np.float32)
return [NB, NM, NS, ZE, PS, PM, PB]
def membership(self, x, tfm):
x = np.array([x])
return [sf.trimf(x, tfm[0]), sf.trimf(x, tfm[1]), sf.trimf(x, tfm[2]), \
sf.trimf(x, tfm[3]), sf.trimf(x, tfm[4]), sf.trimf(x, tfm[5]), sf.trimf(x, tfm[6])]
def rule(self):
return np.matrix([[3, 4, 5, 6, 5, 4, 3], [2, 3, 4, 5, 4, 3, 2], [1, 2, 3, 4, 3, 2, 1], \
[0, 1, 2, 3, 2, 1, 0], [1, 2, 3, 4, 3, 2, 1], [2, 3, 4, 5, 4, 3, 2], [3, 4, 5, 6, 5, 4, 3]])
def rule_alpha(self):
return np.matrix([[2, 2, 2, 2, 2, 2, 2], [3, 3, 2, 2, 2, 3, 3], [4, 3, 3, 2, 3, 3, 4], \
[5, 4, 3, 3, 3, 4, 5], [4, 3, 3, 2, 3, 3, 4], [3, 3, 2, 2, 2, 3, 3], [2, 2, 2, 2, 2, 2, 2]])
def rule_ki(self):
return np.matrix([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 2, 2, 2, 0, 0], \
[0, 2, 4, 2, 4, 2, 0], [0, 0, 2, 2, 2, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]])
def clear(self):
self.SetPoint = 0.0
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
self.int_error = 0.0
self.windup_guard = 10.0
self.output = 0.0
def update_K(self, error, d_error):
self.Kp = self.re[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 6 * (self.Kpmax - self.Kpmin) + self.Kpmin
self.Kd = self.rde[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 6 * (self.Kdmax - self.Kdmin) + self.Kdmin
self.alpha = self.a[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))]
self.Ki = self.rie[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 4 * (self.Kimax - self.Kimin) + self.Kimin
def update(self, feedback_value, speed):
error = self.SetPoint - feedback_value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
d_error = speed
self.update_K(error, d_error)
if delta_time >= self.sample_time:
pTerm = self.Kp * error
if pTerm < -self.windup_guard:
self.PTerm = -self.windup_guard
elif pTerm > self.windup_guard:
self.PTerm = self.windup_guard
else:
self.PTerm = pTerm
self.ITerm += self.Ki * error * delta_time
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
if delta_time > 0:
self.DTerm = self.Kd * delta_error / delta_time
if (self.DTerm < -self.windup_guard):
self.DTerm = -self.windup_guard
elif (self.DTerm > self.windup_guard):
self.DTerm = self.windup_guard
self.last_time = self.current_time
self.last_error = error
Output = self.PTerm + (self.ITerm) + (self.DTerm)
if Output > 15:
self.output = 15
elif Output < -15:
self.output = -15
else:
self.output = Output
def setKp(self, Pmax, Pmin):
self.Kpmax = Pmax
self.Kpmin = Pmin
def setKd(self, Dmax, Dmin):
self.Kdmax = Dmax
self.Kdmin = Dmin
def setKi(self, Imax, Imin):
self.Kimax = Imax
self.Kimin = Imin
def setSampleTime(self, sample_time):
self.sample_time = sample_time
def setSetPoint(self, setpoint):
self.SetPoint = setpoint
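# Hedged usage sketch (not part of the original file): one control step with the fuzzy-scheduled PID.
# pid = FuzzyPID(10, 7, 4, 2, 1.15, 0.75)
# pid.setSetPoint(0.0)
# pid.update(feedback_value=0.3, speed=-0.1) # picks Kp/Ki/Kd from the rule tables, then updates the PID terms
# pid.output # clamped to [-15, 15]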
def demo():
import skfuzzy
import time
import os
import sys
lib_path = os.path.abspath(os.path.join(sys.path[0], '..'))
sys.path.append(lib_path)
# import gym
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
import numpy as np
import math
from tqdm import tqdm
Ctl = FuzzyPID(10, 7, 4, 2, 1.15, 0.75)
Ctl.setKp(10, 3)
Ctl.setKi(9, 0)
Ctl.setKd(0.9, 0.3)
Ctl.setSampleTime(0.05)
Ctl.setSetPoint(0.0)
graph = []
Graph = []
a =
|
np.arange(-pi, pi, pi / 100)
|
numpy.arange
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from fig2_spline import get_feats, get_P
matplotlib.rcParams.update({'font.size': 22})
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
def make_legend_arrow(legend, orig_handle,
xdescent, ydescent,
width, height, fontsize):
p = matplotlib.patches.FancyArrow(0, 0.5*height, width, 0, length_includes_head=True, head_width=0.75*height, color='black')
return p
def plot_augmentations_polar(S0, theta_star):
angles = np.r_[np.linspace(0, 2*np.pi, 1000), np.pi, np.pi/2, 3*np.pi/2]
fig, ax = plt.subplots(figsize=(6, 6))
bad_c = 'white'
good_c = 'C1'
ax.set_facecolor(bad_c)
def get_lhs_rhs(x):
rho = (np.inner(x, theta_star)) / np.maximum(np.inner(x, x), 1e-16)
lhs = rho**2 * S0.dot(x).dot(x)
rhs = 2 * rho * S0.dot(x).dot(theta_star)
return lhs, rhs
for angle in angles:
x1v = 2*np.pi * np.cos(angle)
x2v = 2*np.pi * np.sin(angle)
x = np.asarray([x1v, x2v])
lhs, rhs = get_lhs_rhs(x)
if rhs - lhs > -1e-10:
plt.plot([0, x1v], [0, x2v], color=good_c, zorder=0)
plt.plot([0],[0], color=good_c, zorder=0)
plt.axis([-1, 1, -1, 1])
arrow = plt.arrow(0, 0, theta_star[0], theta_star[1], width=0.02, alpha=1, length_includes_head=True, color='black', zorder=100)
plt.legend([arrow],
[r'$\theta^*$'],
loc="lower right",
handler_map={matplotlib.patches.FancyArrow : matplotlib.legend_handler.HandlerPatch(patch_func=make_legend_arrow),})
ax = plt.gca()
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.text(0, 1.1, '$e_2$', horizontalalignment='center')
ax.text(1.1, 0, '$e_1$', verticalalignment='center')
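# Quick numerical check of the rhs/lhs test inside get_lhs_rhs above (illustrative, not
# used by any figure): a direction x parallel to theta_star always satisfies
# rhs - lhs = theta_star^T S0 theta_star > 0, so it is painted in the "good" colour.
def _criterion_sanity_check():
    theta = np.asarray([1.0, 0.2])
    S0_demo = np.diag([1.0, 4.0])     # same S0 as X = np.diag([1, 2]) used for Figure 3
    x = 0.5 * theta                   # any positive multiple of theta_star
    rho = np.inner(x, theta) / np.inner(x, x)
    lhs = rho ** 2 * S0_demo.dot(x).dot(x)
    rhs = 2 * rho * S0_demo.dot(x).dot(theta)
    return rhs - lhs                  # equals theta^T S0 theta = 1.16 here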
##################
# Figure 3
##################
matplotlib.rcParams.update({'font.size': 28})
theta_star = np.asarray([1, 0.2])
X = np.diag([1, 2])
S0 = X.T @ X
plot_augmentations_polar(S0, theta_star)
plt.subplots_adjust(bottom=0.15, left=0.2)
plt.savefig('flag_less_skew.png')
matplotlib.rcParams.update({'font.size': 28})
theta_star = np.asarray([1, 0.2])
X = np.diag([1, 5])
S0 = X.T @ X
plot_augmentations_polar(S0, theta_star)
plt.subplots_adjust(bottom=0.15, left=0.2)
plt.savefig('flag_more_skew.png')
##################
# Figure 7
##################
num_stairs = 10
num_examples = 22
adv_eps = (1.0 / 2)
noise_eps = 0.0
x_noise = 0.1
slope = 1
np.set_printoptions(precision=5)
discrete_support = True
knots = np.r_[np.arange(num_stairs), np.arange(num_stairs)+adv_eps]
knots = np.sort(knots)
weights_1 = np.asarray([1/5]*5)
weights_2 = np.asarray([0.01]*(num_stairs-5))
weights = np.concatenate([weights_1, weights_2])
weights /= np.sum(weights)
X = np.r_[np.arange(5).astype(float)]
X = np.sort(X)
y = slope*np.floor(X)
# compute the population \Sigma_0
# first we must rotate the spline basis in a way that the correct norm is being minimized
feats = get_feats(X, knots)
# add small identity for numerical stability
P = get_P(knots) + 1e-10 * np.eye(22)
eigvals, eigs = np.linalg.eig(P)
eigvals = np.maximum(eigvals, 0)
Q = eigs.dot(np.linalg.pinv(np.diag(np.sqrt(eigvals)))).dot(eigs.T)
P_half = np.linalg.inv(Q)
# Q.T X^T X Q
S0_trans = np.zeros((feats.shape[1], feats.shape[1]))
for x in range(num_stairs):
x1, x2 = get_feats(np.asarray([x, x+adv_eps]), knots).dot(Q)
S0_trans += (1 - x_noise) * weights[x] * np.outer(x1, x1) + x_noise * weights[x] * np.outer(x2, x2)
def solve_rotated(X, y):
feats = get_feats(X, knots)
feats_trans = feats.dot(Q)
theta_trans = np.linalg.pinv(feats_trans.T.dot(feats_trans)).dot(feats_trans.T.dot(y))
return feats_trans, theta_trans
feats_std_trans, theta_std_trans = solve_rotated(X, y)
# construct theta_star
all_xs = np.r_[np.asarray([i for i in range(num_stairs)]), np.asarray([i + adv_eps for i in range(num_stairs)])]
all_xs = np.sort(all_xs)
all_ys = slope*np.floor(all_xs)
all_feats_trans, theta_star_trans = solve_rotated(all_xs, all_ys)
def plot_std_aug(theta_std, theta_aug):
X_stairs = np.arange(0, num_stairs).astype(float)
y_stairs = slope*X_stairs
for X_stair, y_stair in zip(X_stairs, y_stairs):
plt.plot([X_stair, X_stair+adv_eps], [y_stair, y_stair], color='black', alpha=0.5)
X_t = np.linspace(0, num_stairs-0.5, 100)
plt.plot(X_t, get_feats(X_t, knots).dot(Q).dot(theta_std), label='Standard', linestyle='dashed', lw=5)
plt.plot(X_t, get_feats(X_t, knots).dot(Q).dot(theta_aug), label='Augmented', linestyle='solid', lw=5)
plt.legend()
plt.scatter(X, y, color='black', s=75, zorder=1000)
plt.xlabel(r'$t$')
plt.ylabel(r'$f_{\theta}(t)$')
# add 3.5
matplotlib.rcParams.update({'font.size': 18})
X_aug = np.r_[X, 3.5]
X_aug = np.sort(X_aug)
y_aug = slope*np.floor(X_aug)
feats_aug, theta_aug = solve_rotated(X_aug, y_aug)
plt.figure(figsize=(5,5))
plot_std_aug(theta_std_trans, theta_aug)
plt.axis('equal')
plt.xlim([-0.5, 10])
plt.ylim([-0.5, 10])
plt.xticks(np.arange(0, 10, 2.0))
plt.yticks(np.arange(0, 10, 2.0))
plt.scatter([3.5], [3], marker='X', s=75, color='C2', zorder=1000)
plt.savefig('spline_add_35.png')
# add 4.5
matplotlib.rcParams.update({'font.size': 22})
feats_std_trans, theta_std_trans = solve_rotated(X, y)
all_xs = np.r_[np.asarray([i for i in range(num_stairs)]), np.asarray([i + adv_eps for i in range(num_stairs)])]
all_xs = np.sort(all_xs)
all_ys = slope*np.floor(all_xs)
all_feats_trans, theta_star_trans = solve_rotated(all_xs, all_ys)
matplotlib.rcParams.update({'font.size': 18})
X_aug = np.r_[X, 4.5]
X_aug = np.sort(X_aug)
y_aug = slope*np.floor(X_aug)
feats_aug, theta_aug = solve_rotated(X_aug, y_aug)
plt.figure(figsize=(5,5))
plot_std_aug(theta_std_trans, theta_aug)
plt.axis('equal')
plt.xlim([-0.5, 10])
plt.ylim([-0.5, 10])
plt.xticks(np.arange(0, 10, 2.0))
plt.yticks(np.arange(0, 10, 2.0))
plt.scatter([4.5], [4], marker='X', s=75, color='C2', zorder=1000)
plt.savefig('spline_add_45.png')
# plot the difference in test error as suggested in Theorem 1, Fig 7a
plt.clf()
# check if a perturbation does/does not satisfy the criterion
hatS0 = feats_std_trans.T.dot(feats_std_trans)
def proj(S, rank_S=None):
eigvals, eigs = np.linalg.eig(S)
if rank_S is not None:
sort_idx = np.argsort(-eigvals)
eigvals[sort_idx[:rank_S]] = 1
eigvals[sort_idx[rank_S:]] = 0
else:
eigvals[eigvals <= 1e-8] = 0.0
eigvals[eigvals > 0] = 1.0
return eigs.dot(np.diag(eigvals)).dot(eigs.T).real
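# Small illustration of proj() (not used by the figures): for a rank-1 PSD matrix the
# returned projector is exactly the projection onto the span of its eigenvector.
def _proj_example():
    v = np.array([1.0, 1.0]) / np.sqrt(2.0)
    S = np.outer(v, v)                            # rank-1, eigenvalues {1, 0}
    return np.allclose(proj(S), np.outer(v, v))   # True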
hat_proj_0 = np.eye(hatS0.shape[0]) - proj(hatS0, rank_S=feats_std_trans.shape[0])
def criterion(S0, theta_star, proj0, x):
theta_0 = proj0.dot(theta_star)
u = proj0.dot(x)
if np.inner(u, u) < 1e-10:
return 0
rho = np.inner(theta_0, u) / np.inner(u, u)
diff = 2 * rho * S0.dot(theta_0).dot(u) - rho**2 * S0.dot(u).dot(u)
return diff
matplotlib.rcParams.update({'font.size': 16})
# on the line
lines = np.arange(10).astype(float)
line_feats = get_feats(lines, knots)
line_feats_trans = line_feats.dot(Q)
line_diffs = []
for i in range(line_feats_trans.shape[0]):
x = line_feats_trans[i]
diff = -criterion(S0_trans, theta_star_trans, hat_proj_0, x).real
line_diffs.append(diff)
# not on the line
perts = np.arange(10).astype(float) + adv_eps
pert_feats = get_feats(perts, knots)
pert_feats_trans = pert_feats.dot(Q)
pert_diffs = []
for i in range(pert_feats_trans.shape[0]):
x = pert_feats_trans[i]
diff = -criterion(S0_trans, theta_star_trans, hat_proj_0, x).real
pert_diffs.append(diff)
plt.scatter(lines, line_diffs, label='On the line', marker='o', s=90)
plt.scatter(perts, pert_diffs, label='Perturbations', marker='^', s=90)
plt.ylabel('Bias criterion (Aug - Std)')
plt.xlabel(r'Augmentation point ($t$)')
plt.xticks(np.arange(0, 10, 1.0))
plt.legend(loc="upper right")
plt.subplots_adjust(bottom=0.15, left=0.15)
plt.savefig('spline_perturbations.png')
matplotlib.rcParams.update({'font.size': 22})
###############
# Fig 4
###############
matplotlib.rcParams.update({'font.size': 28})
# curr dataset
X = np.r_[0,1]
X = np.sort(X)
y = slope*np.floor(X)
# rotation matrix
P = get_P(knots) + 1e-10 * np.eye(22)
eigvals, eigs = np.linalg.eig(P)
eigvals = np.maximum(eigvals, 0)
Q = eigs.dot(np.linalg.pinv(np.diag(np.sqrt(eigvals)))).dot(eigs.T)
X0 = get_feats(X, knots).dot(Q)
xaug_raw = np.r_[X, 4.5]
Xaug = get_feats(xaug_raw, knots).dot(Q)
yaug = np.floor(xaug_raw)
# std estimator
stdest = np.linalg.pinv(X0.T @ X0) @ (X0.T @ y)
augest = np.linalg.pinv(Xaug.T @ Xaug) @ (Xaug.T @ yaug)
# sigma
S_trans = all_feats_trans.T @ all_feats_trans
S_eigs, S_eigv = np.linalg.eig(S_trans)
for i in range(S_eigv.shape[1]):
if i > 5:
break
plt.figure()
plt.plot(np.arange(S_eigv.shape[0]), S_eigv[:, i], lw=5)
plt.xlabel('t')
plt.ylabel('f(t)')
plt.title('$q_{%d}$' % (i+1))
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig(f'eig{i}.png')
##########
# Fig 8
##########
matplotlib.rcParams.update({'font.size': 23})
def normalize(x):
return x / np.linalg.norm(x)
|
numpy.linalg.norm
|
#!/usr/bin/python3
import argparse
import cv2
import math
import numpy as np
import os
import pyexiv2 # dnf install python3-exiv2 (py3exiv2)
from tqdm import tqdm
import matplotlib.pyplot as plt
from props import root, getNode
import props_json
from lib import camera
from lib import image
parser = argparse.ArgumentParser(description='Align and combine sentera images.')
parser.add_argument('--flight', help='Base directory (parent of the NIR/RGB directories).')
parser.add_argument('--scale', type=float, default=0.4, help='scale image before processing')
parser.add_argument('--image', help='image name')
parser.add_argument('image1', help='image1 path')
parser.add_argument('image2', help='image2 path')
args = parser.parse_args()
def detect_camera(image_path):
camera = ""
exif = pyexiv2.ImageMetadata(image_path)
exif.read()
if 'Exif.Image.Make' in exif:
camera = exif['Exif.Image.Make'].value
if 'Exif.Image.Model' in exif:
camera += '_' + exif['Exif.Image.Model'].value
if 'Exif.Photo.LensModel' in exif:
camera += '_' + exif['Exif.Photo.LensModel'].value
camera = camera.replace(' ', '_')
return camera
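# Example of the id string detect_camera() builds (hypothetical EXIF values): Make
# "Sentera", Model "Double 4K", LensModel "5.4mm" would yield "Sentera_Double_4K_5.4mm";
# spaces are replaced with underscores so the id can be used directly as a filename stem.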
if args.flight and args.image:
image1 = os.path.join(args.flight, 'NIR', args.image)
image2 = os.path.join(args.flight, 'RGB', args.image)
else:
image1 = args.image1
image2 = args.image2
cam1 = detect_camera(image1)
cam2 = detect_camera(image2)
print(cam1)
print(cam2)
cam1_node = getNode("/camera1", True)
cam2_node = getNode("/camera2", True)
if props_json.load(os.path.join("../cameras", cam1 + ".json"), cam1_node):
print("successfully loaded cam1 config")
if props_json.load(os.path.join("../cameras", cam2 + ".json"), cam2_node):
print("successfully loaded cam2 config")
tmp = []
for i in range(9):
tmp.append( cam1_node.getFloatEnum('K', i) )
K1 = np.copy(np.array(tmp)).reshape(3,3)
print("K1:", K1)
tmp = []
for i in range(5):
tmp.append( cam1_node.getFloatEnum('dist_coeffs', i) )
dist1 = np.array(tmp)
print("dist1:", dist1)
tmp = []
for i in range(9):
tmp.append( cam2_node.getFloatEnum('K', i) )
K2 = np.copy(np.array(tmp)).reshape(3,3)
print("K2:", K2)
tmp = []
for i in range(5):
tmp.append( cam2_node.getFloatEnum('dist_coeffs', i) )
dist2 = np.array(tmp)
print("dist2:", dist2)
i1 = cv2.imread(image1, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
i2 = cv2.imread(image2, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
if i1 is None:
print("Error loading:", image1)
quit()
if i2 is None:
print("Error loading:", image2)
quit()
i1 = cv2.undistort(i1, K1, dist1)
i2 = cv2.undistort(i2, K2, dist2)
# scale images (anticipating the images have identical dimensions, but this
# will force that assumption if they happen not to.)
(h, w) = i1.shape[:2]
i1 = cv2.resize(i1, (int(w*args.scale), int(h*args.scale)))
i2 = cv2.resize(i2, (int(w*args.scale), int(h*args.scale)))
detector = cv2.xfeatures2d.SIFT_create()
kp1, des1 = detector.detectAndCompute(i1, None)
kp2, des2 = detector.detectAndCompute(i2, None)
print("Keypoints:", len(kp1), len(kp2))
print("Descriptors:", len(des1), len(des2))
FLANN_INDEX_KDTREE = 1
flann_params = {
'algorithm': FLANN_INDEX_KDTREE,
'trees': 5
}
search_params = dict(checks=100)
matcher = cv2.FlannBasedMatcher(flann_params, search_params)
matches = matcher.knnMatch(des1, des2, k=5)
print("Raw matches:", len(matches))
if False:
plt.figure()
plt.title('match distance fall off')
for i, m in enumerate(tqdm(matches)):
if i % 10 == 0:
for j in m:
vals = []
pos = []
for j in range(len(m)):
pos.append(j)
vals.append(m[j].distance)
plt.plot(pos, vals, lw=1)
plt.show()
def draw_inlier(src1, src2, kpt1, kpt2, inlier, drawing_type):
height = max(src1.shape[0], src2.shape[0])
width = src1.shape[1] + src2.shape[1]
output = np.zeros((height, width, 3), dtype=np.uint8)
output[0:src1.shape[0], 0:src1.shape[1]] = src1
output[0:src2.shape[0], src1.shape[1]:] = src2[:]
if drawing_type == 'ONLY_LINES':
for i in range(len(inlier)):
left = kpt1[inlier[i].queryIdx].pt
right = tuple(sum(x) for x in zip(kpt2[inlier[i].trainIdx].pt, (src1.shape[1], 0)))
cv2.line(output, tuple(map(int, left)), tuple(map(int, right)), (0, 255, 255))
elif drawing_type == 'LINES_AND_POINTS':
for i in range(len(inlier)):
left = kpt1[inlier[i].queryIdx].pt
right = tuple(sum(x) for x in zip(kpt2[inlier[i].trainIdx].pt, (src1.shape[1], 0)))
cv2.line(output, tuple(map(int, left)), tuple(map(int, right)), (255, 0, 0))
for i in range(len(inlier)):
left = kpt1[inlier[i].queryIdx].pt
right = tuple(sum(x) for x in zip(kpt2[inlier[i].trainIdx].pt, (src1.shape[1], 0)))
cv2.circle(output, tuple(map(int, left)), 1, (0, 255, 255), 2)
cv2.circle(output, tuple(map(int, right)), 1, (0, 255, 0), 2)
cv2.imshow('show', output)
cv2.waitKey()
def decomposeAffine(affine):
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
rotate_deg = math.atan2(-b,a) * 180.0/math.pi
if rotate_deg < -180.0:
rotate_deg += 360.0
if rotate_deg > 180.0:
rotate_deg -= 360.0
return (rotate_deg, tx, ty, sx, sy)
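# Sanity example for decomposeAffine() (illustrative): a pure 30 degree rotation plus a
# (5, -2) pixel translation decomposes back into rot=30, tx=5, ty=-2, sx=sy=1.
def _decompose_affine_example():
    ang = math.radians(30.0)
    A = np.array([[math.cos(ang), -math.sin(ang),  5.0],
                  [math.sin(ang),  math.cos(ang), -2.0]])
    return decomposeAffine(A)   # ~ (30.0, 5.0, -2.0, 1.0, 1.0)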
def make_lut_u():
return np.array([[[i,255-i,0] for i in range(256)]],dtype=np.uint8)
def make_lut_v():
return np.array([[[0,255-i,i] for i in range(256)]],dtype=np.uint8)
def ndre_helper(lut, a, b, c1, c2):
print('range:', a, b)
db = c2[0] - c1[0]
dg = c2[1] - c1[1]
dr = c2[2] - c1[2]
for i in range(a, b + 1):
percent = (i - a) / (b - a)
lut[0][i][0] = int(c1[0] + db * percent)
lut[0][i][1] = int(c1[1] + dg * percent)
lut[0][i][2] = int(c1[2] + dr * percent)
print(' ', i, percent, lut[0][i])
def make_lut_ndre(cutoffs, colors):
lut = np.zeros( (1, 256, 3) ).astype('uint8')
ndre_helper(lut, 0, cutoffs[0], colors[0], colors[0])
size = len(cutoffs)
for i in range(0, size-1):
ndre_helper(lut, cutoffs[i], cutoffs[i+1], colors[i], colors[i+1])
ndre_helper(lut, cutoffs[size-1], 255, colors[size-1], colors[size-1])
return lut
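# Illustrative use of make_lut_ndre() (assumed cutoffs/colours, not the ones used below):
#   lut = make_lut_ndre([64, 128, 192], [(255, 0, 0), (0, 255, 0), (0, 0, 255)])
# builds a (1, 256, 3) uint8 table fading blue -> green -> red (BGR order), held constant
# below the first and above the last cutoff, which cv2.LUT() can apply to an 8-bit image.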
def normalize(img):
min = np.min(img)
max = np.max(img)
img_norm = (img - min) / (max - min)
return img_norm
angle_bins = [0] * 91
dist_bins = [0] * (int(math.sqrt(w*w+h*h)/10.0)+1)
H = np.identity(3)
first_iteration = True
while True:
print('H:', H)
src_pts = np.float32([kp1[i].pt for i in range(len(kp1))]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[i].pt for i in range(len(kp2))]).reshape(-1, 1, 2)
src_pts = cv2.perspectiveTransform(src_pts, H)
#print('src:', src_pts)
#print('dst:', dst_pts)
print("collect stats...")
match_stats = []
for i, m in enumerate(tqdm(matches)):
best_index = 0
best_value = 99999999999999999999999.9
best_angle = 0
best_size = 0
best_dist = 0
ratio = m[0].distance / m[1].distance
for j in range(len(m)):
p1 = src_pts[m[j].queryIdx]
p2 = dst_pts[m[j].trainIdx]
#print(p1, p2)
raw_dist = np.linalg.norm(p1-p2)
if first_iteration:
# first iteration don't use distance
px_dist = 1
else:
px_dist = 1 + raw_dist*raw_dist
a1 = np.array(kp1[m[j].queryIdx].angle)
a2 = np.array(kp2[m[j].trainIdx].angle)
# angle difference mapped to +/- 180
# angle = (a1-a2+180) % 360 - 180
# angle difference mapped to +/- 90
angle = (a1-a2+90) % 180 - 90
#angle = 1
#print(a1, a2, angle)
angle_dist = abs(angle) + 1
s1 = np.array(kp1[m[j].queryIdx].size)
s2 = np.array(kp2[m[j].trainIdx].size)
size_diff = abs(s1 - s2) + 1
metric = (px_dist * angle_dist * size_diff) / ratio
#print(" ", j, m[j].distance, px_dist, abs(1 + angle), size_diff, metric)
if metric < best_value:
best_value = metric
best_index = j
best_angle = abs(angle)
best_size = size_diff
best_dist = raw_dist
#print(i, best_index, m[best_index].distance, best_angle, best_size, best_value)
match_stats.append( [ m[best_index], ratio, best_value, best_angle,
best_size, best_dist ] )
dist_bins[int(best_dist/10.0)] += 1
angle_bins[int(round(abs(best_angle)))] += 1
if first_iteration:
target_dist = np.argmax(dist_bins)*10
else:
target_dist = 0.0
print("target dist:", target_dist)
target_angle = np.argmax(angle_bins)
print("target angle:", target_angle)
# select the best subset of matches
filt_matches = []
for i, line in enumerate(tqdm(match_stats)):
match = line[0]
ratio = line[1]
best_value = line[2]
best_angle = line[3]
best_size = line[4]
best_dist = line[5]
if ratio < 0.60:
# passes ratio test as per Lowe's paper
filt_matches.append(match)
else:
if abs(best_dist - target_dist) > 30:
continue
if abs(best_angle - target_angle) > 5:
continue
elif best_size > 2:
continue
elif (first_iteration and best_value < 5) or (not first_iteration and best_value < 500):
print(i, best_index, match.distance, best_angle, best_size, best_value)
filt_matches.append(match)
print("Filtered matches:", len(filt_matches))
first_iteration = False
if False:
# dist histogram
plt.figure()
y_pos = np.arange(len(dist_bins))
plt.bar(y_pos, dist_bins, align='center', alpha=0.5)
plt.xticks(y_pos, range(len(dist_bins)))
plt.ylabel('count')
plt.title('total distance histogram')
# angle histogram
plt.figure()
y_pos = np.arange(len(angle_bins))
plt.bar(y_pos, angle_bins, align='center', alpha=0.5)
plt.xticks(y_pos, range(len(angle_bins)))
plt.ylabel('count')
plt.title('angle histogram')
plt.show()
if False:
sh1 = i1.shape
sh2 = i2.shape
size1 = (sh1[1], sh1[0])
size2 = (sh2[1], sh2[0])
matchesGMS = cv2.xfeatures2d.matchGMS(size1, size2, kp1, kp2, filt_matches, withRotation=False, withScale=False, thresholdFactor=5.0)
print("GMS matches:", len(matchesGMS))
if True:
print("Filtering by findHomography")
tol = 4.0
src = []
dst = []
for m in filt_matches:
src.append( kp1[m.queryIdx].pt )
dst.append( kp2[m.trainIdx].pt )
H, status = cv2.findHomography(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32),
cv2.RANSAC,
tol)
matches_fit = []
for i, m in enumerate(filt_matches):
if status[i]:
matches_fit.append(m)
print("Fitted matches:", len(matches_fit))
draw_inlier(i1, i2, kp1, kp2, matches_fit, 'ONLY_LINES')
if True:
src = []
dst = []
for m in matches_fit:
src.append( kp1[m.queryIdx].pt )
dst.append( kp2[m.trainIdx].pt )
affine, status = \
cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32))
(rot, tx, ty, sx, sy) = decomposeAffine(affine)
print("Affine:")
print("Rotation (deg):", rot)
print("Translation (pixels):", tx, ty)
print("Skew:", sx, sy)
# H, status = cv2.findHomography(np.array([src]).astype(np.float32),
# np.array([dst]).astype(np.float32),
# cv2.LMEDS)
print("Homography:", H)
# (rot, tx, ty, sx, sy) = decomposeAffine(affine)
# print("Affine:")
# print("Rotation (deg):", rot)
# print("Translation (pixels):", tx, ty)
# print("Skew:", sx, sy)
#i1_new = cv2.warpAffine(i1, affine, (i1.shape[1], i1.shape[0]))
i1_new = cv2.warpPerspective(i1, H, (i1.shape[1], i1.shape[0]))
blend = cv2.addWeighted(i1_new, 0.5, i2, 0.5, 0)
if False:
cv2.imshow('i1', i1)
cv2.imshow('i1_new', i1_new)
cv2.imshow('i2', i2)
cv2.imshow('blend', blend)
if True:
# NDRE test
ndvi_lut = cv2.imread("NDVI_Scale_LDP.png", flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
ndvi_lut = np.reshape(ndvi_lut, (1, 256, 3))
print(ndvi_lut.shape, ndvi_lut.dtype)
#print(make_lut_ndre().shape, make_lut_ndre().dtype)
nir, garbage, re = cv2.split(i1_new)
g, b, r = cv2.split(i2)
if False:
cv2.imshow('index nir', nir)
cv2.imshow('index re', re)
cutoffs = []
if False:
# sentera formula for ndre
#((-0.341*nir_red + 2.426*nir_blue)- (1.0*nir_red - 0.956*nir_blue))/ ( (-0.341*nir_red + 2.426*nir_blue)+(1.0*nir_red - 0.956*nir_blue))
nnir = normalize(-0.341*re + 2.426*nir)
nnir[nnir==0] = 1
nre = normalize(1.0*re - 0.956*nir)
nindex = (nnir - nre) / (nnir + nre)
cutoffs = [ 32, 60, 80, 110, 168 ]
print('nir', nnir.shape, np.min(nnir), np.max(nnir))
print('re', nre.shape, np.min(nre), np.max(nre))
print('ndvi', nindex.shape, np.min(nindex), np.max(nindex))
elif False:
# using the inversion of re as a proxy for red (modified NDRE)
nnir = nir/255.0
nre = 1.0 - re/255.0
nindex = (nnir - nre) / (nnir + nre)
cutoffs = [ 32, 60, 80, 110, 168 ]
print('nir', nnir.shape, np.min(nnir), np.max(nnir))
print('re', nre.shape, np.min(nre), np.max(nre))
print('ndvi', nindex.shape, np.min(nindex), np.max(nindex))
elif True:
# using nir and r for traditional ndvi
nir[nir==0] = 1
nnir = nir/255.0
nre = r/255.0
nindex = (nnir - nre) / (nnir + nre)
cutoffs = [ 42, 70, 90, 110, 168 ]
print('nir', nnir.shape, np.min(nnir), np.max(nnir))
print('re', nre.shape, np.min(nre), np.max(nre))
print('ndvi', nindex.shape, np.min(nindex), np.max(nindex))
|
numpy.min
|
import warnings
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from core.lattice import *
class Diffract(object):
def __init__(self, lattice, energy, polarization, surface, aziref, absorb):
self.lat = lattice
self.e = energy
self.lam = 1.23984193e4 / energy
self.pol = polarization
self.n = surface
self.azir = aziref
self.absorb = absorb
def orientate(self, Q):
"""
Computes tensor that projects crystal frame onto diffraction frame
:param Q: reflection wavevector [h, k, l]
:return: 3 x 3 rotation matrix from the crystal frame into the diffraction frame
"""
Q = np.asarray(Q) / np.asarray([self.lat.a,self.lat.b,self.lat.c])
Qnorm = Q / np.linalg.norm(Q)
if (Q[0] == 0 and Q[2] == 0):
zeta = 0
else:
zeta = np.arctan2(np.dot(Qnorm, np.array([0, 0, 1])), np.dot(Qnorm, np.array([1, 0, 0])))
eta = np.arccos(np.dot(Qnorm, np.array([0, 1, 0])))
T = np.array([[-np.cos(zeta) * np.cos(eta), np.sin(eta), -np.sin(zeta) * np.cos(eta)],
[ np.sin(zeta), 0, -np.cos(zeta)],
[-np.cos(zeta) * np.sin(eta),-np.cos(eta), -np.sin(zeta) * np.sin(eta)]])
az = np.dot(T, self.azir)
psi = np.arctan2(-az[1], az[0])
Ru3 = np.array([[np.cos(psi), -np.sin(psi), 0], [np.sin(psi), np.cos(psi), 0], [0, 0, 1]])
return np.dot(Ru3, T)
def dspacing(self,Q):
"""
Evaluate d-spacing of reflection
:param Q: wavevector
:return: d-spacing
"""
if(len(np.ravel(Q)) == 3):
Q = np.asarray(Q).reshape(1,3)
#TODO generalise to triclinic symmetry
gg = (Q[:,0] / self.lat.a)**2 + (Q[:,1] / self.lat.b)**2 + (Q[:,2] / self.lat.c)**2
d = np.sqrt(1 / gg)
return d
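# e.g. for a cubic cell with a = b = c = 4 Angstrom, the (2, 0, 0) reflection gives
# gg = (2/4)**2 = 0.25 and d = 2 Angstrom, matching the usual a / sqrt(h^2 + k^2 + l^2).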
def th(self, Q):
"""
Calculate theta of reflection Q
:param Q: in [h,k,l]
:return: theta in radians
"""
d = self.dspacing(Q)
with warnings.catch_warnings():
warnings.filterwarnings('error')
th = np.arcsin(self.lam / (2.0*d))
return th
def tth(self,Q):
"""
Calculate two-theta of reflection Q
:param Q: in [h,k,l]
:return: two-theta in radians
"""
return 2.0*self.th(Q)
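# Worked example (assumed numbers): for energy = 1e4 (lam ~ 1.24 Angstrom) and a
# reflection with d = 2 Angstrom, th = arcsin(lam / (2 d)) ~ 0.315 rad (~ 18.1 deg),
# so tth ~ 0.63 rad (~ 36.1 deg).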
def xrms_tensor(self, M):
"""
Calculate XRMS tensor in spherical approximation
:param M: magnetic structure factor vector
:return: 3 x 3 scattering tensor
"""
Fm = 1j * np.array([[0, M[2], -M[1]], [-M[2], 0, M[0]], [M[1], -M[0], 0]])
return Fm
# ----- Absorption corrections ----- #
def calc_absorption(self, Q, psi):
th = self.th(Q)
delta = self.calc_delta(Q, psi)
alpha = th - delta
beta = th + delta
abs = (1 + np.sin(alpha) / np.sin(beta)) ** -1
return abs, alpha, beta
def calc_delta(self, Q, psi):
T = self.orientate(Q) # crystal orientation in diffraction frame
a = np.dot(T,self.n)
delta = np.zeros(len(psi))
for i in range(0, len(psi)):
npsi = np.dot(self.rotZ(psi[i]), a)
npro = self.nProj(npsi)
delta[i] = np.arccos(np.dot(npro, np.array([0, 0, -1])))
if npro[0] <= 0:
delta[i] *= -1
return delta
def rotZ(self, angle):
psi = np.deg2rad(angle)
R = np.array([[np.cos(psi), -np.sin(psi), 0], [np.sin(psi), np.cos(psi), 0], [0, 0, 1]])
return R
|
numpy.sin
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CPM train module"""
import numpy as np
import mindspore.nn as nn
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore import context
import mindspore.common.dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.ops.operations.comm_ops import _VirtualDataset
from mindspore.ops import functional as F
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from src.cpm_loss import Cross_entropy
from src.cpm import CPMModel
from src.util import ClipByGlobalNorm
class CPMWithLoss(nn.Cell):
"""
Provide CPM training loss through network.
Args:
batch_size (int): Batch size of input dataset.
seq_length (int): Length of input tensor sequence.
vocab_size (int): Size of the vocabulary list.
hidden_size (int): Internal feature dimension.
config: The config of CPM network.
num_hidden_layers (int): Number of hidden layers.
num_attention_heads (int): Number of attention heads.
Returns:
Tensor, the loss of the network.
"""
def __init__(self, batch_size, seq_length, vocab_size, hidden_size,
config, num_hidden_layers, num_attention_heads):
super(CPMWithLoss, self).__init__()
self.batch_size = batch_size
self.seq_length = seq_length
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.squeeze = P.Squeeze()
self.expanddims = P.ExpandDims().shard(((config.dp, 1),))
self.expanddims1 = P.ExpandDims().shard(((config.dp,),))
self.tile = P.Tile().shard(((config.dp, 1, 1),))
self.reducesum = P.ReduceSum().shard(((config.dp, 1, 1),))
self.reducesum2 = P.ReduceSum().shard(((config.dp, 1),))
self.reducemean = P.ReduceMean().shard(((1, 1),))
self.cast = P.Cast()
self.readdiv = P.RealDiv().shard(((config.dp, 1), (config.dp, 1)))
self.readdiv2 = P.RealDiv().shard(((1,), (1,)))
self.mul = P.Mul().shard(((config.dp, 1, 1), (config.dp, 1, 1)))
self.mul2 = P.Mul().shard(((config.dp, 1), (config.dp, 1)))
self.cpm_model = CPMModel(batch_size=self.batch_size,
seq_length=self.seq_length,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
config=config,
hidden_dropout=config.dropout,
attention_dropout=config.dropout,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
is_training=True)
self.loss_net = Cross_entropy(batch_size=self.batch_size,
seq_length=self.seq_length,
vocab_size=self.vocab_size,
config=config,
is_training=True)
self.slice = P.StridedSlice().shard(((config.dp, 1),))
self.slice_mask = P.StridedSlice().shard(((config.dp, 1, 1),))
def construct(self, input_ids, attention_mask=None, position_ids=None, loss_mask=None, labels=None):
r"""
CPM model with loss.
"""
input_ids = self.slice(input_ids, (0, 0),
(self.batch_size, self.seq_length),
(1, 1))
position_ids = self.slice(position_ids, (0, 0),
(self.batch_size, self.seq_length),
(1, 1))
attention_mask_1 = self.slice_mask(attention_mask, (0, 0, 0),
(self.batch_size, self.seq_length, self.seq_length),
(1, 1, 1))
logist = self.cpm_model(input_ids, position_ids, attention_mask_1)
loss_mask_expand = self.expanddims(loss_mask, -1)
# 8 725 -> 8, 725, 1
loss_masks = self.tile(loss_mask_expand, (1, 1, self.vocab_size))
# 8 725 30000
loss_mask_sum = self.expanddims1(self.reducesum2(loss_mask, -1), -1)
# [8, 725, 30000|8, 725, 30000
logist_mask_mul = self.mul(logist, loss_masks)
# 8, 725, 30000->8, 30000
logist_mask_sum = self.reducesum(logist_mask_mul, 1)
# 8, 30000| 8 1
output = self.readdiv(logist_mask_sum, loss_mask_sum)
# 8 725 | 8 725
label_mul_mask = self.mul2(labels, loss_mask)
# 8 725 -> 8
label_mask = self.reducesum2(label_mul_mask, 1)
# 8 725 -> 8
loss_mask_for_label = self.reducesum2(loss_mask, -1)
# 8 / 8
label_final = self.readdiv2(label_mask, loss_mask_for_label)
# batch 1 vocabe_size
output = self.expanddims(output, 1)
# batchsize 1
label_final = self.expanddims1(label_final, 1)
# batchsize 1
losses = self.loss_net(output, self.cast(label_final, mstype.float32))
loss = self.reducemean(losses, 0)
return loss
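# Shape walk-through of the masking above (illustrative, batch B=8, seq T=725, vocab
# V=30000 as in the inline comments): `output` is the loss-mask-weighted mean of the
# logits over the sequence axis, (B, T, V) -> (B, V), and `label_final` the matching
# weighted mean of the labels, (B, T) -> (B,); both are expanded with an extra axis
# before being handed to the cross-entropy cell.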
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type not in [0, 1]:
return grad
dt = F.dtype(grad)
if clip_type == 0:
new_grad = C.clip_by_value(
grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
F.cast(F.tuple_to_array((clip_value,)), dt))
else:
new_grad = nn.ClipByNorm()(grad,
F.cast(F.tuple_to_array((clip_value,)),
dt))
return new_grad
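# Illustrative semantics of the two clip modes above (framework-agnostic numbers): with
# clip_value = 1.0, value clipping maps a gradient entry 2.5 -> 1.0 and -3.0 -> -1.0,
# while norm clipping rescales the whole tensor g by min(1, clip_value / ||g||_2),
# e.g. g = [3, 4] (norm 5) becomes [0.6, 0.8].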
class VirtualDatasetOneInputCell(nn.Cell):
def __init__(self, backbone):
super(VirtualDatasetOneInputCell, self).__init__(auto_prefix=False)
self._backbone = backbone
self._virtual_dataset = _VirtualDataset()
def construct(self, *data):
data_ = self._virtual_dataset(*data)
return self._backbone(*data_)
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * reciprocal(scale)
class CPMTrainOneStepWithLossScaleCell(TrainOneStepWithLossScaleCell):
"""
Encapsulation class of CPM network training.
Append an optimizer to the training network after that the construct
function can be called to create the backward graph.
Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
scale_update_cell (Cell): Cell to do the loss scale. Default: None.
enable_global_norm (Bool): Whether using global normalization.
"""
def __init__(self,
network,
optimizer,
scale_update_cell=None,
enable_global_norm=True):
super(CPMTrainOneStepWithLossScaleCell,
self).__init__(network, optimizer, scale_update_cell)
self.network = network
self.weights = optimizer.parameters
self.optimizer = optimizer
self.default_lr = Tensor([0.0], dtype=mstype.float32)
self.enable_global_norm = enable_global_norm
self.cast = P.Cast()
self.clip = ClipByGlobalNorm(self.weights)
def construct(self,
input_ids,
attention_mask,
position_ids,
loss_mask,
labels,
sens=None):
"""Defines the computation performed."""
weights = self.weights
loss = self.network(input_ids,
attention_mask,
position_ids,
loss_mask,
labels)
scaling_sens = self.scale_sense
# alloc status and clear should be right before grad operation.
status, scaling_sens = self.start_overflow_check(loss, scaling_sens)
scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens, F.dtype(loss))
grads = self.grad(self.network,
weights)(input_ids,
attention_mask,
position_ids,
loss_mask,
labels,
scaling_sens_filled)
# apply grad reducer on grads.
grads = self.grad_reducer(grads)
grads = self.hyper_map(
F.partial(grad_scale, scaling_sens), grads)
if self.enable_global_norm:
grads, _ = self.clip(grads)
else:
grads = self.hyper_map(
F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE),
grads)
cond = self.get_overflow_status(status, grads)
overflow = self.process_loss_scale(cond)
if not overflow:
self.optimizer(grads)
return loss, cond, scaling_sens
cast = P.Cast()
update_accu_grads = C.MultitypeFuncGraph("update_accu_grads")
@update_accu_grads.register("Tensor", "Tensor")
def _update_accu_grads(accu_grad, grad):
succ = True
return F.depend(succ, F.assign_add(accu_grad, cast(grad, mstype.float32)))
zeroslike = P.ZerosLike()
reset_accu_grads = C.MultitypeFuncGraph("reset_accu_grads")
@reset_accu_grads.register("Tensor")
def _reset_accu_grads(accu_grad):
succ = True
return F.depend(succ, F.assign(accu_grad, zeroslike(accu_grad)))
class CPMTrainAccuStepsWithLossScaleCell(TrainOneStepWithLossScaleCell):
"""
Encapsulation class of CPM network training with loss scale.
Append an optimizer to the training network after that the construct
function can be called to create the backward graph.
Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
scale_update_cell (Cell): Cell to do the loss scale. Default: None.
enable_global_norm (Bool): Whether using global normalization.
"""
def __init__(self,
network,
optimizer,
scale_update_cell=None,
enable_global_norm=True):
super(CPMTrainAccuStepsWithLossScaleCell, self).__init__(network, optimizer, scale_update_cell)
self.accumulation = False
self.accumulation_steps = context.get_auto_parallel_context("grad_accumulation_step")
self.one = Tensor(np.array([1]).astype(np.int32))
self.zero = Tensor(np.array([0]).astype(np.int32))
|
numpy.array
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for StabilizerTable class."""
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from qiskit import QiskitError
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.symplectic import StabilizerTable
from qiskit.quantum_info.operators.symplectic import PauliTable
def stab_mat(label):
"""Return stabilizer matrix from a stabilizer label"""
mat = np.eye(1, dtype=complex)
if label[0] == "-":
mat *= -1
if label[0] in ["-", "+"]:
label = label[1:]
for i in label:
if i == "I":
mat = np.kron(mat, np.eye(2))
elif i == "X":
mat = np.kron(mat, np.array([[0, 1], [1, 0]]))
elif i == "Y":
mat = np.kron(mat, np.array([[0, 1], [-1, 0]]))
elif i == "Z":
mat = np.kron(mat, np.array([[1, 0], [0, -1]]))
else:
raise QiskitError(f"Invalid stabilizer string {i}")
return mat
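# Example: stab_mat("-ZX") returns -1 * kron(Z, X) as a 4 x 4 complex matrix. Note that
# the "Y" branch deliberately uses the real matrix [[0, 1], [-1, 0]] (equal to Z.X = i*Y)
# rather than the complex Pauli Y, matching the phase-free stabilizer convention that
# the comparisons in these tests rely on.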
class TestStabilizerTableInit(QiskitTestCase):
"""Tests for StabilizerTable initialization."""
def test_array_init(self):
"""Test array initialization."""
with self.subTest(msg="bool array"):
target = np.array([[False, False], [True, True]])
value = StabilizerTable(target)._array
self.assertTrue(np.all(value == target))
with self.subTest(msg="bool array no copy"):
target = np.array([[False, True], [True, True]])
value = StabilizerTable(target)._array
value[0, 0] = not value[0, 0]
self.assertTrue(np.all(value == target))
with self.subTest(msg="bool array raises"):
array = np.array([[False, False, False], [True, True, True]])
self.assertRaises(QiskitError, StabilizerTable, array)
def test_vector_init(self):
"""Test vector initialization."""
with self.subTest(msg="bool vector"):
target = np.array([False, False, False, False])
value = StabilizerTable(target)._array
self.assertTrue(np.all(value == target))
with self.subTest(msg="bool vector no copy"):
target = np.array([False, True, True, False])
value = StabilizerTable(target)._array
value[0, 0] = not value[0, 0]
self.assertTrue(np.all(value == target))
def test_string_init(self):
"""Test string initialization."""
with self.subTest(msg='str init "I"'):
value = StabilizerTable("I")._array
target = np.array([[False, False]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "X"'):
value = StabilizerTable("X")._array
target = np.array([[True, False]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "Y"'):
value = StabilizerTable("Y")._array
target = np.array([[True, True]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "Z"'):
value = StabilizerTable("Z")._array
target = np.array([[False, True]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "IX"'):
value = StabilizerTable("IX")._array
target = np.array([[True, False, False, False]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "XI"'):
value = StabilizerTable("XI")._array
target = np.array([[False, True, False, False]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "YZ"'):
value = StabilizerTable("YZ")._array
target = np.array([[False, True, True, True]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
with self.subTest(msg='str init "XIZ"'):
value = StabilizerTable("XIZ")._array
target = np.array([[False, False, True, True, False, False]], dtype=bool)
self.assertTrue(np.all(np.array(value == target)))
def test_table_init(self):
"""Test StabilizerTable initialization."""
with self.subTest(msg="StabilizerTable"):
target = StabilizerTable.from_labels(["XI", "IX", "IZ"])
value = StabilizerTable(target)
self.assertEqual(value, target)
with self.subTest(msg="StabilizerTable no copy"):
target = StabilizerTable.from_labels(["XI", "IX", "IZ"])
value = StabilizerTable(target)
value[0] = "II"
self.assertEqual(value, target)
class TestStabilizerTableProperties(QiskitTestCase):
"""Tests for StabilizerTable properties."""
def test_array_property(self):
"""Test array property"""
with self.subTest(msg="array"):
stab = StabilizerTable("II")
array = np.zeros([2, 4], dtype=bool)
self.assertTrue(np.all(stab.array == array))
with self.subTest(msg="set array"):
def set_array():
stab = StabilizerTable("XXX")
stab.array = np.eye(4)
return stab
self.assertRaises(Exception, set_array)
def test_x_property(self):
"""Test X property"""
with self.subTest(msg="X"):
stab = StabilizerTable.from_labels(["XI", "IZ", "YY"])
array = np.array([[False, True], [False, False], [True, True]], dtype=bool)
self.assertTrue(np.all(stab.X == array))
with self.subTest(msg="set X"):
stab = StabilizerTable.from_labels(["XI", "IZ"])
val = np.array([[False, False], [True, True]], dtype=bool)
stab.X = val
self.assertEqual(stab, StabilizerTable.from_labels(["II", "XY"]))
with self.subTest(msg="set X raises"):
def set_x():
stab = StabilizerTable.from_labels(["XI", "IZ"])
val = np.array([[False, False, False], [True, True, True]], dtype=bool)
stab.X = val
return stab
self.assertRaises(Exception, set_x)
def test_z_property(self):
"""Test Z property"""
with self.subTest(msg="Z"):
stab = StabilizerTable.from_labels(["XI", "IZ", "YY"])
array = np.array([[False, False], [True, False], [True, True]], dtype=bool)
self.assertTrue(np.all(stab.Z == array))
with self.subTest(msg="set Z"):
stab = StabilizerTable.from_labels(["XI", "IZ"])
val = np.array([[False, False], [True, True]], dtype=bool)
stab.Z = val
self.assertEqual(stab, StabilizerTable.from_labels(["XI", "ZZ"]))
with self.subTest(msg="set Z raises"):
def set_z():
stab = StabilizerTable.from_labels(["XI", "IZ"])
val = np.array([[False, False, False], [True, True, True]], dtype=bool)
stab.Z = val
return stab
self.assertRaises(Exception, set_z)
def test_shape_property(self):
"""Test shape property"""
shape = (3, 8)
stab = StabilizerTable(np.zeros(shape))
self.assertEqual(stab.shape, shape)
def test_size_property(self):
"""Test size property"""
with self.subTest(msg="size"):
for j in range(1, 10):
shape = (j, 8)
stab = StabilizerTable(np.zeros(shape))
self.assertEqual(stab.size, j)
def test_num_qubits_property(self):
"""Test num_qubits property"""
with self.subTest(msg="num_qubits"):
for j in range(1, 10):
shape = (5, 2 * j)
stab = StabilizerTable(np.zeros(shape))
self.assertEqual(stab.num_qubits, j)
def test_phase_property(self):
"""Test phase property"""
with self.subTest(msg="phase"):
phase = np.array([False, True, True, False])
array = np.eye(4, dtype=bool)
stab = StabilizerTable(array, phase)
self.assertTrue(np.all(stab.phase == phase))
with self.subTest(msg="set phase"):
phase = np.array([False, True, True, False])
array = np.eye(4, dtype=bool)
stab = StabilizerTable(array)
stab.phase = phase
self.assertTrue(np.all(stab.phase == phase))
with self.subTest(msg="set phase raises"):
phase = np.array([False, True, False])
array = np.eye(4, dtype=bool)
stab = StabilizerTable(array)
def set_phase_raise():
"""Raise exception"""
stab.phase = phase
self.assertRaises(ValueError, set_phase_raise)
def test_pauli_property(self):
"""Test pauli property"""
with self.subTest(msg="pauli"):
phase = np.array([False, True, True, False])
array = np.eye(4, dtype=bool)
stab = StabilizerTable(array, phase)
pauli = PauliTable(array)
self.assertEqual(stab.pauli, pauli)
with self.subTest(msg="set pauli"):
phase = np.array([False, True, True, False])
array = np.zeros((4, 4), dtype=bool)
stab = StabilizerTable(array, phase)
pauli = PauliTable(np.eye(4, dtype=bool))
stab.pauli = pauli
self.assertTrue(np.all(stab.array == pauli.array))
self.assertTrue(np.all(stab.phase == phase))
with self.subTest(msg="set pauli"):
phase = np.array([False, True, True, False])
array = np.zeros((4, 4), dtype=bool)
stab = StabilizerTable(array, phase)
pauli = PauliTable(np.eye(4, dtype=bool)[1:])
def set_pauli_raise():
"""Raise exception"""
stab.pauli = pauli
self.assertRaises(ValueError, set_pauli_raise)
def test_eq(self):
"""Test __eq__ method."""
stab1 = StabilizerTable.from_labels(["II", "XI"])
stab2 = StabilizerTable.from_labels(["XI", "II"])
self.assertEqual(stab1, stab1)
self.assertNotEqual(stab1, stab2)
def test_len_methods(self):
"""Test __len__ method."""
for j in range(1, 10):
labels = j * ["XX"]
stab = StabilizerTable.from_labels(labels)
self.assertEqual(len(stab), j)
def test_add_methods(self):
"""Test __add__ method."""
labels1 = ["+XXI", "-IXX"]
labels2 = ["+XXI", "-ZZI", "+ZYZ"]
stab1 = StabilizerTable.from_labels(labels1)
stab2 = StabilizerTable.from_labels(labels2)
target = StabilizerTable.from_labels(labels1 + labels2)
self.assertEqual(target, stab1 + stab2)
def test_add_qargs(self):
"""Test add method with qargs."""
stab1 = StabilizerTable.from_labels(["+IIII", "-YYYY"])
stab2 = StabilizerTable.from_labels(["-XY", "+YZ"])
with self.subTest(msg="qargs=[0, 1]"):
target = StabilizerTable.from_labels(["+IIII", "-YYYY", "-IIXY", "+IIYZ"])
self.assertEqual(stab1 + stab2([0, 1]), target)
with self.subTest(msg="qargs=[0, 3]"):
target = StabilizerTable.from_labels(["+IIII", "-YYYY", "-XIIY", "+YIIZ"])
self.assertEqual(stab1 + stab2([0, 3]), target)
with self.subTest(msg="qargs=[2, 1]"):
target = StabilizerTable.from_labels(["+IIII", "-YYYY", "-IYXI", "+IZYI"])
self.assertEqual(stab1 + stab2([2, 1]), target)
with self.subTest(msg="qargs=[3, 1]"):
target = StabilizerTable.from_labels(["+IIII", "-YYYY", "-YIXI", "+ZIYI"])
self.assertEqual(stab1 + stab2([3, 1]), target)
def test_getitem_methods(self):
"""Test __getitem__ method."""
with self.subTest(msg="__getitem__ single"):
labels = ["+XI", "-IY"]
stab = StabilizerTable.from_labels(labels)
self.assertEqual(stab[0], StabilizerTable(labels[0]))
self.assertEqual(stab[1], StabilizerTable(labels[1]))
with self.subTest(msg="__getitem__ array"):
labels = np.array(["+XI", "-IY", "+IZ", "-XY", "+ZX"])
stab = StabilizerTable.from_labels(labels)
inds = [0, 3]
self.assertEqual(stab[inds], StabilizerTable.from_labels(labels[inds]))
inds = np.array([4, 1])
self.assertEqual(stab[inds], StabilizerTable.from_labels(labels[inds]))
with self.subTest(msg="__getitem__ slice"):
labels = np.array(["+XI", "-IY", "+IZ", "-XY", "+ZX"])
|
numpy.array
|
import argparse
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.animation as MOV
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as CTLG
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import ipdb as PDB
parser = argparse.ArgumentParser(description='Program to visualize MWA interferometer array simulated data')
parser.add_argument('--antenna-file', help='File containing antenna locations', default='/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', type=file, dest='antenna_file')
telescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')
telescope_group.add_argument('--telescope', help='Telescope name [str, Default = "mwa"]', default='mwa', type=str, dest='telescope', choices=['mwa', 'vla', 'gmrt'])
obsparm_group = parser.add_argument_group('Observation setup', 'Parameters specifying the observation')
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=150e6]', default=150e6, type=float, dest='freq')
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, Default="track"]', default='track', type=str, dest='obs_mode', choices=['track', 'drift', 'custom'])
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')
fgmodel_group = parser.add_mutually_exclusive_group(required=True)
fgmodel_group.add_argument('--ASM', action='store_true')
fgmodel_group.add_argument('--DSM', action='store_true')
fgmodel_group.add_argument('--SUMSS', action='store_true')
fgmodel_group.add_argument('--NVSS', action='store_true')
fgmodel_group.add_argument('--MSS', action='store_true')
fgmodel_group.add_argument('--GLEAM', action='store_true')
fgmodel_group.add_argument('--PS', action='store_true')
processing_group = parser.add_argument_group('Processing arguments', 'Processing parameters')
processing_group.add_argument('--n-bins-blo', help='Number of bins for baseline orientations [int, Default=4]', default=4, type=int, dest='n_bins_baseline_orientation')
processing_group.add_argument('--bl-chunk-size', help='Baseline chunk size [int, Default=100]', default=100, type=int, dest='baseline_chunk_size')
processing_group.add_argument('--bl-chunk', help='Baseline chunk indices to process [int(s), Default=None: all chunks]', default=None, type=int, dest='bl_chunk', nargs='*')
processing_group.add_argument('--n-bl-chunks', help='Upper limit on baseline chunks to be processed [int, Default=None]', default=None, type=int, dest='n_bl_chunks')
processing_group.add_argument('--bpw', help='Bandpass window shape [str, "rect"]', default='rect', type=str, dest='bpass_shape', choices=['rect', 'bnw'])
processing_group.add_argument('--f-pad', help='Frequency padding fraction for delay transform [float, Default=1.0]', type=float, dest='f_pad', default=1.0)
parser.add_argument('--max-abs-delay', help='Maximum absolute delay (micro seconds) [float, Default=None]', default=None, type=float, dest='max_abs_delay')
backdrop_group = parser.add_argument_group('Backdrop arguments', 'Backdrop parameters')
backdrop_group.add_argument('--backdrop-coords', help='Backdrop coordinates [str, Default="dircos"]', default='dircos', type=str, dest='backdrop_coords', choices=['radec', 'dircos'])
backdrop_group.add_argument('--backdrop-size', help='Backdrop size (x, y) [int, Default=(100,50)]', type=int, dest='backdrop_size', metavar=('xsize', 'ysize'), nargs=2, default=[100,50])
backdrop_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128])
visual_group = parser.add_argument_group('Visualization arguments', 'Visualization setup parameters')
visual_group.add_argument('--fig-size', help='Figure size in inches [float, Default=(14,14)]', default=[14,14], type=float, dest='figsize', metavar=('xsize', 'ysize'), nargs=2)
visual_group.add_argument('--fps', help='Frame rate in fps [float, Default=1.0]', default=1.0, type=float, dest='fps', metavar='framerate')
visual_group.add_argument('--interval', help='Frame interval in ms [float, Default=100.0]', default=100.0, dest='interval', metavar='interval')
visual_group.add_argument('--animation-file', help='Animation filename prefix [str, Default=None]', dest='animation_file', default=None, type=str)
visual_group.add_argument('--animation-format', help='Animation file format [str, Default=MP4]', default='MP4', choices=['MP4', 'GIF'], dest='animation_format', type=str)
args = vars(parser.parse_args())
try:
ant_locs = NP.loadtxt(args['antenna_file'], skiprows=6, comments='#', usecols=(1,2,3))
except IOError:
raise IOError('Could not open file containing antenna locations.')
freq = args['freq']
freq_resolution = args['freq_resolution']
bpass_shape = args['bpass_shape']
n_bins_baseline_orientation = args['n_bins_baseline_orientation']
baseline_chunk_size = args['baseline_chunk_size']
bl_chunk = args['bl_chunk']
n_bl_chunks = args['n_bl_chunks']
telescope = args['telescope']
obs_mode = args['obs_mode']
bl = RI.baseline_generator(ant_locs, auto=False, conjugate=False)
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
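# e.g. with n_bins_baseline_orientation = 4 the fold above keeps orientations within
# [-22.5, 157.5) deg: a baseline at 170 deg is negated and re-enters the range at -10 deg.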
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
if bl_chunk is None:
bl_chunk = range(len(baseline_bin_indices))
if n_bl_chunks is None:
n_bl_chunks = len(bl_chunk)
bl_chunk = bl_chunk[:n_bl_chunks]
bl = bl[:baseline_bin_indices[n_bl_chunks],:]
bl_length = bl_length[:baseline_bin_indices[n_bl_chunks]]
bl_orientation = bl_orientation[:baseline_bin_indices[n_bl_chunks]]
oversampling_factor = 1.0 + args['f_pad']
n_channels = args['n_channels']
nchan = n_channels
max_abs_delay = args['max_abs_delay']
nside = args['nside']
backdrop_xsize = args['backdrop_size'][0]  # x-resolution of the backdrop grid used below
use_GSM = args['ASM']
use_DSM = args['DSM']
use_NVSS = args['NVSS']
use_SUMSS = args['SUMSS']
use_MSS = args['MSS']
use_GLEAM = args['GLEAM']
use_PS = args['PS']
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
else:
fg_str = 'other'
PDB.set_trace()
lags = None
skyvis_lag = None
vis_lag = None
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
for i in range(0, n_bl_chunks):
infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
hdulist = fits.open(infile+'.fits')
# extnames = [hdu.header['EXTNAME'] for hdu in hdulist]
if i == 0:
lags = hdulist['SPECTRAL INFO'].data.field('lag')
vis_lag = hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data
skyvis_lag = hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING INFO'].data
lst = pointings_table['LST']
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
else:
vis_lag = NP.vstack((vis_lag, hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data))
skyvis_lag = NP.vstack((skyvis_lag, hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data))
hdulist.close()
progress.update(i+1)
progress.finish()
if max_abs_delay is not None:
small_delays_ind = NP.abs(lags) <= max_abs_delay * 1e-6
lags = lags[small_delays_ind]
vis_lag = vis_lag[:,small_delays_ind,:]
skyvis_lag = skyvis_lag[:,small_delays_ind,:]
## Delay limits estimation
delay_matrix = DLY.delay_envelope(bl, pointings_dircos, units='mks')
## Binning baselines by orientation
# blo = bl_orientation[:min(n_bl_chunks*baseline_chunk_size, total_baselines)]
blo = bl_orientation
# blo[blo < -0.5*360.0/n_bins_baseline_orientation] = 360.0 - NP.abs(blo[blo < -0.5*360.0/n_bins_baseline_orientation])
PDB.set_trace()
bloh, bloe, blon, blori = OPS.binned_statistic(blo, statistic='count', bins=n_bins_baseline_orientation, range=[(-0.5*180.0/n_bins_baseline_orientation, 180.0-0.5*180.0/n_bins_baseline_orientation)])
if use_DSM or use_GSM:
backdrop_coords = 'radec'
if backdrop_coords == 'radec':
xmin = 0.0
xmax = 360.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
elif backdrop_coords == 'dircos':
xmin = -1.0
xmax = 1.0
ymin = -1.0
ymax = 1.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize))
nanind = (xgrid**2 + ygrid**2) > 1.0
goodind = (xgrid**2 + ygrid**2) <= 1.0
zgrid = NP.empty_like(xgrid)
zgrid[nanind] = NP.nan
zgrid[goodind] = NP.sqrt(1.0 - (xgrid[goodind]**2 + ygrid[goodind]**2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
zvect = zgrid.ravel()
xyzvect = NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1), zvect.reshape(-1,1)))
if use_DSM or use_GSM:
dsm_file = '/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata{0:0d}.fits'.format(nside)
hdulist = fits.open(dsm_file)
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes = temperatures
backdrop = HP.cartview(temperatures.ravel(), coord=['G','E'], rot=[180,0,0], xsize=backdrop_xsize, return_projected_map=True)
elif use_GLEAM or use_SUMSS:
if use_GLEAM:
catalog_file = '/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv' # GLEAM catalog
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']
ra_deg = catdata['RAJ2000']
fpeak = catdata['S150_fit']
ferr = catdata['e_S150_fit']
freq_catalog = 1.4 # GHz
spindex = -0.83 + NP.zeros(fpeak.size)
fluxes = fpeak * (freq_catalog * 1e9 / freq)**spindex
else:
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
spindex = -0.83 + NP.zeros(fint.size)
fluxes = fint * (freq_catalog*1e9/freq)**spindex
if backdrop_coords == 'radec':
backdrop = griddata(NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1))), method='cubic')
backdrop = backdrop.reshape(backdrop_xsize/2, backdrop_xsize)
elif backdrop_coords == 'dircos':
if (telescope == 'mwa_dipole') or (obs_mode == 'drift'):
backdrop = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, freq_scale='Hz', skyunits='dircos', phase_center=[0.0,0.0,1.0])
backdrop = backdrop.reshape(backdrop_xsize, backdrop_xsize)
else:
if backdrop_coords == 'radec':
backdrop = griddata(NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1))), method='nearest')
backdrop = backdrop.reshape(backdrop_xsize/2, backdrop_xsize)
elif backdrop_coords == 'dircos':
if (telescope == 'mwa_dipole') or (obs_mode == 'drift'):
backdrop = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, freq_scale='Hz', skyunits='dircos', phase_center=[0.0,0.0,1.0])
backdrop = backdrop.reshape(backdrop_xsize, backdrop_xsize)
## Create data for overlay
overlays = []
roi_obj_inds = []
for i in xrange(n_snaps):
overlay = {}
if backdrop_coords == 'radec':
havect = lst[i] - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
pb = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', phase_center=pointings_altaz[i,:])
overlay['pbeam'] = pb
overlay['roi_obj_inds'] = roi_altaz
# roi_obj_inds += [roi_altaz]
elif backdrop_coords == 'dircos':
havect = lst[i] - ra_deg
fg_altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),dec_deg.reshape(-1,1))), latitude, units='degrees')
fg_dircos = GEOM.altaz2dircos(fg_altaz, units='degrees')
roi_dircos = NP.asarray(NP.where(fg_dircos[:,2] >= 0.0)).ravel()
overlay['roi_obj_inds'] = roi_dircos
overlay['fg_dircos'] = fg_dircos
if obs_mode == 'track':
pb = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, skyunits='dircos', freq_scale='Hz', phase_center=pointings_dircos[i,:])
# pb[pb < 0.5] = NP.nan
overlay['pbeam'] = pb.reshape(backdrop_xsize, backdrop_xsize)
overlays += [overlay]
## Animation set up
fps = args['fps']
interval = args['interval']
animation_format = args['animation_format']
if animation_format == 'MP4':
anim_format = '.mp4'
else:
        anim_format = '.gif'
animation_file = args['animation_file']
if animation_file is None:
animation_file = '/data3/t_nithyanandan/project_MWA/multi_baseline_noiseless_visibilities_'+obs_mode+'_'+'{0:0d}'.format(80*baseline_chunk_size)+'_baselines_{0:0d}_orientations_'.format(n_bins_baseline_orientation)+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_8_sectors'
if n_bins_baseline_orientation == 4:
blo_ax_mapping = [6,3,2,1,4,7,8,9]
# if n_bins_baseline_orientation == 4:
# blo_ax_mapping = [6,2,4,8]
# elif n_bins_baseline_orientation == 8:
# blo_ax_mapping = [6,3,2,1,4,7,8,9]
fig = PLT.figure(figsize=(14,14))
axs = []
for i in range(2*n_bins_baseline_orientation):
ax = fig.add_subplot(3,3,blo_ax_mapping[i])
if i < n_bins_baseline_orientation:
ax.set_xlim(0,bloh[i]-1)
ax.set_ylim(0.0, NP.amax(lags*1e6))
else:
# ax = fig.add_subplot(3,3,blo_ax_mapping[i%n_bins_baseline_orientation])
ax.set_xlim(0,bloh[i%n_bins_baseline_orientation]-1)
ax.set_ylim(NP.amin(lags*1e6), 0.0)
l = ax.plot([], [], 'k-', [], [], 'k:', [], [])
ax.set_title(r'{0:+.1f} <= $\theta_b [deg]$ < {1:+.1f}'.format(bloe[i%n_bins_baseline_orientation], bloe[(i%n_bins_baseline_orientation)+1]), weight='semibold')
ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# ax.set_aspect('auto')
axs += [ax]
ax = fig.add_subplot(3,3,5)
if backdrop_coords == 'radec':
ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=12)
ax.set_ylabel(r'$\delta$ [degrees]', fontsize=12)
elif backdrop_coords == 'dircos':
ax.set_xlabel('l')
ax.set_ylabel('m')
ax.set_title('Sky Model', fontsize=18, weight='semibold')
ax.grid(True)
ax.tick_params(which='major', length=12, labelsize=12)
ax.tick_params(which='minor', length=6)
if use_DSM or use_GSM:
# linit = ax.imshow(OPS.reverse(backdrop, axis=1), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm())
linit = ax.imshow(backdrop, origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm())
# cbmn = NP.amin(backdrop)
# cbmx = NP.amax(backdrop)
# cbaxes = fig.add_axes([0.85, 0.1, 0.02, 0.23])
# cbar = fig.colorbar(linit, cax=cbaxes)
# cbmd = 10.0**(0.5*(NP.log10(cbmn)+NP.log10(cbmx)))
# cbar.set_ticks([cbmn, cbmd, cbmx])
# cbar.set_ticklabels([cbmn, cbmd, cbmx])
else:
ax.set_xlim(NP.amin(xvect), NP.amax(xvect))
ax.set_ylim(NP.amin(yvect), NP.amax(yvect))
if backdrop_coords == 'radec':
linit = ax.scatter(ra_deg, dec_deg, c=fpeak, marker='.', cmap=PLT.cm.get_cmap("rainbow"), norm=PLTC.LogNorm())
# cbmn = NP.amin(fpeak)
# cbmx = NP.amax(fpeak)
else:
if (obs_mode == 'drift') or (telescope == 'mwa_dipole'):
linit = ax.imshow(backdrop, origin='lower', extent=(NP.amin(xvect), NP.amax(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm())
# cbaxes = fig.add_axes([0.65, 0.1, 0.02, 0.23])
# cbar = fig.colorbar(linit, cax=cbaxes)
l = ax.plot([], [], 'w.', [], [])
# txt = ax.text(0.25, 0.65, '', transform=ax.transAxes, fontsize=18)
axs += [ax]
tpc = axs[-1].text(0.5, 1.15, '', transform=ax.transAxes, fontsize=12, weight='semibold', ha='center')
PLT.tight_layout()
fig.subplots_adjust(bottom=0.1)
def update(i, pointing_radec, lst, obsmode, telescope, backdrop_coords, bll, blori, lags, vis_lag, delaymatrix, overlays, xv, yv, xv_uniq, yv_uniq, axs, tpc):
delay_ranges = NP.dstack((delaymatrix[:,:vis_lag.shape[0],1] - delaymatrix[:,:vis_lag.shape[0],0], delaymatrix[:,:vis_lag.shape[0],1] + delaymatrix[:,:vis_lag.shape[0],0]))
delay_horizon = NP.dstack((-delaymatrix[:,:vis_lag.shape[0],0], delaymatrix[:,:vis_lag.shape[0],0]))
bl = bll[:vis_lag.shape[0]]
label_str = r' $\alpha$ = {0[0]:+.3f} deg, $\delta$ = {0[1]:+.2f} deg'.format(pointing_radec[i,:]) + '\nLST = {0:.2f} deg'.format(lst[i])
for j in range((len(axs)-1)/2):
blind = blori[blori[j]:blori[j+1]]
sortind = NP.argsort(bl[blind], kind='heapsort')
axs[j].lines[0].set_xdata(NP.arange(blind.size))
axs[j].lines[0].set_ydata(delay_ranges[i,blind[sortind],1]*1e6)
axs[j].lines[0].set_linewidth(0.5)
axs[j].lines[1].set_xdata(NP.arange(blind.size))
axs[j].lines[1].set_ydata(delay_horizon[i,blind[sortind],1]*1e6)
axs[j].lines[1].set_linewidth(0.5)
            axs[j].lines[2] = axs[j].imshow(NP.abs(vis_lag[blind[sortind],int(NP.floor(0.5*vis_lag.shape[1])):,i].T), origin='lower', extent=(0, blind.size-1, 0.0, NP.amax(lags*1e6)), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(vis_lag)), vmax=NP.amax(NP.abs(vis_lag))), interpolation=None)
axs[j].set_aspect('auto')
axs[j+(len(axs)-1)/2].lines[0].set_xdata(NP.arange(blind.size))
axs[j+(len(axs)-1)/2].lines[0].set_ydata(delay_ranges[i,blind[sortind],0]*1e6)
axs[j+(len(axs)-1)/2].lines[0].set_linewidth(0.5)
axs[j+(len(axs)-1)/2].lines[1].set_xdata(NP.arange(blind.size))
axs[j+(len(axs)-1)/2].lines[1].set_ydata(delay_horizon[i,blind[sortind],0]*1e6)
axs[j+(len(axs)-1)/2].lines[1].set_linewidth(0.5)
            axs[j+(len(axs)-1)/2].lines[2] = axs[j+(len(axs)-1)/2].imshow(NP.abs(vis_lag[blind[sortind],:int(NP.floor(0.5*vis_lag.shape[1])),i].T), origin='lower', extent=(0, blind.size-1, NP.amin(lags*1e6), 1e6*lags[int(NP.floor(0.5*lags.size))-1]), norm=PLTC.LogNorm(vmin=NP.amin(
|
NP.abs(vis_lag)
|
numpy.abs
|
import sapyen_robot
from .base_robot_env import BaseRobotEnv
from robot.python.env.physx_utils import mat2transform, transform2mat
import transforms3d
import numpy as np
import os
from .path_utils import get_assets_path
RGBD_CAMERA_THRESHOLD = 10
CAMERA_TO_LINK = np.zeros([4, 4])
CAMERA_TO_LINK[[0, 1, 2, 3], [2, 0, 1, 3]] = [1, -1, -1, 1]
class MOVOEnv(BaseRobotEnv):
def __init__(self):
"""
Sapien Kinova MOVO base class.
        If you want to use it with a sapien object environment, do not call __init__ but _init_robot (most cases).
        If you just want to load the robot on its own, use __init__ rather than _init_robot.
"""
urdf_path = os.path.join(get_assets_path(), "robot/all_robot.urdf")
gripper_material = self.sim.create_material(3.0, 2.0, 0.01)
BaseRobotEnv.__init__(self, urdf_path, gripper_material)
print("Initiate MOVO Environment in stand alone version")
def _init_robot(self) -> None:
"""
Load the robot and controllers
"""
gripper_material = self.sim.create_material(3.0, 2.0, 0.01)
self._load_robot('../assets/robot/single_gripper.urdf', gripper_material)
def _load_controller(self) -> None:
"""
Create controllers, set pd and force limit to each joint with fine tuned value
"""
controllable_wrapper = self.sim.create_controllable_articulation(self.robot)
self._head_joint = ["pan_joint", "tilt_joint"]
self._gripper_joint = ["right_gripper_finger1_joint", "right_gripper_finger2_joint",
"right_gripper_finger3_joint"]
self._body_joint = ["linear_joint"]
self.manger = sapyen_robot.ControllerManger("movo", controllable_wrapper)
self.head_controller = self.manger.create_joint_velocity_controller(self._head_joint, "head")
self.gripper_controller = self.manger.create_joint_velocity_controller(self._gripper_joint, "gripper")
self.body_controller = self.manger.create_joint_velocity_controller(self._body_joint, "body")
# Add joint state publisher to keep in synchronization with ROS
# You must use it if you want to do cartesian control
self.manger.add_joint_state_publisher(60)
self.manger.add_group_trajectory_controller("right_arm")
self.arm_planner = self.manger.create_group_planner("right_arm")
# Cache gripper limit for execute high level action
joint_limit = self.robot.get_qlimits()
gripper_index = self.robot_joint_names.index(self._gripper_joint[0])
self.__gripper_limit = joint_limit[gripper_index, :]
# Cache robot pose
self.root_theta = 0
self.root_pos = np.array([0, 0], dtype=np.float)
self.init_qpos = [0, 0, 0, 0.25, -1.9347, 0, -1.5318, 0, 0.9512, -2.24, 0.34, 0.64, -1.413, 0, 0, 0]
# Tune PD controller
self.robot.set_pd(20000, 3000, 2000,
|
np.arange(4)
|
numpy.arange
|
'''
# ambre.analyze.align_seg.py
#
# Copyright March 2013 by <NAME>
#
# This program is free software; you may redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# For any other inquiries send an email to: <NAME> (<EMAIL>)
'''
import ambre.utils.SAM as SAM
import numpy as na
import sys
import bisect
from collections import defaultdict
from utils import cigar_listparse, get_frag_interval, CIGARAlignmentScoring, get_mdi_cigar
import itertools
debug_log = sys.stderr
class Alignments(object):
def __init__(self):
self.frags = defaultdict(lambda : defaultdict(list))
self.frag_lengths = {}
self.align_scorer = None
def get_frag_length(self, qname):
return self.frag_lengths[qname]
def get_alignments(self, qname, rname):
return self.frags[qname][rname]
def get_aligned_frag(self, fp, fe, fl):
frag_matched_bp = na.zeros((fl,), dtype=na.int16)
if fp>fe:
frag_matched_bp[fe:fp+1] = True
else:
frag_matched_bp[fp:fe+1] = True
return frag_matched_bp
def get_ascore(self, astr_parts):
return self.align_scorer.get_score(astr_parts)
def get_segment_rp(self, md_array, i_array, fp_c, ref_start, ref_end, rc):
rp_c = []
for i in xrange(1,len(fp_c)):
rp_c.append(na.sum(md_array[fp_c[i-1]:fp_c[i]]!='-')+na.sum(i_array[fp_c[i-1]:fp_c[i]]))
cumsum_rp_c = na.cumsum(rp_c)
if rc:
nrp = na.concatenate(([ref_start],(ref_start-cumsum_rp_c)))
#assert nrp[-1] == ref_end
else:
nrp = na.concatenate(([ref_start],(ref_start+cumsum_rp_c)))
#assert nrp[-1] == ref_end
return nrp
def str_concatenate(self, a_strs_list):
pass
class SAMAlignments(Alignments):
def __init__(self, sam_fpath):
Alignments.__init__(self)
parser = SAM.SAMParser()
self.sam_fpath = sam_fpath
rev_flag_idx = SAM.SAM_FLAGS_H['rev_strand_of_query']
for frag in parser.parse(sam_fpath):
cig_parts = cigar_listparse(frag)
c = SAM.get_flag(rev_flag_idx, int(frag.flags))
start, end, frag_length = get_frag_interval(cig_parts, c)
#if abs(int(frag.tlen))<150:
# continue
self.frags[frag.qname][frag.rname].append((int(frag.pos), int(frag.pos) + int(frag.tlen), start, end, cig_parts, c))
self.frag_lengths[frag.qname] = frag_length
self.align_scorer = CIGARAlignmentScoring()
def get_mdi(self, a_str):
return get_mdi_cigar(a_str)
def get_segment_a_str(self, fp, a_str, ref_start, ref_end, rc):
if len(a_str)>0:
if a_str[0][1]=='H' or a_str[0][1]=='S':
a_str = a_str[1:]
if len(a_str)>0:
if a_str[-1][1]=='H' or a_str[-1][1]=='S':
a_str = a_str[:-1]
if rc:
a_str = a_str[::-1]
md_array, i_array, idx_array = get_mdi_cigar(a_str)
assert (fp[-1]-fp[0])==md_array.size
fp_c = fp-fp[0]
# In the context of the fragment
nrp = self.get_segment_rp(md_array, i_array, fp_c, ref_start, ref_end, rc)
# Requires cigar informative splitting of the a_str at segment parts
# Create a new a_str with proper breaks and idx array adjustments
# with
segment_a_str = []
for i in xrange(1,len(fp_c)-1):
a_str_idx = idx_array[fp_c[i]]
if a_str_idx==idx_array[fp_c[i]-1]:
count_to_split = na.sum(idx_array[:fp_c[i]]==a_str_idx)
c, t = a_str.pop(a_str_idx)
a_str.insert(a_str_idx, (c-count_to_split, t))
a_str.insert(a_str_idx, (count_to_split, t))
idx_array[fp_c[i]:] += 1
a_str_idx = idx_array[fp_c[i]]
assert a_str_idx!=idx_array[fp_c[i]-1]
segment_a_str.append(a_str[idx_array[fp_c[i-1]]:idx_array[fp_c[i]]])
segment_a_str.append(a_str[idx_array[fp_c[-2]]:])
return segment_a_str, nrp
def get_a_str(self, cig_parts):
return ''.join(["%d%s"%a for a in cig_parts])
def str_concatenate(self, a_strs_list):
for i in xrange(1, len(a_strs_list)):
c,t = a_strs_list[i][0]
if a_strs_list[i-1][-1][1]==t:
a_strs_list[i][0] = (a_strs_list[i-1][-1][0]+c, t)
a_strs_list[i-1].pop(-1)
return ''.join([self.get_a_str(a_str) for a_str in a_strs_list])
class MaxAlignmentScoreFragFiltering(object):
def __init__(self, fpath):
self.fpath = fpath
self.a = SAMAlignments(fpath)
def check_overlaps(self, frag_alignments, top_a_scores=None, remove_encompassed=False):
rnames, fps, fes, rps, res, a_strs, rcs = zip(*frag_alignments)
a_scores = [self.a.get_ascore(a_str) for a_str in a_strs]
frag_alignments = zip(a_scores, rnames, fps, fes, a_strs, rcs)
frag_alignments.sort()
if not top_a_scores is None:
frag_alignments = frag_alignments[:top_a_scores]
n = len(frag_alignments)
a_scores, rnames, fps, fes, a_strs, rcs = zip(*frag_alignments)
breakpoints = list(set(zip(rnames, fps)+zip(rnames, fes)))
breakpoints.sort()
m = len(breakpoints)
nodes = na.zeros((m, n), dtype=na.int)
for idx, rname, fp, fe in zip(range(n), rnames, fps, fes):
i,j = bisect.bisect_left(breakpoints, (rname, fp)), bisect.bisect_right(breakpoints, (rname, fe))
nodes[i:j,idx] = 1
if remove_encompassed:
mark_for_deletion = []
for j in xrange(n):
a = nodes[:,j]
if na.sum(na.dot(a.reshape((1,m)), nodes)==na.sum(a))>1:
mark_for_deletion.append(j)
mask = na.ones(n, dtype=na.bool)
mask[mark_for_deletion] = False
nodes = nodes[:, mask]
n = nodes.shape[1]
# count overlaps
# count encompasses
c_overlaps, c_encompasses, encompassment = 0,0,0
for j in xrange(n):
a = nodes[:,j]
similar_nodes = na.dot(a.reshape((1,m)), nodes)
c = na.sum(similar_nodes==na.sum(a))
c_encompasses += c-1
c_overlaps += na.sum(similar_nodes>0)-1
if c>1:
encompassment += 1
print >>debug_log, n, c_overlaps, c_encompasses, encompassment
def max_scoring_path(self, frag_alignments, breakpoint_weight=-50):
n = len(frag_alignments)
frag_alignments.sort()
rnames, fps, fes, rps, res, a_strs, rcs = zip(*frag_alignments)
breakpoints = list(set(fps).union(set(map(lambda x:x+1, fes))))
breakpoints.sort()
m = len(breakpoints)
nodes = na.zeros((m, n), dtype=na.int)
for idx, fp, fe in zip(range(n), fps, fes):
i,j = bisect.bisect_left(breakpoints, fp), bisect.bisect_right(breakpoints, fe+1)
nodes[i:j,idx] = 1
# Removes alignments that are encompassed by another alignment.
mark_for_deletion = []
for j in xrange(n):
a = nodes[:,j]
if na.sum(na.dot(a.reshape((1,m)), nodes)==na.sum(a))>1:
mark_for_deletion.append(j)
mask =
|
na.ones(n, dtype=na.bool)
|
numpy.ones
|
import numpy as np
import time
import precice
n = 20
dn = 1 / n
# generate mesh
y =
|
np.linspace(0, 1, n + 1)
|
numpy.linspace
|
import torch
import torch.nn
import os.path as osp
from baseline.utils.parser import get_opts
from baseline.nnutils.stream_modules import ActionClassification
from baseline.data.ucf101 import UCF101, split
from baseline.logger import Logger
from baseline.utils_1 import non_maximal_suppression as nms
from baseline.utils_1 import read_class_names
import pdb
import collections
from sklearn.metrics import average_precision_score, recall_score
from scipy.ndimage import label
import os
import numpy as np
def process_tcam(attn, tcam, numInputs):
# assuming input tcam is 400 x 20
# 400 segments
# assuming center of value in tcam designates activation at center of segment
if numInputs < 400:
return attn[:numInputs], tcam[:numInputs]
else:
sample_range = numInputs
seg_length = 400
sampledN = np.round(np.linspace(0, sample_range, seg_length + 1)).astype(np.int32)
K = sampledN[0:-1]
K = np.mod(K, np.ones(K.shape) * numInputs).astype(np.int)
probe_points = [x for x in range(numInputs)]
samples = K
#import pdb;pdb.set_trace()
interpolated_tcam = np.zeros([len(probe_points), tcam.shape[1]])
for j in range(tcam.shape[1]):
interpolated_tcam[:, j] = np.interp(probe_points, samples, tcam[:, j])
interpolated_attn = np.interp(probe_points, samples, attn[:,0])
interpolated_attn =
|
np.expand_dims(interpolated_attn, axis=1)
|
numpy.expand_dims
|
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds
import numpy as np
import os
import click
class MatrixEditor(object):
def __init__(self, indir=None, outdir=None, seed=None, debug=False):
self.debug = debug
self.indir = os.path.abspath(indir or 'datasets')
self.outdir = os.path.abspath(outdir or 'data')
self.data = None
self.y = None
self.index = None
self.s = None
self.Vh = None
self.seed = seed
@property
def n_features(self):
if self.data is None:
return None
return self.data.shape[1]
@property
def n_points(self):
if self.data is None:
return None
return self.data.shape[0]
pass_editor = click.make_pass_decorator(MatrixEditor)
@click.group(chain=True)
@click.option('--basepath', default='.')
@click.option('--indir', default='datasets')
@click.option('--outdir', default='data')
@click.option('--seed', type=click.INT, default=None)
@click.option('-v', is_flag=True)
@click.pass_context
def cli(ctx, basepath, indir, outdir, seed, v):
indir = os.path.join(basepath, indir)
outdir = os.path.join(basepath, outdir)
ctx.obj = MatrixEditor(indir, outdir, seed, v)
@cli.command('load')
@click.argument('dataset')
@click.option('--confirm', is_flag=True)
@pass_editor
def load(editor, dataset, confirm):
if editor.data is not None and not confirm:
print("There is existing data, pass --confirm flag to load anyway")
return False
if '.svm' not in dataset:
dataset += '.svm'
path = os.path.join(editor.indir, dataset)
assert os.path.exists(path), f"SVM file '{path}' not found"
old_data = editor.data.copy() if editor.data is not None else None
old_y = editor.y.copy() if editor.y is not None else None
try:
editor.data, editor.y = load_svmlight_file(path)
editor.index = np.asarray(range(len(editor.y)))
editor.data = editor.data.tocsc()
if editor.debug:
print(f"Loaded '{dataset}', shape {editor.data.shape}")
return
except Exception as e:
print(e)
editor.data = old_data
editor.y = old_y
exit(0)
@cli.command('scale')
@click.option('--center', is_flag=True)
@click.option('--norm', is_flag=True)
@pass_editor
def scale(editor, center, norm):
# from sklearn.preprocessing import scale
# editor.data = csc_matrix(scale(editor.data.todense(), with_mean=center, with_std=scale_var))
editor.data = editor.data.todense()
if center:
editor.data -= np.average(editor.data, axis=0)
if norm:
editor.data = normalize(editor.data, axis=0, copy=False)
editor.data = csc_matrix(editor.data)
@cli.command('replace-column')
@click.argument('col', type=click.INT)
@click.argument('scheme', type=click.Choice(['uniform', 'scale', 'weights']))
@click.option('--scale-col', default=0, help="scale specified column (default 0)")
@click.option('--scale-by', default=1.0, help="scale factor for vector specified by `--scale-col` (default 1)")
@click.option('--weights', type=click.STRING, default=None,
help="string containing python array with length n_col."
"values in array correspond weights of each remaining column for replacement linear combination.")
@pass_editor
def replace_column(editor, col, scheme, scale_col, scale_by, weights):
assert editor.data is not None, "load data before attempting to edit"
assert not(weights is None and scheme == 'weights'), "specify weighting scheme"
n_row, n_col = editor.data.shape
if scheme == 'weights':
weights = np.fromstring(weights, sep=' ')
debug_str=f'A*{np.array2string(weights)}^T'
elif scheme == 'scale':
weights = np.zeros((n_col-1,))
weights[scale_col] = scale_by
debug_str=f'{scale_by}*A[:,{scale_col}]'
elif scheme == 'uniform':
weights = np.array([1/(n_col-1)]*(n_col-1))
debug_str=f'average of other columns'
else:
return NotImplementedError
weights = np.insert(weights, col, 0)
new_col = editor.data * weights
from scipy.sparse import csc_matrix
editor.data[:,col] = csc_matrix(new_col.reshape((n_row,1)))
if editor.debug:
print(f"Replaced A[:,{col}] with "+debug_str)
return
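# Invocation sketch (the dataset name "mydata" is hypothetical): the chained
# CLI can be exercised programmatically with click's test runner, replacing
# column 3 with the average of the remaining columns.
#   from click.testing import CliRunner
#   CliRunner().invoke(cli, ["load", "mydata", "replace-column", "3", "uniform"])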
@cli.command('insert-columns')
@click.argument('n', type=click.INT)
@click.option('--weights', type=click.STRING, default=None)
@pass_editor
def insert_columns(editor, n, weights):
assert editor.data is not None, "load data before attempting to edit"
# assert weights is not None or uniform, "either specify weights or use the `--uniform` flag"
if weights:
from json import loads
weights = loads(weights)
for spec in weights:
_insert_column(editor, spec.get('scheme'), spec.get('scale_col', 0), spec.get('scale_by', 1), spec.get('weights'))
return
for i in range(n):
_insert_column(editor, 'uniform', 0, 1, None)
return
@cli.command('insert-column')
@click.argument('scheme', type=click.Choice(['uniform', 'scale', 'weights', 'ones']))
@click.option('--scale-col', default=0, help="scale specified column (default 0)")
@click.option('--scale-by', default=1.0, help="scale factor for vector specified by `--scale-col` (default 1)")
@click.option('--weights', type=click.STRING, default=None,
help="string containing python array with length n_col."
"values in array correspond weights of each remaining column for replacement linear combination.")
@pass_editor
def insert_column(editor, scheme, scale_col, scale_by, weights):
_insert_column(editor, scheme, scale_col, scale_by, weights)
def _insert_column(editor, scheme, scale, scale_by, weights):
assert editor.data is not None, "load data before attempting to edit"
assert not (weights is None and scheme == 'weights'), "specify weighting scheme"
n_row, n_col = editor.data.shape
debug_str=''
if scheme == 'weights':
weights = np.fromstring(weights, sep=' ')
debug_str=f'A*{
|
np.array2string(weights)
|
numpy.array2string
|
# Created by <NAME>
# All right reserved
# Department of Computer Science
# the University of Warwick
# <EMAIL>
import itertools as it
import math
import random
import sys
from concurrent import futures
from copy import deepcopy
from os import remove
from os.path import abspath
import category_encoders as ce
import dill
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib.widgets import Slider
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import OneHotEncoder
from torch.autograd import Variable
from torch.distributions import Categorical
from torch.multiprocessing import Pool
from dbestclient.ml.integral import approx_count, prepare_reg_density_data
from dbestclient.ml.embedding import columns2sentences,WordEmbedding
# https://www.katnoria.com/mdn/
# https://github.com/sagelywizard/pytorch-mdn
"""A module for a mixture density network layer
For more info on MDNs, see _Mixture Density Networks_ by Bishop, 1994.
"""
class MDN(nn.Module):
"""A mixture density network layer
The input maps to the parameters of a MoG probability distribution, where
each Gaussian has O dimensions and diagonal covariance.
Arguments:
in_features (int): the number of dimensions in the input
out_features (int): the number of dimensions in the output
num_gaussians (int): the number of Gaussians per output dimensions
Input:
minibatch (BxD): B is the batch size and D is the number of input
dimensions.
Output:
(pi, sigma, mu) (BxG, BxGxO, BxGxO): B is the batch size, G is the
number of Gaussians, and O is the number of dimensions for each
Gaussian. Pi is a multinomial distribution of the Gaussians. Sigma
is the standard deviation of each Gaussian. Mu is the mean of each
Gaussian.
"""
def __init__(self, in_features, out_features, num_gaussians, device):
super(MDN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.num_gaussians = num_gaussians
self.pi = nn.Sequential(
nn.Linear(in_features, num_gaussians),
nn.Softmax(dim=1)
)
self.sigma = nn.Linear(in_features, out_features * num_gaussians)
self.mu = nn.Linear(in_features, out_features * num_gaussians)
self.pi = self.pi.to(device)
self.mu = self.mu.to(device)
self.sigma = self.sigma.to(device)
def forward(self, minibatch):
pi = self.pi(minibatch)
sigma = torch.exp(self.sigma(minibatch))
sigma = sigma.view(-1, self.num_gaussians, self.out_features)
mu = self.mu(minibatch)
mu = mu.view(-1, self.num_gaussians, self.out_features)
return pi, sigma, mu
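# Illustrative usage sketch (shapes only; example values are not from the
# original source): an MDN(in_features=3, out_features=1, num_gaussians=5)
# on the CPU maps a Bx3 minibatch to pi (Bx5), sigma (Bx5x1) and mu (Bx5x1).
#   mdn_layer = MDN(3, 1, 5, torch.device("cpu"))
#   pi, sigma, mu = mdn_layer(torch.randn(8, 3))
#   pi.shape, sigma.shape, mu.shape  # (8, 5), (8, 5, 1), (8, 5, 1)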
# ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
def gaussian_probability(sigma, mu, data):
"""Returns the probability of `data` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
data (BxI): A batch of data. B is the batch size and I is the number of
input dimensions.
Returns:
        probabilities (BxG): The probability of each point under each of the
            Gaussians, indexed by the corresponding sigma/mu.
"""
data = data.unsqueeze(1).expand_as(sigma)
ret = 1.0 / math.sqrt(2 * math.pi) * torch.exp(-0.5 *
((data - mu) / sigma) ** 2) / sigma
return torch.prod(ret, 2)
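# Shape-check sketch (illustrative, not from the original source): with B=8,
# G=5, O=1 the O dimension is reduced by torch.prod, leaving a BxG tensor of
# per-Gaussian densities for each point in the minibatch.
#   probs = gaussian_probability(torch.ones(8, 5, 1), torch.zeros(8, 5, 1),
#                                torch.zeros(8, 1))
#   probs.shape  # torch.Size([8, 5])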
def mdn_loss(pi, sigma, mu, target, device):
"""Calculates the error, given the MoG parameters and the target
The loss is the negative log likelihood of the data given the MoG
parameters.
"""
prob = pi * gaussian_probability(sigma, mu, target)
nll = -torch.log(torch.sum(prob, dim=1)).to(device)
return torch.mean(nll)
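# Minimal end-to-end sketch (hypothetical data, CPU device assumed): one
# forward pass through the MDN layer followed by the negative log likelihood.
#   device = torch.device("cpu")
#   mdn_layer = MDN(3, 1, 5, device)
#   pi, sigma, mu = mdn_layer(torch.randn(8, 3))
#   loss = mdn_loss(pi, sigma, mu, torch.randn(8, 1), device)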
def sample(pi, sigma, mu):
"""Draw samples from a MoG.
"""
categorical = Categorical(pi)
pis = list(categorical.sample().data)
sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())
for i, idx in enumerate(pis):
sample[i] = sample[i].mul(sigma[i, idx]).add(mu[i, idx])
return sample
def gaussion_predict(weights: list, mus: list, sigmas: list, xs: list, n_jobs=1):
if n_jobs == 1:
result = np.array([np.multiply(stats.norm(mus, sigmas).pdf(x),
weights).sum(axis=1).tolist() for x in xs]).transpose()
else:
with Pool(processes=n_jobs) as pool:
instances = []
results = []
for x in xs:
i = pool.apply_async(
gaussion_predict, (weights, mus, sigmas, [x], 1))
instances.append(i)
for i in instances:
result = i.get()
# print("partial result", result)
results.append(result)
result = np.concatenate(results, axis=1)
# with futures.ThreadPoolExecutor() as executor:
# for x in xs:
# future = executor.submit(
# gaussion_predict, weights, mus, sigmas, [x], 1)
# results.append(future.result())
# result = np.concatenate(results, axis=1)
return result
def gm(weights: list, mus: list, vars: list, x: list, b_plot=False, n_division=100):
""" given a list of points, calculate the gaussian mixture probability
Args:
weights (list): weights
        mus (list): the centroids of the Gaussians.
vars (list): the variances.
x (list): the targeting points.
b_plot (bool, optional): whether return the value for plotting. Defaults to False.
n_division (int, optional): number of division, if b_plot=True. Defaults to 100.
Returns:
float: the pdf of a gaussian mixture.
"""
if not b_plot:
result = [stats.norm(mu_i, vars_i).pdf(
x)*weights_i for mu_i, vars_i, weights_i in zip(mus, vars, weights)]
result = sum(result)
# result = 0
# for index in range(len(weights)):
# result += stats.norm(mus[index], vars[index]
# ).pdf(x) * weights[index]
# print(result)
return result
else:
xs = np.linspace(-1, 1, n_division)
# ys = [gm(weights, mus, vars, xi, b_plot=False) for xi in xs]
ys = gm(weights, mus, vars, xs, b_plot=False)
return xs, ys
# plt.plot(xs, ys)
# plt.show()
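# Example sketch (made-up parameters): the pdf of an equal-weight mixture of
# N(0, 0.1) and N(1, 0.1) at a single point, or the (xs, ys) curve for plotting.
#   p = gm([0.5, 0.5], [0.0, 1.0], [0.1, 0.1], [0.5])
#   xs, ys = gm([0.5, 0.5], [0.0, 1.0], [0.1, 0.1], None, b_plot=True)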
def normalize(x_point: float, mean: float, width: float) -> float:
"""normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the normalized value
"""
return (x_point - mean) / width * 2
def denormalize(x_point: float, mean: float, width: float) -> float:
"""de-normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the de-normalized value
"""
return 0.5 * width * x_point + mean
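# Quick sanity check (illustrative numbers): normalize maps the centre of the
# range to 0 and the edges to -1/+1; denormalize inverts the mapping.
#   normalize(7.0, mean=5.0, width=4.0)    # -> 1.0
#   denormalize(1.0, mean=5.0, width=4.0)  # -> 7.0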
def de_serialize(file: str):
"""de-serialize the model from a file.
Args:
file (str): the file path.
Returns:
Callable: the model.
"""
with open(file, 'rb') as f:
return dill.load(f)
class GenericMdn:
def __init__(self, config):
self.meanx = None
self.widthx = None
self.config = config
def fit(self, runtime_config):
raise NotImplementedError("Method fit() is not implemented.")
def fit_grid_search(self, runtime_config):
raise NotImplementedError(
"Method fit_grid_search() is not implemented.")
def predict(self, runtime_config):
raise NotImplementedError("Method predict() is not implemented.")
def normalize(self, xs: np.array):
"""normalize the data
Args:
            xs (np.array): the data points to be normalized; uses self.meanx
                and self.widthx.
Returns:
list: the normalized data.
"""
return (xs - self.meanx) / self.widthx * 2
def denormalize(self, xs):
"""de-normalize the data
Args:
            xs (np.array): the data points to be de-normalized; uses self.meanx
                and self.widthx.
Returns:
list: the de-normalized data.
"""
return 0.5 * self.widthx * xs + self.meanx
class RegMdnGroupBy():
""" This class implements the regression using mixture density network for group by queries.
"""
def __init__(self, config, b_store_training_data=False, b_normalize_data=True):
if b_store_training_data:
self.x_points = None # query range
self.y_points = None # aggregate value
            self.z_points = None # group by value
self.sample_x = None # used in the score() function
self.sample_g = None
self.sample_average_y = None
self.b_store_training_data = b_store_training_data
self.meanx = None
self.widthx = None
self.meany = None
self.widthy = None
self.model = None
self.last_xs = None
self.last_pi = None
self.last_mu = None
self.last_sigma = None
self.config = config
self.b_normalize_data = b_normalize_data
self.enc = None
def fit(self, z_group: list, x_points: list, y_points: list, runtime_config, lr: float = 0.001, n_workers=0):
"""fit the MDN regression model.
Args:
z_group (list): group by values
x_points (list): x points
y_points (list): y points
            runtime_config (dict): runtime configuration, e.g. the device to train on.
                Hyper-parameters such as n_epoch, n_gaussians_reg, n_hidden_layer and
                n_mdn_layer_node_reg are read from self.config rather than passed in.
lr (float, optional): the learning rate of the MDN network for training. Defaults to 0.001.
Raises:
ValueError: The hidden layer should be 1 or 2.
Returns:
RegMdnGroupBy: The regression model.
"""
n_epoch = self.config.config["n_epoch"]
n_gaussians = self.config.config["n_gaussians_reg"]
n_hidden_layer = self.config.config["n_hidden_layer"]
n_mdn_layer_node = self.config.config["n_mdn_layer_node_reg"]
b_grid_search = self.config.config["b_grid_search"]
encoder = self.config.config["encoder"]
device = runtime_config["device"]
if not b_grid_search:
if encoder == "onehot":
self.enc = OneHotEncoder(handle_unknown='ignore')
zs_encoded = z_group
zs_encoded = self.enc.fit_transform(zs_encoded).toarray()
elif encoder == "binary":
# print(z_group)
# prepare column names for binary encoding
columns = list(range(len(z_group[0])))
self.enc = ce.BinaryEncoder(cols=columns)
zs_encoded = self.enc.fit_transform(z_group).to_numpy()
elif encoder == "embedding":
sentences = columns2sentences(z_group, x_points, y_points)
self.enc = WordEmbedding()
self.enc.fit(sentences, gbs=["gb"],dim=self.config.config["n_embedding_dim"])
gbs_data = z_group.reshape(1,-1)[0]
zs_encoded = self.enc.predicts(gbs_data)
# raise TypeError("embedding is not supported yet.")
if self.b_normalize_data:
self.meanx = (np.max(x_points) + np.min(x_points)) / 2
self.widthx =
|
np.max(x_points)
|
numpy.max
|
import torch
import torch.nn as nn
import torch.tensor as tensor
import torch.nn.functional as F
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from logger import Logger
import h5py
import numpy as np
import glob
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import librosa
'''
# MNIST Dataset
dataset = dsets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
# Data Loader (Input Pipeline)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=100,
shuffle=True)
'''
class SpectrogramData(Dataset):
"""Face Landmarks dataset."""
def __init__(self, data, labels, transform=None):
"""
        Args:
            data (array-like): spectrogram samples, 84 frequency bins per sample.
            labels (array-like): class label for each sample.
            transform (callable, optional): Optional transform to be applied
                on a sample.
"""
self.dataTensor = data
self.labelTensor = labels
self.transform = transform
def __len__(self):
return len(self.labelTensor)
def __getitem__(self, idx):
#img_name = os.path.join(self.root_dir,
# self.landmarks_frame.iloc[idx, 0])
#image = io.imread(img_name)
#landmarks = self.landmarks_frame.iloc[idx, 1:].as_matrix()
#landmarks = landmarks.astype('float').reshape(-1, 2)
if self.transform:
# print(np.shape(self.dataTensor[idx]))
sample = np.reshape(self.dataTensor[idx],(1,84,1))
sample = self.transform(sample)
sample = sample.type(torch.FloatTensor), self.labelTensor[idx].astype(int)
return sample
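# Construction sketch (dummy arrays; shapes follow the reshape to (1, 84, 1)
# in __getitem__): each sample is an 84-bin spectrogram slice plus a label.
#   dummy = SpectrogramData(np.random.rand(10, 84), np.zeros(10),
#                           transform=transforms.ToTensor())
#   x, y = dummy[0]  # FloatTensor of shape (1, 1, 84) and an integer label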
'''
# read hdf5 files and create a compact hdf5 file with all classes, some samples
file_count = 0
filenames = glob.glob('D:\\ImplementAI_data\\nsynth-train\\*.h5')
print(filenames)
h5pySizes = [12675,8773,32690,51821,34201,34477,13911,19474,5501,10208] #65474,
for file in glob.glob('D:\\ImplementAI_data\\nsynth-train\\*.h5'):
file_count=file_count+1
selection_index = np.random.randint(0,h5pySizes[file_count-1],int(0.25*h5pySizes[file_count-1]))
data_f = h5py.File(file,'r');
# data = np.mean(np.array(data_f['train']),axis=3)
data = np.array(data_f['train'])
labels = file_count*np.ones((np.shape(data)[0],))
data = data[selection_index]
labels = labels[selection_index]
print(np.shape(data),np.shape(labels))
if file_count==1:
Data_tensor = data
Label_tensor = labels
else:
Data_tensor = np.append(Data_tensor,data,axis=0)
Label_tensor = np.append(Label_tensor,labels,axis=0)
print(np.shape(Data_tensor),np.shape(Label_tensor))
h5f = h5py.File('D:\\ImplementAI_data\\nsynth-train\\train_data_allClasses.h5','w')
h5f.create_dataset('Data',data = Data_tensor)
h5f.create_dataset('labels',data = Label_tensor)
'''
lib_f = h5py.File('train_data_latent_vector.h5','r')
lib = lib_f['z_vec']
data = np.load('notearray.npy')
data1 = np.zeros((np.shape(data)[0],1,84))
for i in tqdm(range(np.shape(data)[0])):
c = np.abs(librosa.cqt(data[i], sr=16000, n_bins=84))
c = np.reshape(c,(1,84,84))
c = np.mean(c,axis=2)
data1[i] = c;
data = data1/np.max(data1)
labels = np.random.randint(0,np.shape(data)[0],np.shape(data)[0])
print(type(data),type(labels),data.dtype,labels.dtype)
dataset = SpectrogramData(data,labels,transform=transforms.ToTensor())
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=1,
shuffle=False)
def to_np(x):
return x.data.cpu().numpy()
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def accuracy_metric(output,targets):
_, predicted = torch.max(output.data,1)
# print(output.size(),targets.size(),predicted.size())
accuracy = (predicted==targets.data.type(torch.cuda.LongTensor)).sum()/len(targets)
return accuracy
#Encoder
pretrained_weights = torch.load('Q_encoder_weights_2.pt')
class Q_net(nn.Module):
def __init__(self,X_dim,N,z_dim):
super(Q_net, self).__init__()
self.xdim = X_dim
self.lin1 = nn.Linear(X_dim, N)
# self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, int(N/2))
self.lin3gauss = nn.Linear(int(N/2), z_dim)
def forward(self, x):
x = x.view(-1,self.xdim)
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
# x = F.dropout(self.lin2(x), p=0.25, training=self.training)
# x = F.relu(x)
x = F.dropout(self.lin3(x), p=0.25, training=self.training)
x = F.relu(x)
xgauss = self.lin3gauss(x)
return xgauss
class Q_convNet(nn.Module):
def __init__(self,X_dim,N,z_dim):
        super(Q_convNet, self).__init__()
self.xdim = X_dim
self.conv1 = nn.Conv1d(1,16,7,stride=3,padding=3) # 16X28
self.conv2 = nn.Conv1d(16,32,5,stride=2,padding=2) # 32X14
self.lin1 = nn.Linear(32*14, N)
# self.lin2 = nn.Linear(N, N)
# self.lin3 = nn.Linear(N, int(N/2))
self.lin3gauss = nn.Linear(N, z_dim)
def forward(self, x):
x = x.view(-1,1,self.xdim)
x = F.dropout(self.conv1(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.conv2(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
xgauss = self.lin3gauss(x)
return xgauss
# Decoder
class P_net(nn.Module):
def __init__(self,X_dim,N,z_dim):
super(P_net, self).__init__()
self.lin1 = nn.Linear(z_dim, int(N/2))
self.lin2 = nn.Linear(int(N/2), N)
# self.lin3 = nn.Linear(N, N)
self.lin4 = nn.Linear(N, X_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.25, training=self.training)
x = F.relu(x)
# x = F.dropout(self.lin3(x), p=0.25, training=self.training)
x = self.lin4(x)
return F.sigmoid(x)
class P_convNet(nn.Module):
def __init__(self,X_dim,N,z_dim):
        super(P_convNet, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, 32*14)
# self.lin3 = nn.Linear(N, N)
self.lin4 = nn.Linear(N, X_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.25, training=self.training)
x = F.relu(x)
# x = F.dropout(self.lin3(x), p=0.25, training=self.training)
x = self.lin4(x)
return F.sigmoid(x)
# Discriminator
class D_net_gauss(nn.Module):
def __init__(self,N,z_dim):
super(D_net_gauss, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
# self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, int(N/2))
self.lin4 = nn.Linear(int(N/2), 10)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.5, training=self.training)
x = F.relu(x)
# x = F.dropout(self.lin2(x), p=0.5, training=self.training)
# x = F.relu(x)
x = F.dropout(self.lin3(x), p=0.5, training=self.training)
x = F.relu(x)
# return F.sigmoid(self.lin3(x))
return F.log_softmax(self.lin4(x))
EPS = 1e-15
z_red_dims = 64
Q = Q_net(84,128,z_red_dims).cuda()
P = P_net(84,128,z_red_dims).cuda()
D_gauss = D_net_gauss(32,z_red_dims).cuda()
Q.load_state_dict(pretrained_weights)
# Set the logger
logger = Logger('./logs/z_120_fixed_LR_2')
# Set learning rates
gen_lr = 0.001
reg_lr = 0.001
#encode/decode optimizers
optim_P = torch.optim.Adam(P.parameters(), lr=gen_lr)
optim_Q_enc = torch.optim.Adam(Q.parameters(), lr=gen_lr)
#regularizing optimizers
optim_Q_gen = torch.optim.Adam(Q.parameters(), lr=reg_lr)
optim_D = torch.optim.Adam(D_gauss.parameters(), lr=reg_lr)
data_iter = iter(data_loader)
iter_per_epoch = len(data_loader)
print("iter per epoch =",iter_per_epoch)
total_step = np.shape(data)[0]
# Start training
for step in tqdm(range(total_step)):
# Reset the data_iter
if (step+1) % iter_per_epoch == 0:
data_iter = iter(data_loader)
print(step)
# Fetch the images and labels and convert them to variables
images, labels = next(data_iter)
print(torch.min(images),torch.max(images))
# print(images.size(),labels.size(),images.type(),labels.type())
images, labels = to_var(images.view(images.size(0), -1)), to_var(labels)
Q.eval()
# z_real_gauss = Variable(torch.randn(images.size()[0], z_red_dims) * 5.).cuda()
# D_real_gauss = D_gauss(z_real_gauss)
z_out = Q(images)
query_vector = to_np(z_out)
print(
|
np.shape(query_vector)
|
numpy.shape
|
# -*- coding: utf-8 -*-
"""Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
6"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import numpy.random as random
from numpy.random import randn
import numpy as np
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import (unscented_transform, MerweScaledSigmaPoints,
JulierSigmaPoints)
from filterpy.common import Q_discrete_white_noise
import filterpy.stats as stats
from math import cos, sin
DO_PLOT = False
def test_sigma_plot():
""" Test to make sure sigma's correctly mirror the shape and orientation
of the covariance array."""
x = np.array([[1, 2]])
P = np.array([[2, 1.2],
[1.2, 2]])
kappa = .1
    # if kappa is larger, then points should be closer together
sp0 = JulierSigmaPoints(n=2, kappa=kappa)
sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)
w0, _ = sp0.weights()
w1, _ = sp1.weights()
Xi0 = sp0.sigma_points (x, P)
Xi1 = sp1.sigma_points (x, P)
assert max(Xi1[:,0]) > max(Xi0[:,0])
assert max(Xi1[:,1]) > max(Xi0[:,1])
if DO_PLOT:
plt.figure()
for i in range(Xi0.shape[0]):
plt.scatter((Xi0[i,0]-x[0, 0])*w0[i] + x[0, 0],
(Xi0[i,1]-x[0, 1])*w0[i] + x[0, 1],
color='blue')
for i in range(Xi1.shape[0]):
plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0,0],
(Xi1[i, 1]-x[0, 1]) * w1[i] + x[0,1],
color='green')
stats.plot_covariance_ellipse([1, 2], P)
def test_julier_weights():
for n in range(1,15):
for k in
|
np.linspace(0,5,0.1)
|
numpy.linspace
|
from keras.engine import Model
from keras.layers import Flatten, Dense, Input
from keras_vggface.vggface import VGGFace
from os import listdir
from os.path import isdir
from PIL import Image
from matplotlib import pyplot
from numpy import savez_compressed
from numpy import asarray
from mtcnn.mtcnn import MTCNN
# develop a classifier for the 5 Celebrity Faces Dataset
from numpy import load
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from keras.callbacks import ModelCheckpoint
#custom parameters
nb_class = 2
vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
last_layer = vgg_model.get_layer('avg_pool').output
x = Flatten(name='flatten')(last_layer)
out = Dense(nb_class, activation='softmax', name='classifier')(x)
custom_vgg_model = Model(vgg_model.input, out)
# extract a single face from a given photograph
def extract_face(filename, required_size=(160, 160)):
# load image from file
image = Image.open(filename)
# convert to RGB, if needed
image = image.convert('RGB')
# convert to array
pixels = asarray(image)
# create the detector, using default weights
detector = MTCNN()
# detect faces in the image
results = detector.detect_faces(pixels)
# extract the bounding box from the first face
x1, y1, width, height = results[0]['box']
# bug fix
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
# extract the face
face = pixels[y1:y2, x1:x2]
# resize pixels to the custom_vgg_model size
image = Image.fromarray(face)
image = image.resize(required_size)
face_array =
|
asarray(image)
|
numpy.asarray
|
import numpy as np
import numpy.random as npr
import py.test
from hypothesis import assume
from hypothesis import given
from hypothesis.strategies import integers
import arlunio as ar
import arlunio.mask as mask
import arlunio.testing as T
@ar.definition
def MaskGenerator(width: int, height: int, *, seed=1024) -> mask.Mask:
"""A generator of random masks."""
gen = npr.default_rng(seed=seed)
return mask.Mask(gen.random(size=(height, width)) < 0.5)
class TestMask:
"""Test cases for the :code:`Mask` type."""
@py.test.mark.parametrize(
"arr", [np.array([True, False, True]), np.array([[True, False], [False, True]])]
)
def test_init(self, arr):
"""Ensure that masks can be created directly from an existing array,"""
m = mask.Mask(arr)
assert isinstance(m, mask.Mask)
assert (m == arr).all()
@py.test.mark.parametrize(
"arr", [np.array([True, False, True]), np.array([[True, False], [False, True]])]
)
def test_init_view(self, arr):
"""Ensure that masks can be created as a view on an existing numpy array."""
m = arr.view(mask.Mask)
assert isinstance(m, mask.Mask)
assert (m == arr).all()
@py.test.mark.parametrize(
"id, a, b, expected",
[
(
"broadcast with 'a = True'",
True,
mask.Mask(np.array([True, False, True])),
mask.Mask(np.full(3, True)),
),
(
"broadcast with 'a = False'",
False,
mask.Mask(np.array([True, False, True])),
mask.Mask(np.array([True, False, True])),
),
(
"broadcast with 'b = True'",
mask.Mask(np.array([True, False, True])),
True,
mask.Mask(np.full(3, True)),
),
(
"broadcast with 'b = False'",
mask.Mask(np.array([True, False, True])),
False,
mask.Mask(np.array([True, False, True])),
),
(
"broadcast with a = array",
np.array([True, False]),
mask.Mask(np.array([[False, True], [True, False]])),
mask.Mask(np.array([[True, True], [True, False]])),
),
(
"broadcast with b = array",
mask.Mask(np.array([[False, True], [True, False]])),
np.array([True, False]),
mask.Mask(np.array([[True, True], [True, False]])),
),
(
"one mask, one array of equal size",
mask.Mask(np.array([True, False, True])),
np.array([False, True, False]),
mask.Mask(np.array([True, True, True])),
),
(
"one array, one mask of equal size",
np.array([False, True, False]),
mask.Mask(np.array([True, False, True])),
mask.Mask(np.array([True, True, True])),
),
(
"two masks of equal size",
mask.Mask(np.array([False, True, False])),
mask.Mask(np.array([True, False, True])),
mask.Mask(np.array([True, True, True])),
),
],
)
def test_add(self, id, a, b, expected):
"""Ensure that masks can be added together, where 'add' in this case is a
boolean OR"""
result = a + b
assert isinstance(result, mask.Mask), "The result should also be a mask"
assert (result == expected).all()
@py.test.mark.parametrize(
"id, a, b, expected",
[
(
"broadcast with 'a = True",
True,
mask.Mask(np.array([True, False, True])),
mask.Mask(np.array([True, False, True])),
),
(
"broadcast with 'a = False",
False,
mask.Mask(np.array([True, False, True])),
mask.Mask(np.full(3, False)),
),
(
"broadcast with 'b = True'",
mask.Mask(np.array([True, False, True])),
True,
mask.Mask(np.array([True, False, True])),
),
(
"broadcast with 'b = False'",
mask.Mask(np.array([True, False, True])),
False,
mask.Mask(np.array([False, False, False])),
),
(
"broadcast with a = array",
np.array([True, False]),
mask.Mask(np.array([[True, False], [False, True]])),
mask.Mask(np.array([[True, False], [False, False]])),
),
(
"broadcast with b = array",
mask.Mask(np.array([[True, False], [False, True]])),
|
np.array([True, False])
|
numpy.array
|
""" Utility functions for contaminante """
import numpy as np
import pandas as pd
from tqdm import tqdm
import warnings
import lightkurve as lk
from astropy.stats import sigma_clip, sigma_clipped_stats
from numpy.linalg import solve
from scipy.sparse import csr_matrix, diags
import astropy.units as u
def search(targetid, mission, search_func=lk.search_targetpixelfile, quarter=None, sector=None, campaign=None):
"""Convenience function to help lightkurve searches
Parameters
----------
targetid : str
The ID of the target, either KIC, EPIC or TIC from Kepler, K2 or TESS
mission : str
Kepler, K2 or TESS
search_func : func
The search function to use, default is `lk.search_targetpixelfile`. Users may
want `lk.search_tesscut`
quarter : int, list or None
Quarter of Kepler data to use. Specify either using an integer (e.g. `1`) or
a range (e.g. `[1, 2, 3]`). `None` will return all quarters.
sector : int, list or None
Sector of TESS data to use. Specify either using an integer (e.g. `1`) or
a range (e.g. `[1, 2, 3]`). `None` will return all sectors.
campaign : int, list or None
Campaign of K2 data to use. Specify either using an integer (e.g. `1`) or
a range (e.g. `[1, 2, 3]`). `None` will return all campaigns.
Returns
-------
sr : lk.search.SearchResult
Search result object containing the valid files.
"""
if search_func == lk.search_targetpixelfile:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if mission.lower() == 'kepler':
sr = search_func(targetid, mission=mission, quarter=quarter)
elif (mission.lower() == 'ktwo') | (mission.lower() == 'k2'):
sr = search_func(targetid, mission=mission, campaign=campaign)
elif mission.lower() == 'tess':
sr = search_func(targetid, mission=mission, sector=sector)
else:
raise ValueError("No such mission as `'{}'`".format(mission))
numeric = int(''.join([char for char in "KIC {}".format(targetid) if char.isnumeric()]))
numeric_s = np.asarray([int(''.join([char for char in sr.target_name[idx] if char.isnumeric()])) for idx in range(len(sr))])
sr = lk.SearchResult(sr.table[numeric_s == numeric], )
elif search_func == lk.search_tesscut:
sr = search_func(targetid, sector=sector)
else:
raise ValueError('Search Function is wrong')
return sr
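# Usage sketch (the target and quarter below are placeholders, not from the
# original source): fetch Kepler target pixel files for a single quarter.
#   sr = search("KIC 6922244", "Kepler", quarter=4)
#   tpf = sr.download()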
def build_X(tpf, flux, t_model=None, background=False, cbvs=None, spline=True, spline_period=2, sff=False):
"""Build a design matrix to model pixel in target pixel files
Parameters
----------
tpf : lightkurve.TargetPixelFile
Input target pixel file to make the design matrix for
flux : np.ndarray
The SAP flux to use for creating the design matrix
t_model: None or np.ndarray
The transit model, if None no transit model will be used in the design matrix
background: None or np.ndarray
Background model, useful for TESS data. If None will not be used in design matrix
cbvs: None or np.ndarray
Cotrending Basis vectors. If None will not be used in design matrix
spline: bool
Whether to use a B-Spline in time
spline_period: float
If using a spline, what time period the knots should be spaced at
sff : bool
        Whether to use the SFF method of building centroids
Returns
-------
SA : scipy.sparse.csr_matrix
The design matrix to use to detrend the input TPF
"""
r, c = np.nan_to_num(tpf.pos_corr1), np.nan_to_num(tpf.pos_corr2)
r[np.abs(r) > 10] = 0
c[np.abs(r) > 10] = 0
if sff:
r = lk.SFFCorrector(lk.LightCurve(tpf.time, flux))
_ = r.correct()
centroids = r.X['sff']
else:
breaks = np.where((np.diff(tpf.time) > (0.0202 * 10)))[0] - 1
breaks = breaks[breaks > 0]
if r.sum() == 0:
ts0 = np.asarray([np.in1d(tpf.time, t) for t in np.array_split(tpf.time, breaks)])
ts1 = np.asarray([np.in1d(tpf.time, t) * (tpf.time - t.mean())/(t[-1] - t[0]) for t in np.array_split(tpf.time, breaks)])
centroids = np.vstack([ts0, ts1, ts1**2]).T
else:
rs0 = np.asarray([np.in1d(tpf.time, t) for t in np.array_split(tpf.time, breaks)])
rs1 = np.asarray([np.in1d(tpf.time, t) * (r - r[np.in1d(tpf.time, t)].mean()) for t in np.array_split(tpf.time, breaks)])
cs1 = np.asarray([np.in1d(tpf.time, t) * (c - c[np.in1d(tpf.time, t)].mean()) for t in np.array_split(tpf.time, breaks)])
centroids = np.vstack([
rs1, cs1, rs1*cs1,
rs1**2, cs1**2, rs1**2*cs1, rs1*cs1**2, rs1**2*cs1**2,
rs1**3*cs1**3, rs1**3*cs1**2, rs1**3*cs1, rs1**3, cs1**3, cs1**3*rs1, cs1**3*rs1**2]).T
A = np.copy(centroids)
if cbvs is not None:
A = np.hstack([A, np.nan_to_num(cbvs)])
if background:
bkg = lk.DesignMatrix(tpf.flux[:, ~tpf.create_threshold_mask()]).pca(3).values
A = np.hstack([A, bkg])
if spline:
spline_dm = lk.correctors.designmatrix.create_spline_matrix(tpf.time, n_knots=np.max([4, int((tpf.time[-1] - tpf.time[0])//spline_period)])).values
A = np.hstack([A, spline_dm])
SA = np.atleast_2d(flux).T * A
if t_model is not None:
SA = np.hstack([SA, np.atleast_2d(np.ones(len(tpf.time))).T, np.atleast_2d(t_model).T])
else:
SA = np.hstack([SA, np.atleast_2d(np.ones(len(tpf.time))).T])
return csr_matrix(SA)
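# Usage sketch (assumes `tpf` is a lightkurve TargetPixelFile obtained
# elsewhere, e.g. via search(...).download(); not from the original source):
# build a sparse design matrix with a spline but no transit model.
#   lc = tpf.to_lightcurve(aperture_mask="all")
#   SA = build_X(tpf, lc.flux, spline=True, spline_period=2)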
def build_model(tpf, flux, cbvs=None, t_model=None, errors=False, cadence_mask=None, background=False, spline=True):
""" Build a model for the pixel level light curve
Parameters
----------
tpf : lightkurve.TargetPixelFile
Input target pixel file to make the design matrix for
flux : np.ndarray
The SAP flux to use for creating the design matrix
cbvs: None or np.ndarray
Cotrending Basis vectors. If None will not be used in design matrix
t_model: None or np.ndarray
The transit model, if None no transit model will be used in the design matrix
errors: bool
Whether to return the errors of the models
cadence_mask: None or np.ndarray
A mask to specify which cadences to use. Cadences where True will not be used in the analysis.
background: bool
Whether to estimate the background flux, useful for TESS
spline: bool
Whether to use a B-Spline in time
Returns
-------
model : np.ndarray
Model of the TPF, with shape ncadences x npixels x npixels.
model_err: np.ndarray
If errors is true, returns model errors
transit_pixels : np.ndarray
If t_model is specified, the weight of the transit in each pixel.
Shape npixel x npixel
transit_pixels_err : np.ndarray
If t_model is specified, the error of the weight of the transit in each pixel.
Shape npixel x npixel
aper : bool
The aperture that contains the transit signal (npixels x npixels)
"""
with warnings.catch_warnings():
# I don't want to fix runtime warnings...
warnings.simplefilter("ignore", category=RuntimeWarning)
if cadence_mask is None:
cadence_mask = np.ones(len(tpf.time)).astype(bool)
SA = build_X(tpf, flux, t_model=t_model, cbvs=cbvs, spline=spline, background=background, spline_period=10)
prior_sigma = np.ones(SA.shape[1]) * 1e-2
prior_mu = np.zeros(SA.shape[1])
if t_model is not None:
prior_mu[-2] = 1
else:
prior_mu[-1] = 1
model = np.zeros(tpf.flux.shape)
if errors:
model_err = np.zeros(tpf.flux.shape)
if t_model is not None:
transit_pixels = np.zeros(tpf.flux.shape[1:]) * np.nan
transit_pixels_err = np.zeros(tpf.flux.shape[1:]) * np.nan
#SA[:, :-1][t_model !=0 ] *= 0
# Fix Saturation
saturated = np.max(np.nan_to_num(tpf.flux), axis=0) > 1.4e5
saturated |= np.abs(np.gradient(saturated.astype(float), axis=0)) != 0
pixels = tpf.flux.copy()
pixels_err = tpf.flux_err.copy()
for jdx, s in enumerate(saturated.T):
if any(s):
l = (np.where(s)[0][s.sum()//2])
pixels[:, s, jdx] = np.nan
pixels[:, l, jdx] = tpf.flux[:, s, jdx].sum(axis=(1))
pixels_err[:, l, jdx] = ((tpf.flux_err[:, s, jdx]**2).sum(axis=(1))**0.5)/s.sum()
for idx in (range(tpf.shape[1])):
for jdx in range(tpf.shape[2]):
f = pixels[:, idx, jdx]
if (f < 0).any():
continue
fe = pixels_err[:, idx, jdx]
fe /=
|
np.nanmean(f)
|
numpy.nanmean
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""redis_hetergraph"""
import pgl
import redis
from redis import BlockingConnectionPool, StrictRedis
from redis._compat import b, unicode, bytes, long, basestring
from rediscluster.nodemanager import NodeManager
from rediscluster.crc import crc16
from collections import OrderedDict
import threading
import numpy as np
import time
import json
import pgl.graph as pgraph
import pickle as pkl
from pgl.utils.logger import log
import pgl.graph_kernel as graph_kernel
from pgl import heter_graph
import pgl.redis_graph as rg
class RedisHeterGraph(rg.RedisGraph):
"""Redis Heterogeneous Graph"""
def __init__(self, name, edge_types, redis_config, num_parts):
super(RedisHeterGraph, self).__init__(name, redis_config, num_parts)
self._num_edges = {}
self.edge_types = edge_types
self.e_type = None
self._edge_feat_info = {}
self._edge_feat_dtype = {}
self._edge_feat_shape = {}
def num_edges_by_type(self, e_type):
"""get edge number by specified edge type"""
if e_type not in self._num_edges:
self._num_edges[e_type] = int(
self._rs.get("%s:num_edges" % e_type))
return self._num_edges[e_type]
def num_edges(self):
"""num_edges"""
num_edges = {}
for e_type in self.edge_types:
num_edges[e_type] = self.num_edges_by_type(e_type)
return num_edges
def edge_feat_info_by_type(self, e_type):
"""get edge features information by specified edge type"""
if e_type not in self._edge_feat_info:
buff = self._rs.get("%s:ef:infos" % e_type)
if buff is not None:
self._edge_feat_info[e_type] = json.loads(buff.decode())
else:
self._edge_feat_info[e_type] = []
return self._edge_feat_info[e_type]
def edge_feat_info(self):
"""edge_feat_info"""
edge_feat_info = {}
for e_type in self.edge_types:
edge_feat_info[e_type] = self.edge_feat_info_by_type(e_type)
return edge_feat_info
def edge_feat_shape(self, e_type, key):
"""edge_feat_shape"""
if e_type not in self._edge_feat_shape:
e_feat_shape = {}
for k, shape, _ in self.edge_feat_info()[e_type]:
e_feat_shape[k] = shape
self._edge_feat_shape[e_type] = e_feat_shape
return self._edge_feat_shape[e_type][key]
def edge_feat_dtype(self, e_type, key):
"""edge_feat_dtype"""
if e_type not in self._edge_feat_dtype:
e_feat_dtype = {}
for k, _, dtype in self.edge_feat_info()[e_type]:
e_feat_dtype[k] = dtype
self._edge_feat_dtype[e_type] = e_feat_dtype
return self._edge_feat_dtype[e_type][key]
def sample_predecessor(self, e_type, nodes, max_degree, return_eids=False):
"""sample predecessor with the specified edge type"""
query = ["%s:d:%s" % (e_type, n) for n in nodes]
rets = rg.hmget_sample_helper(self._rs, query, self.num_parts,
max_degree)
v = []
eid = []
for buff in rets:
if buff is None:
v.append(np.array([], dtype="int64"))
eid.append(np.array([], dtype="int64"))
else:
npret = np.frombuffer(
buff, dtype="int64").reshape([-1, 2]).astype("int64")
v.append(npret[:, 0])
eid.append(npret[:, 1])
if return_eids:
return np.array(v), np.array(eid)
else:
return np.array(v)
def sample_successor(self, e_type, nodes, max_degree, return_eids=False):
"""sample successor with the specified edge type"""
query = ["%s:s:%s" % (e_type, n) for n in nodes]
rets = rg.hmget_sample_helper(self._rs, query, self.num_parts,
max_degree)
v = []
eid = []
for buff in rets:
if buff is None:
v.append(np.array([], dtype="int64"))
eid.append(np.array([], dtype="int64"))
else:
npret = np.frombuffer(
buff, dtype="int64").reshape([-1, 2]).astype("int64")
v.append(npret[:, 0])
eid.append(npret[:, 1])
if return_eids:
return np.array(v), np.array(eid)
else:
return np.array(v)
def predecessor(self, e_type, nodes, return_eids=False):
"""predecessor with the specified edge type"""
query = ["%s:d:%s" % (e_type, n) for n in nodes]
ret = rg.hmget_helper(self._rs, query, self.num_parts)
v = []
eid = []
for buff in ret:
if buff is not None:
npret = np.frombuffer(
buff, dtype="int64").reshape([-1, 2]).astype("int64")
v.append(npret[:, 0])
eid.append(npret[:, 1])
else:
v.append(np.array([], dtype="int64"))
eid.append(
|
np.array([], dtype="int64")
|
numpy.array
|
"""
The main script that serves as the entry-point for all kinds of training experiments.
"""
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torchmetrics
import tqdm
from das.data.data_args import DataArguments
from das.model_analyzer.analysis_tasks.base import AnalysisTask
from das.model_analyzer.analyzer_args import AnalysisTaskArguments, AnalyzerArguments
from das.model_analyzer.utils import annotate_heatmap, heatmap
from das.models.model_args import ModelArguments
from das.utils.basic_args import BasicArguments
from das.utils.basic_utils import create_logger
from das.utils.evaluation import evaluate_clustering
from das.utils.metrics import TrueLabelConfidence
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# setup logging
logger = create_logger(__name__)
class SimilarImagesClusteringTask(AnalysisTask):
def __init__(
self,
basic_args: BasicArguments,
data_args: DataArguments,
model_args: ModelArguments,
analyzer_args: AnalyzerArguments,
analysis_task_args: AnalysisTaskArguments,
) -> None:
super().__init__(
"similar_images_clustering",
basic_args,
data_args,
model_args,
analyzer_args,
analysis_task_args,
)
def generate_features(self):
# get data collator required for the model
self.datamodule.collate_fns = self.model.get_data_collators(
self.data_args, None
)
# setup the model for feature extraction
features_layer = self.model.features_layer()
# set up features layer output hook
def layer_output_hook(outputs={}):
def hook(module, input, output):
outputs["result"] = output
return hook
feature_layer_output = {}
if features_layer is not None:
features_layer.register_forward_hook(
layer_output_hook(feature_layer_output)
)
# get predictions over the test set
logger.info("Generating features from model...")
features_list = []
target_labels = []
with torch.no_grad():
for batch in tqdm.tqdm(self.datamodule.test_dataloader()):
for kk, vv in batch.items():
batch[kk] = vv.cuda()
self.model(**batch)
features_list.append(
torch.flatten(feature_layer_output["result"], start_dim=1).cpu()
)
target_labels.append(batch["label"].cpu())
features_list = torch.cat(features_list).numpy()
target_labels = torch.cat(target_labels)
# setup pca if required
if self.analysis_task_args.dim_reduction_method == "pca":
pca = PCA(
**self.analysis_task_args.dim_reduction_args,
random_state=self.basic_args.seed,
)
pca.fit(features_list)
features_list = pca.transform(features_list)
elif self.analysis_task_args.dim_reduction_method == "tsne":
tsne = TSNE(**self.analysis_task_args.dim_reduction_args)
features_list = tsne.fit_transform(features_list)
return features_list, target_labels
def cluster_features(self, features_list):
kmeans = KMeans(
n_clusters=self.num_labels, n_jobs=-1, random_state=self.basic_args.seed
)
kmeans.fit(features_list)
pred_labels = kmeans.labels_
# holds the cluster id and the images { id: [images] }
clusters = {}
        for idx, cluster in enumerate(kmeans.labels_):
            clusters.setdefault(cluster, []).append(idx)
return clusters, pred_labels
def generate_metrics(self, target_labels, pred_labels):
return evaluate_clustering(
target_labels,
torch.from_numpy(pred_labels).to(target_labels.device),
calc_acc=True,
)
def visualize_clusters(self, clusters):
dataset = self.datamodule.test_dataset
for k, v in clusters.items():
fig = plt.figure(figsize=(25, 25))
files = [dataset[idx]["image_file_path"] for idx in v]
labels = [dataset[idx]["label"] for idx in v]
if len(files) > 10:
files = files[:10]
for idx, file in enumerate(files):
print(labels[idx])
ax = fig.add_subplot(5, 5, idx + 1)
ax.set_xlabel(f"{labels[idx]}")
img = plt.imread(file)
plt.imshow(img)
plt.axis("off")
plt.show()
def visualize_tsne(self, features_list):
logger.info("Visualizing TSNE features...")
# scale and move the coordinates so they fit [0; 1] range
def scale_to_01_range(x):
# compute the distribution range
value_range =
|
np.max(x)
|
numpy.max
|
import numpy as np
import random as rn
#import datetime
#import pandas as pd
def calculate_crowding(scores):
# print('CALCULATE_CROWDING start: ',datetime.datetime.now().strftime("%H:%M:%S.%f"))
    # Crowding is based on chromosome scores (not chromosome binary values)
# All scores are normalised between low and high
# For any one score, all solutions are sorted in order low to high
    # Crowding for chromosome x for that score is the difference between the next highest and next lowest score
# Total crowding value sums all crowding for all scores
population_size=len(scores[:,0])
number_of_scores=len(scores[0,:])
# create crowding matrix of population (row) and score (column)
crowding_matrix=np.zeros((population_size,number_of_scores))
# normalise scores
normed_scores = (scores-scores.min(0))/scores.ptp(0) # numpy ptp is range (max-min)
# Calculate crowding
for col in range(number_of_scores): # calculate crowding distance for each score in turn
crowding=np.zeros(population_size) # One dimensional array
crowding[0]=1 # end points have maximum crowding
crowding[population_size-1]=1 # end points have maximum crowding
sorted_scores=np.sort(normed_scores[:,col]) # sort scores
sorted_scores_index=np.argsort(normed_scores[:,col]) # index of sorted scores
crowding[1:population_size-1]=sorted_scores[2:population_size]-sorted_scores[0:population_size-2] # crowding distance
re_sort_order=np.argsort(sorted_scores_index) # re-sort to original order step 1
        sorted_crowding=crowding[re_sort_order] # re-sort to original order step 2
crowding_matrix[:,col]=sorted_crowding # record crowding distances
    crowding_distances=np.sum(crowding_matrix,axis=1) # Sum crowding distances of all scores
return crowding_distances
# print('CALCULATE_CROWDING stop: ',datetime.datetime.now().strftime("%H:%M:%S"))
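# Minimal illustrative sketch (hypothetical data, not part of the original
# model): calculate_crowding on a made-up two-objective score matrix. End
# points of each sorted objective get maximum crowding (1.0 per objective);
# interior solutions get the normalised gap between their sorted neighbours.
def _demo_calculate_crowding():
    demo_scores = np.array([[1.0, 4.0],
                            [2.0, 3.0],
                            [3.0, 2.0],
                            [4.0, 1.0]])
    # Expected result: [2.0, 1.33..., 1.33..., 2.0]
    return calculate_crowding(demo_scores)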
def crowding_selection(population,scores,number_to_select):
# This function selects a number of solutions based on tournament of crowding distances
    # Two members of the population are picked at random
    # The one with the higher crowding distance is always picked
crowding_distances=calculate_crowding(scores) # crowding distances for each member of the population
picked_population=np.zeros((number_to_select,len(population[0,:]))) # array of picked solutions (actual solution not ID)
picked_scores=np.zeros((number_to_select,len(scores[0,:]))) # array of scores for picked solutions
for i in range(number_to_select):
population_size=len(population[:,0])
fighter1ID=rn.randint(0,population_size-1) # 1st random ID
fighter2ID=rn.randint(0,population_size-1) # 2nd random ID
if crowding_distances[fighter1ID]>=crowding_distances[fighter2ID]: # 1st solution picked
picked_population[i,:]=population[fighter1ID,:] # add solution to picked solutions array
picked_scores[i,:]=scores[fighter1ID,:] # add score to picked solutions array
# remove selected solution from available solutions
population=np.delete(population,(fighter1ID), axis=0) # remove picked solution - cannot be chosen again
scores=np.delete(scores,(fighter1ID), axis=0) # remove picked score (as line above)
            crowding_distances=np.delete(crowding_distances,(fighter1ID), axis=0) # remove crowding score (as line above)
else: # solution 2 is better. Code as above for 1st solution winning
picked_population[i,:]=population[fighter2ID,:]
picked_scores[i,:]=scores[fighter2ID,:]
population=np.delete(population,(fighter2ID), axis=0)
scores=np.delete(scores,(fighter2ID), axis=0)
crowding_distances=np.delete(crowding_distances,(fighter2ID), axis=0)
return (picked_population,picked_scores)
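# Minimal illustrative sketch (hypothetical data, not part of the original
# model): pick 2 of 4 solutions by crowding-distance tournament. The seeds are
# set only to make the sketch repeatable; generate_random_population is
# defined further down in this module and is only resolved when the demo runs.
def _demo_crowding_selection():
    rn.seed(0)
    np.random.seed(0)
    demo_population = generate_random_population(4, 6)
    demo_scores = np.array([[1.0, 4.0],
                            [2.0, 3.0],
                            [3.0, 2.0],
                            [4.0, 1.0]])
    return crowding_selection(demo_population, demo_scores, 2)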
def generate_random_population(rows,cols):
population=np.zeros((rows,cols)) # create array of zeros
for i in range(rows):
x=rn.randint(1,cols) # Number of 1s to add
        population[i,0:x]=1 # Add required 1s
np.random.shuffle(population[i]) # Shuffle the 1s randomly
return population
def pareto(scores):
# In this method the array 'scores' is passed to the function.
# Scores have been normalised so that higher values dominate lower values.
# The function returns a Boolean array identifying which rows of the array 'scores' are non-dominated (the Pareto front)
# Method based on assuming everything starts on Pareto front and then records dominated points
pop_size=len(scores[:,0])
pareto_front=np.ones(pop_size,dtype=bool)
for i in range(pop_size):
for j in range(pop_size):
if all (scores[j]>=scores[i]) and any (scores[j]>scores[i]):
# j dominates i
pareto_front[i]=0
break
return pareto_front
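# Minimal illustrative sketch (hypothetical data, not part of the original
# model): pareto() on normalised scores where higher is better. Row 1 is
# dominated by row 0, so only rows 0 and 2 sit on the Pareto front.
def _demo_pareto():
    demo_scores = np.array([[0.9, 0.2],
                            [0.8, 0.1],
                            [0.1, 0.9]])
    # Expected result: [True, False, True]
    return pareto(demo_scores)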
def normalise_score(score_matrix,norm_matrix):
# normalise 'score matrix' with reference to 'norm matrix' which gives scores that produce zero or one
    norm_score=np.zeros(np.shape(score_matrix)) # create normalised score matrix with same dimensions as original scores
number_of_scores=len(score_matrix[0,:]) # number of different scores
    for col in range(number_of_scores): # normalise for each score in turn
score_zero=norm_matrix[col,0]
score_one=norm_matrix[col,1]
score_range=score_one-score_zero
norm_score[:,col]=(score_matrix[:,col]-score_zero)/score_range
return norm_score
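# Minimal illustrative sketch (hypothetical data, not part of the original
# model): normalise_score(). norm_matrix holds, per score column, the raw
# values that map to 0 and to 1, so a raw 15 with (zero=10, one=20) becomes
# (15 - 10) / (20 - 10) = 0.5.
def _demo_normalise_score():
    demo_scores = np.array([[10.0, 200.0],
                            [15.0, 100.0]])
    demo_norm_matrix = np.array([[10.0, 20.0],
                                 [100.0, 200.0]])
    # Expected result: [[0.0, 1.0], [0.5, 0.0]]
    return normalise_score(demo_scores, demo_norm_matrix)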
def score(population,TARGET_THROMBOLYSIS_ADMISSIONS,FULL_TRAVEL_MATRIX,NODE_ADMISSIONS,TOTAL_ADMISSIONS,pareto_include,CALC_ALL,nscore_parameters,FULL_HOSPITAL_STATUS,ALLOWABLE_THROMBOLYSIS_DELAY,ALLOWABLE_THROMBECTOMY_DELAY,TRAVEL_MATRIX_BETWEEN_ALL_HOSPITALS_AND_CSC,TARGET_THROMBOLYSIS_TIME,TARGET_THROMBECTOMY_TIME,TARGET_THROMBECTOMY_ADMISSIONS,proportion_eligible_thrombectomy,THROMBOLYSIS_TARGET_DISTANCE_1,THROMBOLYSIS_TARGET_DISTANCE_2,THROMBOLYSIS_TARGET_DISTANCE_3,THROMBECTOMY_TARGET_DISTANCE_1,THROMBECTOMY_TARGET_DISTANCE_2,THROMBECTOMY_TARGET_DISTANCE_3,DOOR_TO_NEEDLE,INHOSPITAL_TRANSFER,DOOR_TO_GROINdir):
# print('SCORE start: ',datetime.datetime.now().strftime("%H:%M:%S.%f"))
#Only calculate the score that is needed by the pareto front, as determined by the array: pareto_include
#Unless CALC_ALL=True (set for the last generation) as then print out all the parameter values
CALC_ALL=True # MA reporting all
# Score_matrix:
# 0: Number of hospitals
# 1: Average distance
# 2: Maximum distance
    # 3: Minimise Maximum thrombolysis admissions to any one hospital
    # 4: Maximise Minimum thrombolysis admissions to any one hospital
# 5: Max/Min Admissions ratio
# 6: Proportion thrombolysis patients within target distance 1 (thrombolysis)
# 7: Proportion thrombolysis patients within target distance 2 (thrombolysis)
# 8: Proportion thrombolysis patients within target distance 3 (thrombolysis)
# 9: Proportion thrombolysis patients attending unit with target admission numbers
# 10: Proportion of thrombolysis patients meeting distance 1 and admissions target
# 11: Proportion of thrombolysis patients meeting distance 2 and admissions target
# 12: Proportion of thrombolysis patients meeting distance 3 and admissions target
    # 13: Clinical benefit if thrombolysis (fixed door to needle = mins + fixed onset to travelling in ambulance time = mins + travel time which is model dependent). Additional benefit per 100 treatable patients
# 14: Proportion of patients receiving thrombolysis within target time
# 15: Proportion of patients receiving thrombectomy within target time
# 16: Proportion patients within target distance 1 (thrombectomy: include the direct travel and if necessary the transfer travel to get to CSC which includes the 60 mins that's been included in the inter travel matrix)
# 17: Proportion patients within target distance 2 (thrombectomy: include the direct travel and if necessary the transfer travel to get to CSC which includes the 60 mins that's been included in the inter travel matrix)
# 18: Proportion patients within target distance 3 (thrombectomy: include the direct travel and if necessary the transfer travel to get to CSC which includes the 60 mins that's been included in the inter travel matrix)
# 19: Proportion of CSC that have > TARGET THROMBECTOMY ADMISSIONS
    # 20: Minimise the Maximum thrombectomy admissions to any one hospital
    # 21: Maximise the Minimum thrombectomy admissions to any one hospital
    # 22: Average time to thrombectomy
pop_size=len(population[:,0]) # Count number of solutions to evaluate
#INITIALISE THE ARRAYS FOR STORING RESULTS
score_matrix=np.zeros((pop_size,nscore_parameters)) # Create an empty score matrix
thrombolysis_admissions_matrix=np.zeros((pop_size,len(FULL_TRAVEL_MATRIX[0,:])))#store the hospital admissions, col = hospital, row = population
thrombectomy_admissions_matrix=np.zeros((pop_size,len(FULL_TRAVEL_MATRIX[0,:])))#Where patients go for their thrombectomy. Store the hospital admissions, col = hospital, row = population
transferred_admissions_matrix=np.zeros((pop_size,len(FULL_TRAVEL_MATRIX[0,:])))#Where patients go for their thrombectomy. Store the hospital admissions, col = hospital, row = population
total_patients_matrix=np.zeros((pop_size,len(FULL_TRAVEL_MATRIX[0,:])))#Where patients go for their thrombectomy. Store the hospital admissions, col = hospital, row = population
for i in range(pop_size): # Loop through population of solutions
# print('POPULATION LOOP: ',i, " ",datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results=np.zeros((len(NODE_ADMISSIONS),16))
# Node results stores results by patient node. These are used in the calculation of results
# Node results may be of use to export at later date (e.g. for detailed analysis of one scenario)
# Col 0: Distance to closest hospital
# Col 1: Patients within target distance 1 (boolean)
# Col 2: Patients within target distance 2 (boolean)
# Col 3: Patients within target distance 3 (boolean)
# Col 4: Hospital ID
# Col 5: Number of admissions to hospital ID
# Col 6: Does hospital meet admissions target (boolean)
# Col 7: Admissions and target distance 1 both met (boolean)
# Col 8: Admissions and target distance 2 both met (boolean)
# Col 9: Admissions and target distance 3 both met (boolean)
# Col 10: Time to thrombolysis treatment
# Col 11: Time to thrombectomy treatment
# Col 12: Number of thrombectomy admissions to hospital ID
# Col 13: Travel time to CSC (so either just direct ot CSC, or the direct ot HASU + transfer travel)
# Node choice stores by patient node the nearest HASU and CSC and other travel values related to these nearest locations.
# Then use the user defined values to decide where that patient goes
# Col 0: Distance to closest solution hospital
# Col 1: Distance to closest solution CSC
# Col 2: ID of closest solution hospital
# Col 3: ID of closest solution CSC
        # Col 4: Distance from HASU to closest CSC from closest solution hospital
# Col 5: ID for the nearest solution CSC from the closest solution hospital
        #KP REMOVE THESE LINES ONCE IT IS WORKING.... THIS IS JUST A KNOWN CASE TO CHECK IT WORKS
# population[i] = np.array([0,1,1,0,1,1,0,1,1,0,1,1,0,1])
# Count hospitals in each solution
if 0 in pareto_include or CALC_ALL:
score_matrix[i,0]=np.sum(population[i])
#SET UP THE MASKS AND THEN MASK THE INPUT DATA
# Initiate matrix of number of admissions to each hospital (first hospital), and to each CSC
thrombolysis_admissions=np.zeros(np.int(score_matrix[i,0]))
thrombectomy_admissions=np.zeros(np.int(score_matrix[i,0]))
transferred_admissions=np.zeros(np.int(score_matrix[i,0]))
# print('A1.',datetime.datetime.now().strftime("%H:%M:%S.%f"))
# Create the mask to extract from the whole dataset just those hospitals open in the solution
mask_hospitals_in_solution=np.array(population[i],dtype=bool)#For each hospital in the full list: T/F for those open in the solution
# hospital_list=np.where(mask) # list of hospitals in selection. Not currently used
# Use this mask to include only the hospital open in the travel data matrix (from patient to hospitals)
masked_TRAVEL_MATRIX=FULL_TRAVEL_MATRIX[:,mask_hospitals_in_solution]
masked_HOSPITAL_STATUS=FULL_HOSPITAL_STATUS[:,mask_hospitals_in_solution]
#COLUMNS ARE ONLY EVER THE FIXED CSC, THEN FOR EACH SOLUTION THE ROWS ARE FILTERED DEPENDING ON THE HOSPITALS OPEN IN THE SOLUTION
masked_TRAVEL_MATRIX_BETWEEN_SOLUTION_HOSPITALS_AND_CSC=TRAVEL_MATRIX_BETWEEN_ALL_HOSPITALS_AND_CSC[mask_hospitals_in_solution,:]
#Get CSC id from hospital solution set, from the CSC set
mask_csc_from_solution_hospitals=masked_HOSPITAL_STATUS==2#For the number of hospitals in the solution, a T/F for if they are a CSC
mask_csc_from_solution_hospitals=np.array(mask_csc_from_solution_hospitals).reshape(-1,)
        #want an array containing the IDs for the hospitals open in the solution that are CSCs. So if from the 127 full hospital list, a solution has 90 hospitals open and 40 CSCs then the array will have 40 items for ID between 0-90 that are the CSCs
        #pull out the locations for all the "Trues" from the array "mask_csc_from_solution_hospitals"
csc_id_from_solution_hospitals = np.asarray(np.where(mask_csc_from_solution_hospitals==True))
        #Calculate the nearest CSC for each hospital once and use it in the code below
# print('A2.',datetime.datetime.now().strftime("%H:%M:%S.%f"))
TIME_csc_nearest_to_HASU=np.empty(0)
ID_csc_nearest_to_HASU=np.empty(0)
for n in range(len(masked_TRAVEL_MATRIX_BETWEEN_SOLUTION_HOSPITALS_AND_CSC)):
TIME_csc_nearest_to_HASU=np.append(TIME_csc_nearest_to_HASU,np.amin(masked_TRAVEL_MATRIX_BETWEEN_SOLUTION_HOSPITALS_AND_CSC[n,:]))
ID_csc_nearest_to_HASU=np.append(ID_csc_nearest_to_HASU,csc_id_from_solution_hospitals[0,np.argmin(masked_TRAVEL_MATRIX_BETWEEN_SOLUTION_HOSPITALS_AND_CSC[n,:])]) #store the CSC ID (from the hospital solution list) that has the shortest distance from the first hosptial
# print('A3.',datetime.datetime.now().strftime("%H:%M:%S.%f"))
#mask the travel matrix so patients only choose their nearest CSC
masked_CSC_TRAVEL_MATRIX=masked_TRAVEL_MATRIX[:,mask_csc_from_solution_hospitals]
#Count number of CSC in SOLUTION
#https://stackoverflow.com/questions/28663856/how-to-count-the-occurrence-of-certain-item-in-an-ndarray-in-python
CSC_COUNT=(masked_HOSPITAL_STATUS == 2).sum()
#THIS SECTION DETERMINES WHERE TO GO FOR THROMBOLYSIS (BASED ON MODEL 1, 2, 3 or 4)
        # Depending on the model used (1, 2, 3 or 4) calculate each patient's first hospital, and their distance.
        #Important variables returned are:
        #1) first_hospital_masked_ID (this is the ID for the number of hospitals open in the solution)
        #2) node_results[:,0] (the distance to the first_hospital_masked_ID for each patient)
if ALLOWABLE_THROMBECTOMY_DELAY>0:
#MODEL4: GO TO HASU FIRST IF DOING SO DOESN'T DELAY GETTING TO CSC BY x MINUTES
# print('MODEL4 START',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_choice=np.zeros((len(NODE_ADMISSIONS),10))
#FIND CLOSEST HOSPITAL & CSC FROM THOSE OPEN IN THE SOLUTION
node_choice[:,0]=np.amin(masked_TRAVEL_MATRIX,axis=1) # distance to closest solution hospital
node_choice[:,1]=np.amin(masked_CSC_TRAVEL_MATRIX,axis=1) # distance to closest solution CSC
node_choice[:,2]=np.argmin(masked_TRAVEL_MATRIX,axis=1)# ID for the nearest solution hospital
node_choice[:,3]=csc_id_from_solution_hospitals[0,np.argmin(masked_CSC_TRAVEL_MATRIX,axis=1)] #store the CSC ID (from the hospital solution list) that has the shortest distance from the first hosptial
#'KP CAN CALCULATE THE NEAREST CSC FROM EACH HASU ONCE AND LOOK IT UP
node_choice[:,4]=TIME_csc_nearest_to_HASU[np.int_(node_choice[:,2])]
node_choice[:,5]=ID_csc_nearest_to_HASU[np.int_(node_choice[:,2])]
# print('B',datetime.datetime.now().strftime("%H:%M:%S.%f"))
delayed_thrombectomy=(node_choice[:,0]+node_choice[:,4])-node_choice[:,1]
mask_go_to_CSC=delayed_thrombectomy>ALLOWABLE_THROMBECTOMY_DELAY
node_results[:,0]=np.invert(mask_go_to_CSC)*node_choice[:,0]+mask_go_to_CSC*node_choice[:,1]
first_hospital_masked_ID=np.invert(mask_go_to_CSC)*node_choice[:,2]+mask_go_to_CSC*node_choice[:,3]
# print('C',datetime.datetime.now().strftime("%H:%M:%S.%f"))
elif ALLOWABLE_THROMBOLYSIS_DELAY>0:
#MODEL3: CSC has larger catchment
            #Take the min distance for the CSC, and compare with the min distance of the other hospitals whilst taking the allowance into consideration
node_choice=np.zeros((len(NODE_ADMISSIONS),10))
#FIND CLOSEST HOSPITAL FROM ALL IN SOLUTION
node_choice[:,0]=np.amin(masked_TRAVEL_MATRIX,axis=1) # distance to closest solution hospital
node_choice[:,1]=np.amin(masked_CSC_TRAVEL_MATRIX,axis=1) # distance to closest solution CSC
node_choice[:,2]=np.argmin(masked_TRAVEL_MATRIX,axis=1)# ID for the nearest solution hospital
node_choice[:,3]=csc_id_from_solution_hospitals[0,np.argmin(masked_CSC_TRAVEL_MATRIX,axis=1)] #store the CSC ID (from the hospital solution list) that has the shortest distance from the first hosptial
#Binary mask for each patient if they go to a CSC first, or a non CSC first
mask_go_to_CSC=(node_choice[:,1]-ALLOWABLE_THROMBOLYSIS_DELAY)<node_choice[:,0]
#Use Binary mask to take the distance travel to first destination
node_results[:,0]=np.invert(mask_go_to_CSC)*node_choice[:,0]+mask_go_to_CSC*node_choice[:,1]
#Use Binary mask to take the ID to first destination
first_hospital_masked_ID=np.invert(mask_go_to_CSC)*node_choice[:,2]+mask_go_to_CSC*node_choice[:,3]
elif ALLOWABLE_THROMBOLYSIS_DELAY<=0:
#MODEL 1 (nearest) OR MODEL2 (go to CSC if within set time). Model 2 needs information from Model 1 first
#MODEL 1
            #Distance to closest hospital [MODEL 1].
# print('MODEL 1&2 START',datetime.datetime.now().strftime("%H:%M:%S.%f"))
first_hospital_masked_ID=np.argmin(masked_TRAVEL_MATRIX,axis=1) # ID to first hospital
# print('D12',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results[:,0]=np.amin(masked_TRAVEL_MATRIX,axis=1) # distance to first hospital (includes any adjustments for the CSC in model 3 - to be rectified in next step)
if ALLOWABLE_THROMBOLYSIS_DELAY<0:
#MODEL 2 (go to CSC if within set time)
# if ALLOWABLE_THROMBOLYSIS_DELAY<0:#GO TO CSC IF WITHIN x MINUTES, REGARDLESS OF ANOTHER CLOSER CENTRE. ELSE CHOOSE CLOSEST
# print('MODEL 2',datetime.datetime.now().strftime("%H:%M:%S.%f"))
csc_nearest_distance=np.amin(masked_CSC_TRAVEL_MATRIX,axis=1) # distance to closest solution CSC
csc_nearest_ID=csc_id_from_solution_hospitals[0,np.argmin(masked_CSC_TRAVEL_MATRIX,axis=1)] #store the CSC ID (from the hospital solution list) that has the shortest distance from the first hosptial
# "mask_go_to_CSC" in this part identifies patients that go to CSC because < n minutes, and not necessarily due to being closest. The next use of this array stores whether go to CSC or not, as more patients may go to a CSC as it's the closest even though it's > n minutes
mask_go_to_CSC=csc_nearest_distance<=-ALLOWABLE_THROMBOLYSIS_DELAY
# Take the distance from the CSC if go there as < fixed distance... else take it from the min of the full hospital options
node_results[:,0]=np.invert(mask_go_to_CSC)*node_results[:,0]+mask_go_to_CSC*csc_nearest_distance
# print('F',datetime.datetime.now().strftime("%H:%M:%S.%f"))
#Overwrite the "first_hospital_masked_ID" for the CSC ID when that is <45mins
first_hospital_masked_ID=np.invert(mask_go_to_CSC)*first_hospital_masked_ID+mask_go_to_CSC* csc_nearest_ID#node_choice[:,3]
#FOR ALL MODELS, NOW HAVE THE FIRST LOCATION THE PATIENTS ATTEND, SO NOW ALL GETTING THROMBOLYSIS. CHECK HOW MANY SATISFY THE TARGET DISTANCES:
# print('H',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results[:,1]=node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_1 # =1 if target distance 1 met
node_results[:,2]=node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_2 # =1 if target distance 2 met
node_results[:,3]=node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_3 # =1 if target distance 3 met
# print('I',datetime.datetime.now().strftime("%H:%M:%S.%f"))
#THROMBECTOMY #THROMBECTOMY #THROMBECTOMY #THROMBECTOMY
#THIS SECTION DETERMINES WHERE TO GO FOR THROMBECTOMY. EITHER ALREADY AT A CSC, OR AT A HASU AND NEED A TRANSFER TO NEAREST CSC FROM HASU
first_hospital_masked_ID=first_hospital_masked_ID.astype(int)
#https://stackoverflow.com/questions/19676538/numpy-array-assignment-with-copy
#https://stackoverflow.com/questions/6431973/how-to-copy-data-from-a-numpy-array-to-another?rq=1
#Initialise arrays
csc_attended_ID = np.empty_like (first_hospital_masked_ID)
np.copyto(csc_attended_ID,first_hospital_masked_ID)
transfer_travel=np.empty(0)
thrombolysis_to_thrombectomy_time=np.empty(0)
csc_attended_ID = np.empty_like (first_hospital_masked_ID)
thrombectomy_admissions_transferred = np.empty_like (NODE_ADMISSIONS)#Initialise
        #Use NUMPY FANCY INDEXING to populate the CSC and TIME using the first_hospital_masked_ID array (which holds the indices we want for each patient)
transfer_travel=TIME_csc_nearest_to_HASU[first_hospital_masked_ID]
csc_attended_ID= ID_csc_nearest_to_HASU[first_hospital_masked_ID]
        #Which patients need a transfer for thrombectomy? i.e. which have a different 1st and 2nd hospital (not at a CSC for their first hospital)
mask_transfer_patient=csc_attended_ID!=first_hospital_masked_ID#Boolean storing whether each patient is moving (=true, has a different first and second hospital location) or not moving (= false, has same first ans second hospital)
#For the patients that are needing a transfer, store the number of admissions (usually 10% of stroke admissions)
thrombectomy_admissions_transferred=mask_transfer_patient*(NODE_ADMISSIONS*proportion_eligible_thrombectomy)#Populate with admissions if that patient location has a different first and second hospital location
#Record thrombectomy admissions transfering to CSC
transferred_admissions=np.bincount(np.int_(csc_attended_ID),weights=thrombectomy_admissions_transferred) # np.bincount with weights sums
#Record thrombectomy admissions leaving HASU
transferred_admissions=transferred_admissions-np.bincount(np.int_(first_hospital_masked_ID),weights=thrombectomy_admissions_transferred) # np.bincount with weights sums
# print('N',datetime.datetime.now().strftime("%H:%M:%S.%f"))
thrombolysis_to_thrombectomy_time=transfer_travel+INHOSPITAL_TRANSFER+DOOR_TO_GROINdir
# print('O',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results[:,4]=first_hospital_masked_ID # stores hospital ID in case table needs to be exported later, but bincount below doesn't work when stored in NumPy array (which defaults to floating decimal)
# Create matrix of number of admissions to each hospital (first hospital)
thrombolysis_admissions=np.bincount(np.int_(first_hospital_masked_ID),weights=NODE_ADMISSIONS) # np.bincount with weights sums
thrombolysis_admissions_matrix[i,mask_hospitals_in_solution]=thrombolysis_admissions#putting the hospital admissions into a matrix with column per hospital, row per solution. Used to output to sheet
# print('Q',datetime.datetime.now().strftime("%H:%M:%S.%f"))
# Create matrix of number of admissions to each hospital (CSC)
        #csc_attended_ID uses the same ID allocated to each hospital as for the solution hospitals, and so does not start from 0 for the first (i.e. could just be ID 5 as a CSC), so use the "mask" to put the values into the matrix, and not "mask_full_CSC"
thrombectomy_admissions[0:np.int_((np.amax(csc_attended_ID)+1))]=np.bincount(np.int_(csc_attended_ID),weights=NODE_ADMISSIONS)*proportion_eligible_thrombectomy # np.bincount with weights sums
# print('R',datetime.datetime.now().strftime("%H:%M:%S.%f"))
thrombectomy_admissions_matrix[i,mask_hospitals_in_solution]=thrombectomy_admissions#putting the hospital admissions into a matrix with column per hospital, row per solution. Used to output to sheet
# print('S',datetime.datetime.now().strftime("%H:%M:%S.%f"))
transferred_admissions_matrix[i,mask_hospitals_in_solution]=transferred_admissions#putting the hospital admissions into a matrix with column per hospital, row per solution. Used to output to sheet
# print('T',datetime.datetime.now().strftime("%H:%M:%S.%f"))
# record closest hospital (unused)
node_results[:,5]=np.take(thrombolysis_admissions,np.int_(first_hospital_masked_ID)) # Lookup admissions to the thrombectomy hospital patient attends
node_results[:,12]=np.take(thrombectomy_admissions,np.int_(csc_attended_ID)) # Lookup admissions to the thrombectomy hospital patient attends
node_results[:,6]=node_results[:,5]>TARGET_THROMBOLYSIS_ADMISSIONS # =1 if admissions target met
node_results[:,13]=node_results[:,0]+transfer_travel
# Calculate average distance by multiplying node distance * admission numbers and divide by total admissions
if 1 in pareto_include or CALC_ALL:
weighted_distances=np.multiply(node_results[:,0],NODE_ADMISSIONS)
average_distance=np.sum(weighted_distances)/TOTAL_ADMISSIONS
score_matrix[i,1]=average_distance
# Max distance for any one patient
if 2 in pareto_include or CALC_ALL:
score_matrix[i,2]=np.max(node_results[:,0])
# Max, min and max/min number of thrombolysis admissions to each hospital
if 3 in pareto_include or CALC_ALL:
score_matrix[i,3]=np.max(thrombolysis_admissions)
if 4 in pareto_include or CALC_ALL:
score_matrix[i,4]=np.min(thrombolysis_admissions)
if 5 in pareto_include or CALC_ALL:
if score_matrix[i,4]>0:
score_matrix[i,5]=score_matrix[i,3]/score_matrix[i,4]
else:
score_matrix[i,5]=0
# Calculate proportion patients within target distance/time
if 6 in pareto_include or CALC_ALL:
score_matrix[i,6]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_1])/TOTAL_ADMISSIONS
if 7 in pareto_include or CALC_ALL:
score_matrix[i,7]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_2])/TOTAL_ADMISSIONS
if 8 in pareto_include or CALC_ALL:
score_matrix[i,8]=np.sum(NODE_ADMISSIONS[node_results[:,0]<=THROMBOLYSIS_TARGET_DISTANCE_3])/TOTAL_ADMISSIONS
# Calculate proportion patients attending hospital with target admissions
if 9 in pareto_include or CALC_ALL:
score_matrix[i,9]=np.sum(thrombolysis_admissions[thrombolysis_admissions>=TARGET_THROMBOLYSIS_ADMISSIONS])/TOTAL_ADMISSIONS
if 10 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,7]=(node_results[:,1]+node_results[:,6])==2 # true if admissions and target distance 1 both met
sum_patients_addmissions_distance1_met=np.sum(NODE_ADMISSIONS[node_results[:,7]==1])
score_matrix[i,10]=sum_patients_addmissions_distance1_met/TOTAL_ADMISSIONS
if 11 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,8]=(node_results[:,2]+node_results[:,6])==2 # true if admissions and target distance 2 both met
sum_patients_addmissions_distance2_met=np.sum(NODE_ADMISSIONS[node_results[:,8]==1])
score_matrix[i,11]=sum_patients_addmissions_distance2_met/TOTAL_ADMISSIONS
if 12 in pareto_include or CALC_ALL:
            # Sum patients who meet distance targets
node_results[:,9]=(node_results[:,3]+node_results[:,6])==2 # true if admissions and target distance 3 both met
sum_patients_addmissions_distance3_met=np.sum(NODE_ADMISSIONS[node_results[:,9]==1])
score_matrix[i,12]=sum_patients_addmissions_distance3_met/TOTAL_ADMISSIONS
if 13 in pareto_include or CALC_ALL:
# Clinical benefit
score_matrix[i,13]=0
if 14 in pareto_include or CALC_ALL:
#Time to thrombolysis
# print('U',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results[:,10]=node_results[:,0]+DOOR_TO_NEEDLE
score_matrix[i,14]=np.sum(NODE_ADMISSIONS[node_results[:,10]<=TARGET_THROMBOLYSIS_TIME])/TOTAL_ADMISSIONS
if 15 in pareto_include or CALC_ALL:
#Time to thrombectomy = Journey to location1 + thrombolysis at location1 + transfer_time(if going to different location) + door_to_groin (different if in CSC for throbolysis, or transferred in)
# print('V',datetime.datetime.now().strftime("%H:%M:%S.%f"))
node_results[:,11]=node_results[:,10] + thrombolysis_to_thrombectomy_time
score_matrix[i,15]=np.sum(NODE_ADMISSIONS[node_results[:,11]<=TARGET_THROMBECTOMY_TIME])/TOTAL_ADMISSIONS
# Calculate proportion patients travel within target distance/time to get to their CSC location (for thrombectomy)
if 16 in pareto_include or CALC_ALL:
# print('W',datetime.datetime.now().strftime("%H:%M:%S.%f"))
score_matrix[i,16]=np.sum(NODE_ADMISSIONS[node_results[:,13]<=THROMBECTOMY_TARGET_DISTANCE_1])/TOTAL_ADMISSIONS
if 17 in pareto_include or CALC_ALL:
# print('X',datetime.datetime.now().strftime("%H:%M:%S.%f"))
score_matrix[i,17]=np.sum(NODE_ADMISSIONS[node_results[:,13]<=THROMBECTOMY_TARGET_DISTANCE_2])/TOTAL_ADMISSIONS
if 18 in pareto_include or CALC_ALL:
# print('Y',datetime.datetime.now().strftime("%H:%M:%S.%f"))
score_matrix[i,18]=np.sum(NODE_ADMISSIONS[node_results[:,13]<=THROMBECTOMY_TARGET_DISTANCE_3])/TOTAL_ADMISSIONS
#Proportion of CSC that have > TARGET THROMBECTOMY ADMISSIONS
if 19 in pareto_include or CALC_ALL:
# print('Z',datetime.datetime.now().strftime("%H:%M:%S.%f"))
score_matrix[i,19]=np.sum(thrombectomy_admissions[:]>=TARGET_THROMBECTOMY_ADMISSIONS)/CSC_COUNT
# Max and min of thrombectomy admissions to each hospital
if 20 in pareto_include or CALC_ALL:
score_matrix[i,20]=np.max(thrombectomy_admissions[mask_csc_from_solution_hospitals])
if 21 in pareto_include or CALC_ALL:
score_matrix[i,21]=np.min(thrombectomy_admissions[mask_csc_from_solution_hospitals])
# average time to thrombectomy (including door-in-door-out delay)
if 22 in pareto_include or CALC_ALL:
weighted_distances=np.multiply(node_results[:,13],NODE_ADMISSIONS)
average_distance=np.sum(weighted_distances)/TOTAL_ADMISSIONS
score_matrix[i,22]=average_distance
#output this table for each hospital:
#Hospital/Direct admissions (thrombolysis)/Transferred in admissions (for thrombectomy)/Thrombectomy admissions/Total patients
#Calculate clinical benefit: Emberson and Lee
        #Use 115 mins for the onset until travelling in ambulance (30 mins onset to call + 40 mins call to travel + 45 mins door to needle) + ? travel time (as determined by the combination of hospitals open)
# if 13 in pareto_include or CALC_ALL:
# onset_to_treatment_time = distancekm_to_timemin(node_results[:,0])+115
# #constant to be used in the equation
# factor=(0.2948/(1 - 0.2948))
# #Calculate the adjusted odds ratio
# clinical_benefit=np.array(factor*np.power(10, (0.326956 + (-0.00086211 * onset_to_treatment_time))))
# # Patients that exceed the licensed onset to treatment time, set to a zero clinical benefit
# clinical_benefit[onset_to_treatment_time>270]=0
        # #Probability of good outcome per node
# clinical_benefit = (clinical_benefit / (1 + clinical_benefit)) - 0.2948
# #Number of patients with a good outcome per node
# clinical_benefit = clinical_benefit*NODE_ADMISSIONS
# score_matrix[i,13]=np.sum(clinical_benefit)/TOTAL_ADMISSIONS *100
#hospital_admissions_matrix[i,:]=np.transpose(hospital_admissions)#putting the column into a row in the matrix
#np.savetxt('output/admissions_test.csv',hospital_admissions_matrix[i,:],delimiter=',',newline='\n')
total_patients_matrix=thrombolysis_admissions_matrix+transferred_admissions_matrix#putting the hospital admissions into a matrix with column per hospital, row per solution. Used to output to sheet
return (score_matrix,thrombolysis_admissions_matrix,transferred_admissions_matrix,thrombectomy_admissions_matrix,total_patients_matrix)
def unique_rows(a): # stolen off the interwebs
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
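# Minimal illustrative sketch (hypothetical data, not part of the original
# model): unique_rows() views each row as one structured element so np.unique
# can drop duplicate rows while keeping the original dtype and row width.
def _demo_unique_rows():
    demo = np.array([[1, 0, 1],
                     [1, 0, 1],
                     [0, 1, 1]])
    # Expected result: [[0, 1, 1], [1, 0, 1]] (np.unique sorts the rows)
    return unique_rows(demo)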
def fix_hospital_status(l_population,l_HOSPITAL_STATUS):
    #Takes the 5th column from the hospital.csv file: "1" or "2" (CSC) forces the hospital open, "-1" forces it closed
HOSPITAL_STATUS_POPULATION=np.repeat(l_HOSPITAL_STATUS,len(l_population[:,0]),axis=0)#repeat the row "len(child_population[:,0])" number of times, so have 1 per solution row (matching the size of the child_population matrix)
l_population[HOSPITAL_STATUS_POPULATION==1]=1 # Fixes the open hospitals to have a value 1
l_population[HOSPITAL_STATUS_POPULATION==2]=1 # Fixes the open hospitals to have a value 1
l_population[HOSPITAL_STATUS_POPULATION==-1]=0 # Fixes the closed hospitals to have a value 0
return l_population
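# Minimal illustrative sketch (hypothetical data, not part of the original
# model): fix_hospital_status(). Status 1 (open) and 2 (open CSC) are forced
# to 1 in every solution, status -1 (closed) is forced to 0, and any other
# status is left as the GA chose it.
def _demo_fix_hospital_status():
    demo_population = np.array([[0., 1., 0., 1.],
                                [1., 0., 1., 0.]])
    demo_status = np.array([[1, -1, 0, 2]])
    # Expected result: [[1, 0, 0, 1], [1, 0, 1, 1]]
    return fix_hospital_status(demo_population, demo_status)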
def f_location_crossover(l_parent, l_MAXCROSSOVERPOINTS,l_CHROMOSOMELENGTH):
number_crossover_points=rn.randint(1,l_MAXCROSSOVERPOINTS) # random, up to max
crossover_points=rn.sample(range(1,l_CHROMOSOMELENGTH), number_crossover_points) # pick random crossover points in gene, avoid first position (zero position)
    crossover_points=np.append([0],np.sort(crossover_points)) # zero appended at front for calculation of interval to first crossover
intervals=crossover_points[1:]-crossover_points[:-1] # this gives array of number of ones, zeros etc in each section.
intervals=np.append([intervals],[l_CHROMOSOMELENGTH-np.amax(crossover_points)]) # adds in last interval of last cross-over to end of gene
# Build boolean arrays for cross-overs
current_bool=True # sub sections will be made up of repeats of boolean true or false, start with true
# empty list required for append
selection1=[]
    for interval in intervals: # interval is the interval between crossovers (stored in 'intervals')
new_section=np.repeat(current_bool,interval) # create subsection of true or false
current_bool=not current_bool # swap true to false and vice versa
selection1=np.append([selection1],[new_section]) # add the new section to the existing array
selection1=np.array([selection1],dtype=bool) # **** not sure why this is needed but selection1 seems to have lost boolean nature
selection2=np.invert(selection1) # invert boolean selection for second cross-over product
crossover1=np.choose(selection1,l_parent) # choose from parents based on selection vector
crossover2=np.choose(selection2,l_parent)
children=np.append(crossover1,crossover2,axis=0)
return(children)
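# Minimal illustrative sketch (hypothetical data, not part of the original
# model): f_location_crossover() on two length-8 parents cut at up to 3 random
# points. The two children are complementary selections from the parents, so
# with all-zero and all-one parents every column holds exactly one 0 and one 1.
def _demo_location_crossover():
    demo_parents = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                             [1, 1, 1, 1, 1, 1, 1, 1]])
    return f_location_crossover(demo_parents, 3, 8)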
def f_uniform_crossover(l_parent, l_CHROMOSOMELENGTH):
#UNIFORM CROSSOVER MEANS EACH GENE HAS EQUAL CHANCE TO COME FROM EACH PARENT
fromparent1=np.random.random_integers(0,1,(1,l_CHROMOSOMELENGTH)) # create array of 1 rows and chromosome columns and fill with 0 or 1 for which parent to take the gene from
fromparent1=np.array(fromparent1,dtype=bool)
fromparent2=
|
np.invert(fromparent1)
|
numpy.invert
|
from typing import Dict, List
import networkx as nx
import numpy as np
from wknml import parse_nml
from wknml.nml_generation import generate_graph
from wknml.nml_utils import (
ensure_max_edge_length,
calculate_distance_between_nodes,
approximate_minimal_edge_length,
get_vector_between_nodes,
vector_length,
calculate_angle_between_vectors,
)
def test_ensure_max_edge_length():
with open("testdata/nml_with_too_long_edges.nml", "r") as file:
test_nml = parse_nml(file)
max_length = 2.0
scale =
|
np.array(test_nml.parameters.scale)
|
numpy.array
|
# file: test_loads.py
# vim:fileencoding=utf-8:ft=python:fdm=marker
#
# Author: <NAME> <<EMAIL>>
# Created: 2015-08-24 01:34:26 +0200
# Last modified: 2018-12-08T23:02:38+0100
"""
Tests for Load classes and load cases.
"""
import pytest
import beammech as bm
import numpy as np
E = 0.5 * 240000 # Young's Modulus of the beam's material in [MPa]
L = 1000 # Length of the beam in [mm]
P = -500 # Force in [N]
B = 400
H = 30
h = 26
Ix = B * (H ** 3 - h ** 3) / 12
G = 28
A = B * h
def test_load_goodargs(): # {{{1
"""beammech.Load with correct arguments"""
A = bm.Load(kg=1, pos=200)
assert A.size == -9.81
assert A.pos == 200
B = bm.Load(force=-20, pos=300)
assert B.size == -20
assert B.pos == 300
C = bm.Load(kg="1", pos="200")
assert C.size == -9.81
assert C.pos == 200
def test_load_badargs(): # {{{1
"""beammech.Load with faulty arguments"""
with pytest.raises(KeyError):
bm.Load() # All required arguments missing
with pytest.raises(KeyError):
bm.Load(kg=-20) # Required “pos” argument missing
# Required “force” or “kg” argument missing
with pytest.raises(KeyError):
bm.Load(pos=231)
# Required “force” argument misspelt
with pytest.raises(KeyError):
bm.Load(forse=-200, pos=300)
# Argument “pos” or “force” cannot be converted to float
with pytest.raises(ValueError):
bm.Load(force=-120, pos="end")
with pytest.raises(ValueError):
bm.Load(force="-q", pos=200)
def test_clamped_pointload(): # {{{1
"""Clamped beam with point load at end"""
results = bm.solve(
L,
None,
bm.Load(force=P, pos=L),
np.ones(L + 1) * E * Ix,
np.ones(L + 1) * G * A,
np.ones(L + 1) * H / 2,
-np.ones(L + 1) * H / 2,
False,
)
deflection_bm = results.y[L]
deflection_formula = P * L ** 3 / (3 * E * Ix)
reldiff = abs((deflection_bm - deflection_formula) / deflection_formula)
assert reldiff < 0.005
def test_clamped_distributed(): # {{{1
"""Clamped beam with distributed load"""
results = bm.solve(
L,
None,
bm.DistLoad(force=P, start=0, end=L),
np.ones(L + 1) * E * Ix,
np.ones(L + 1) * G * A,
np.ones(L + 1) * H / 2,
-np.ones(L + 1) * H / 2,
False,
)
deflection_bm = results.y[L]
deflection_formula = P * L ** 3 / (8 * E * Ix)
reldiff = abs((deflection_bm - deflection_formula) / deflection_formula)
assert reldiff < 0.005
def test_supported_central_pointload(): # {{{1
"""Ends supported beam with central point load"""
results = bm.solve(
L,
(0, L),
bm.Load(force=P, pos=L / 2),
np.ones(L + 1) * E * Ix,
np.ones(L + 1) * G * A,
np.ones(L + 1) * H / 2,
-np.ones(L + 1) * H / 2,
False,
)
deflection_bm = results.y[int(L / 2)]
deflection_formula = P * L ** 3 / (48 * E * Ix)
reldiff = abs((deflection_bm - deflection_formula) / deflection_formula)
assert reldiff < 0.005
def test_supported_distributed(): # {{{1
"""Ends supported beam with distributed load"""
results = bm.solve(
L,
(0, L),
bm.DistLoad(force=P, start=0, end=L),
np.ones(L + 1) * E * Ix,
np.ones(L + 1) * G * A,
np.ones(L + 1) * H / 2,
-np.ones(L + 1) * H / 2,
False,
)
deflection_bm = results.y[int(L / 2)]
deflection_formula = 5 * P * L ** 3 / (384 * E * Ix)
reldiff = abs((deflection_bm - deflection_formula) / deflection_formula)
assert reldiff < 0.005
def test_supported_triangl(): # {{{1
"""Ends supported beam with triangle load"""
results = bm.solve(
L,
(0, L),
bm.TriangleLoad(force=P, start=0, end=L),
np.ones(L + 1) * E * Ix,
np.ones(L + 1) * G * A,
np.ones(L + 1) * H / 2,
-
|
np.ones(L + 1)
|
numpy.ones
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Author: <NAME> (<EMAIL>)
# @Date: 2018-12-07 10:10
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases import *
from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
|
np.random.seed(1)
|
numpy.random.seed
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
from scipy.linalg import hankel
from scipy.signal import convolve2d
import scipy.io as sio
import matplotlib.pyplot as plt
from ..tools import *
def bispectrumdx(x, y, z, nfft=None, wind=None, nsamp=None, overlap=None):
"""
Parameters:
x - data vector or time-series
y - data vector or time-series (same dimensions as x)
z - data vector or time-series (same dimensions as x)
nfft - fft length [default = power of two > segsamp]
wind - window specification for frequency-domain smoothing
if 'wind' is a scalar, it specifies the length of the side
of the square for the Rao-Gabr optimal window [default=5]
if 'wind' is a vector, a 2D window will be calculated via
w2(i,j) = wind(i) * wind(j) * wind(i+j)
if 'wind' is a matrix, it specifies the 2-D filter directly
segsamp - samples per segment [default: such that we have 8 segments]
- if x is a matrix, segsamp is set to the number of rows
overlap - percentage overlap, allowed range [0,99]. [default = 50];
- if x is a matrix, overlap is set to 0.
Output:
Bspec - estimated bispectrum: an nfft x nfft array, with origin
at the center, and axes pointing down and to the right.
waxis - vector of frequencies associated with the rows and columns
of Bspec; sampling frequency is assumed to be 1.
"""
(lx, lrecs) = x.shape
(ly, nrecs) = y.shape
(lz, krecs) = z.shape
if lx != ly or lrecs != nrecs or ly != lz or nrecs != krecs:
raise Exception('x, y and z should have identical dimensions')
if ly == 1:
x = x.reshape(1,-1)
y = y.reshape(1,-1)
z = z.reshape(1,-1)
ly = nrecs
nrecs = 1
if not overlap: overlap = 50
overlap = max(0,min(overlap,99))
if nrecs > 1: overlap = 0
if not nsamp: nsamp = 0
if nrecs > 1: nsamp = ly
if nrecs == 1 and nsamp <= 0:
nsamp = np.fix(ly/ (8 - 7 * overlap/100))
    if nfft is None or nfft < nsamp:
nfft = 2**nextpow2(nsamp)
overlap = np.fix(overlap/100 * nsamp)
nadvance = nsamp - overlap
nrecs = np.fix((ly*nrecs - overlap) / nadvance)
# create the 2-D window
if not wind: wind = 5
m = n = 0
try:
(m, n) = wind.shape
except ValueError:
(m,) = wind.shape
n = 1
except AttributeError:
m = n = 1
window = wind
# scalar: wind is size of Rao-Gabr window
if max(m, n) == 1:
winsize = wind
if winsize < 0: winsize = 5 # the window size L
winsize = winsize - (winsize%2) + 1 # make it odd
if winsize > 1:
mwind = np.fix(nfft/winsize) # the scale parameter M
lby2 = (winsize - 1)/2
theta = np.array([np.arange(-1*lby2, lby2+1)]) # force a 2D array
opwind = np.ones([winsize, 1]) * (theta**2) # w(m,n) = m**2
opwind = opwind + opwind.transpose() + (np.transpose(theta) * theta) # m**2 + n**2 + mn
opwind = 1 - ((2*mwind/nfft)**2) * opwind
Hex = np.ones([winsize,1]) * theta
Hex = abs(Hex) + abs(np.transpose(Hex)) + abs(Hex + np.transpose(Hex))
Hex = (Hex < winsize)
opwind = opwind * Hex
opwind = opwind * (4 * mwind**2) / (7 * np.pi**2)
else:
opwind = 1
# 1-D window passed: convert to 2-D
elif min(m, n) == 1:
window = window.reshape(1,-1)
        if np.any(np.imag(window) != 0):
print("1-D window has imaginary components: window ignored")
window = 1
        if np.any(window < 0):
print("1-D window has negative components: window ignored")
window = 1
lwind = np.size(window)
w = window.ravel(order='F')
# the full symmetric 1-D
windf = np.array(w[range(lwind-1, 0, -1) + [window]])
window = np.array([window], np.zeros([lwind-1,1]))
# w(m)w(n)w(m+n)
opwind = (windf * np.transpose(windf)) * hankel(np.flipud(window), window)
winsize =
|
np.size(window)
|
numpy.size
|
#BSD 3-Clause License
#=======
#
#Copyright (c) 2017, Xilinx Inc.
#All rights reserved.
#
#Based on <NAME>'s MNIST example code
#Copyright (c) 2015-2016, <NAME>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
#EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import sys
import os
import time
from argparse import ArgumentParser
import numpy as np
np.random.seed(1234) # for reproducibility
# specifying the gpu to use
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu1')
import lasagne
import theano
import theano.tensor as T
import cPickle as pickle
import gzip
import binary_net
import lfc
from collections import OrderedDict
def loadFashionMNIST(prefix="t10k"):
# Define functions for loading Fashion MNIST images and labels.
import gzip
dl_dir = "fashion-mnist-train/"
def load_mnist_images(filename):
# Read the inputs in Yann LeCun's binary format.
with gzip.open(dl_dir + filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
        # The inputs come as raw bytes in [0, 255]; they are rescaled to
        # [-1, +1] and binarised later in the main script.
return data
def load_mnist_labels(filename):
# Read the labels in Yann LeCun's binary format.
with gzip.open(dl_dir+filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# We can now download and read the images and labels with given prefix
X = load_mnist_images('%s-images-idx3-ubyte.gz' % prefix)
y = load_mnist_labels('%s-labels-idx1-ubyte.gz' % prefix)
return {"X":X, "y":y}
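# Minimal illustrative sketch (hypothetical, not part of the original script):
# the loader above returns uint8 images shaped (N, 1, 28, 28) in [0, 255] and
# integer labels shaped (N,). This check assumes the gzip files exist under
# fashion-mnist-train/ and is only run if called explicitly.
def _demo_load_shapes():
    demo = loadFashionMNIST("t10k")
    assert demo["X"].shape[1:] == (1, 28, 28)
    assert demo["X"].shape[0] == demo["y"].shape[0]
    return demo["X"].dtype, demo["y"].dtype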
if __name__ == "__main__":
# Parse some command line options
parser = ArgumentParser(
description="Train the LFC network on the MNIST dataset")
    parser.add_argument('-ab', '--activation-bits', type=int, default=1, choices=[1, 2],
                        help="Quantize the activations to the specified number of bits, default: %(default)s")
    parser.add_argument('-wb', '--weight-bits', type=int, default=1, choices=[1],
                        help="Quantize the weights to the specified number of bits, default: %(default)s")
args = parser.parse_args()
learning_parameters = OrderedDict()
# Quantization parameters
learning_parameters.activation_bits = args.activation_bits
print("activation_bits = "+str(learning_parameters.activation_bits))
learning_parameters.weight_bits = args.weight_bits
print("weight_bits = "+str(learning_parameters.weight_bits))
# BN parameters
batch_size = 64
print("batch_size = "+str(batch_size))
# alpha is the exponential moving average factor
# alpha = .15
learning_parameters.alpha = .1
print("alpha = "+str(learning_parameters.alpha))
learning_parameters.epsilon = 1e-4
print("epsilon = "+str(learning_parameters.epsilon))
# Training parameters
num_epochs = 1000
print("num_epochs = "+str(num_epochs))
# Dropout parameters
learning_parameters.dropout_in = .2 # 0. means no dropout
print("dropout_in = "+str(learning_parameters.dropout_in))
learning_parameters.dropout_hidden = .5
print("dropout_hidden = "+str(learning_parameters.dropout_hidden))
# W_LR_scale = 1.
learning_parameters.W_LR_scale = "Glorot" # "Glorot" means we are using the coefficients from Glorot's paper
print("W_LR_scale = "+str(learning_parameters.W_LR_scale))
# Decaying LR
LR_start = .003
print("LR_start = "+str(LR_start))
LR_fin = 0.0000003
print("LR_fin = "+str(LR_fin))
LR_decay = (LR_fin/LR_start)**(1./num_epochs)
print("LR_decay = "+str(LR_decay))
    # BTW, LR decay might be good for the BN moving average...
save_path = "fashion-mnist-train/fashion_mnist_parameters.npz"
print("save_path = "+str(save_path))
shuffle_parts = 1
print("shuffle_parts = "+str(shuffle_parts))
print('Loading Fashion MNIST dataset...')
train_set_X = loadFashionMNIST("train")["X"][:1000]
train_set_y = loadFashionMNIST("train")["y"][:1000]
valid_set_X = loadFashionMNIST("train")["X"][1000:1350]
valid_set_y = loadFashionMNIST("train")["y"][1000:1350]
test_set_X = loadFashionMNIST("t10k")["X"]
test_set_y = loadFashionMNIST("t10k")["y"]
# Inputs are originally between [0, 255]
# Rescale to put them between [-1, +1]
train_set_X = train_set_X / 255.
valid_set_X = valid_set_X / 255.
test_set_X = test_set_X / 255.
train_set_X = 2*(train_set_X.reshape(-1, 1, 28, 28)) - 1.
valid_set_X = 2*(valid_set_X.reshape(-1, 1, 28, 28)) - 1.
test_set_X = 2*(test_set_X.reshape(-1, 1, 28, 28)) - 1.
# Binarise the inputs.
train_set_X = np.where(train_set_X < 0, -1, 1).astype(theano.config.floatX)
valid_set_X = np.where(valid_set_X < 0, -1, 1).astype(theano.config.floatX)
test_set_X = np.where(test_set_X < 0, -1, 1).astype(theano.config.floatX)
# flatten targets
train_set_y = np.hstack(train_set_y)
valid_set_y = np.hstack(valid_set_y)
test_set_y = np.hstack(test_set_y)
# Onehot the targets
train_set_y = np.float32(
|
np.eye(10)
|
numpy.eye
|
import numpy as np
import time
import threading
import sys
class KalmanFilter(object):
def __init__(self, filter_outliers=True, patience_ms=100., verbose=False):
self.tracked_objects = []
self.next_id = -1
self.Q = self.process_noise_covariance()
self.R = self.observation_noise_covariance()
        # H is the measurement model which casts the next state onto an observation vector
self.H = np.array([[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0.]], dtype=np.float64)
# other parameters
self.filter_outliers = filter_outliers
self.verbose = verbose
self.processing = False
self.patience_sec = patience_ms * 1e-3
# start the timer thread
self.thread_ = threading.Thread(target=self.timer_thread)
self.thread_is_alive = True
# Start the thread
self.exit_event = threading.Event()
        self.thread_.daemon = True  # run the timer as a daemon thread
self.thread_.start()
def get_new_id(self, max_id=9999):
self.next_id = ((self.next_id + 1) % (max_id + 1))
return self.next_id
def add_new_object(self, obj_class, x, y, z, yaw):
if self.verbose:
print('Adding new object: {}'.format(obj_class))
state = [x, y, z, yaw, 0., 0., 0., 0.]
state_sigma = np.zeros((8,8), dtype=np.float32)
state_sigma[:4,:4] = self.R
obj = {'id':self.get_new_id(), 'class':obj_class,
'state_mu':state, 'state_sigma':state_sigma, 'timestamp':time.time()}
self.tracked_objects.append(obj)
def find_matching_object_idx(self, obj_class, x, y, z, yaw):
return_idx = -1
min_dist = 1e5
for i, tracked_obj in enumerate(self.tracked_objects):
if obj_class == tracked_obj['class']:
dist = np.sqrt((x - tracked_obj['state_mu'][0]) ** 2 + \
(y - tracked_obj['state_mu'][1]) ** 2 + \
(z - tracked_obj['state_mu'][2]) ** 2 )
variance_xyz = [tracked_obj['state_sigma'][0,0], tracked_obj['state_sigma'][1,1], tracked_obj['state_sigma'][2,2]]
sigma_dist = np.sqrt(variance_xyz[0] + variance_xyz[1] + variance_xyz[2])
if dist < min_dist and dist < 3*sigma_dist:
min_dist = dist
# match found
return_idx = i
return return_idx
def timer_thread(self):
# If the child thread is still running
while self.thread_is_alive:
# wait if processing new data
while self.processing:
time.sleep(0.001)
self.processing = True
# iterate through each tracked object
for tracked_obj in list(self.tracked_objects):  # iterate over a copy so removing stale entries below is safe
dt = time.time() - tracked_obj['timestamp']
if dt > self.patience_sec:
if self.verbose:
print('Removing tracked object [{}] with id: {} - no update received for {} secs'.format(
tracked_obj['class'], tracked_obj['id'], dt))
self.tracked_objects.remove(tracked_obj)
self.processing = False
# sleep
time.sleep(self.patience_sec)
def teardown(self):
# wait for '2*patience_sec' to destroy all existing objects
time.sleep(2*self.patience_sec)
if self.verbose:
print('Tearing down the Kalman Filter.')
self.thread_is_alive = False
@staticmethod
def system_matrix(dt=0.033):
"""
Output:
A: 8x8 numpy array for the system matrix.
"""
A = np.array([[1., 0., 0., 0., dt, 0., 0., 0.],
[0., 1., 0., 0., 0., dt, 0., 0.],
[0., 0., 1., 0., 0., 0., dt, 0.],
[0., 0., 0., 1., 0., 0., 0., dt],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.]], dtype=np.float64)
return A
@staticmethod
def process_noise_covariance():
"""
Output:
Q: 8x8 numpy array for the covariance matrix.
"""
Q = np.zeros((8,8), dtype=np.float64)
Q[4:,4:] = np.diag([0.2, 0.2, 0.2, 0.5])
return Q
@staticmethod
def observation_noise_covariance():
"""
Output:
R: 4x4 numpy array for the covariance matrix.
"""
sigma = np.diag([1.0, 1.0, 1.0, 2.0])
return sigma
def process(self, measurements):
"""
Input:
measurements: list of dictionary of measurement [class, x, y, z, yaw]
"""
tracked_objects = []
for measurement in measurements:
# wait if processing new data
while self.processing:
time.sleep(0.001)
self.processing = True
timestamp = time.time()
tracked_obj = None
# find association
associated_idx = self.find_matching_object_idx(
measurement['class'], measurement['x'], measurement['y'],
measurement['z'], measurement['yaw']
)
if associated_idx != -1:
dt = timestamp - self.tracked_objects[associated_idx]['timestamp']
# print(dt)
A = self.system_matrix(dt)
# Prediction
mu_bar_next = np.dot(A, self.tracked_objects[associated_idx]['state_mu'])
sigma_bar_next = np.dot(A, np.dot(self.tracked_objects[associated_idx]['state_sigma'], A.T)) \
+ self.Q
# compute Kalman Gain
kalman_gain_numerator = np.dot(sigma_bar_next, self.H.T)
kalman_gain_denominator = np.dot(self.H, np.dot(sigma_bar_next, self.H.T)) + self.R # this is the innovation covariance matrix, S
kalman_gain = np.dot(kalman_gain_numerator, np.linalg.inv(kalman_gain_denominator))
# Correction
observation = [measurement['x'], measurement['y'], measurement['z'], measurement['yaw']]
expected_observation = np.dot(self.H, mu_bar_next)
# let's compute Mahalanobis distance
S = kalman_gain_denominator
deviation = np.sqrt(np.dot((observation - expected_observation).T, np.dot(np.linalg.inv(S), (observation - expected_observation))))
# outlier rejection
if not self.filter_outliers or deviation <= 1.5:
mu_next = mu_bar_next +
|
np.dot(kalman_gain, (observation - expected_observation).T)
|
numpy.dot
|
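# Hedged aside (illustrative only, not the original module): a single constant-velocity
# Kalman predict/update step with Mahalanobis-distance gating, mirroring the logic of
# KalmanFilter.process above on a 2-state (position, velocity) toy system.
import numpy as np

def kf_step(mu, sigma, z, dt=0.033, q=0.2, r=1.0, gate=1.5):
    A = np.array([[1.0, dt], [0.0, 1.0]])          # system matrix
    H = np.array([[1.0, 0.0]])                     # observe position only
    Q = np.diag([0.0, q])                          # process noise on velocity
    R = np.array([[r]])                            # observation noise
    mu_bar = A @ mu                                # prediction
    sigma_bar = A @ sigma @ A.T + Q
    S = H @ sigma_bar @ H.T + R                    # innovation covariance
    K = sigma_bar @ H.T @ np.linalg.inv(S)         # Kalman gain
    innovation = z - H @ mu_bar
    d = np.sqrt(innovation.T @ np.linalg.inv(S) @ innovation)  # Mahalanobis distance
    if d > gate:                                   # outlier: keep the prediction only
        return mu_bar, sigma_bar
    mu = mu_bar + K @ innovation                   # correction
    sigma = (np.eye(2) - K @ H) @ sigma_bar
    return mu, sigma

mu_demo, sigma_demo = kf_step(np.array([0.0, 1.0]), np.eye(2), np.array([0.05]))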
import sys
import os
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import dok_matrix
import qcfractal.interface as ptl
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import FragmentMatcher
from cmiles.utils import mol_to_map_ordered_qcschema
from openforcefield.topology import Molecule
from openforcefield.topology import Topology
from openforcefield.typing.engines.smirnoff import ForceField
import openforcefield
import smirnoff99frosst as ff
import pickle
import time
import copy
from datetime import timedelta
from QUBEKit.mod_seminario import ModSeminario
from QUBEKit.parametrisation.base_parametrisation import Parametrisation
from QUBEKit.utils import constants
from simtk import openmm
import simtk
from pydantic.error_wrappers import ValidationError
import logging
logger = logging.getLogger()
logger.setLevel(level=logging.ERROR)
bohr2angstrom = 0.529177249
angstrom2nm = .1
hartree2kcalmol = 627.5096080306
client = ptl.FractalClient()
from geometric.internal import *
from geometric.molecule import Molecule as geometric_mol
from geometric import optimize
def CalcInternalHess(coords, gradx, Hx, IC):
"""
coords, gradx, Hx : Cartesian coordinates, gradient, and Hessian in atomic units.
IC : geomeTRIC internal coordinate system (look at line 1766 for how to create this)
"""
# Internal coordinate Hessian using analytic transformation
Hq = IC.calcHess(coords, gradx, Hx)
verbose = False
if verbose:
print("-=# Hessian of the energy in internal coordinates #=-\n")
print("{:20s} {:20s} : {:14s}\n".format('IC1 Name', 'IC2 Name', 'Value'))
for (i,j) in itertools.product(*([range(len(IC.Internals))]*2)):
print("{:20s} {:20s} : {: 14.6f}".format(str(IC.Internals[i]), str(IC.Internals[j]), Hq[i, j]*(hartree2kcalmol/bohr2angstrom**2)))
return Hq
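# Hedged aside (illustrative only): the verbose printout above converts the internal
# coordinate Hessian from atomic units (Hartree/Bohr^2) to kcal/mol/Angstrom^2 using the
# module-level constants, e.g.
hess_au_example = 0.1 * np.ones((3, 3))                                   # hypothetical Hessian block in a.u.
hess_kcal_example = hess_au_example * (hartree2kcalmol / bohr2angstrom**2)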
#=========================================#
#| Set up the internal coordinate system |#
#=========================================#
# First item in tuple: The class to be initialized
# Second item in tuple: Whether to connect nonbonded fragments
# Third item in tuple: Whether to throw in all Cartesians (no effect if second item is True)
CoordSysDict = {'cart':(CartesianCoordinates, False, False),
'prim':(PrimitiveInternalCoordinates, True, False),
'dlc':(DelocalizedInternalCoordinates, True, False),
'hdlc':(DelocalizedInternalCoordinates, False, True),
'tric':(DelocalizedInternalCoordinates, False, False)}
coordsys = 'prim' # kwargs.get('coordsys', 'tric')
CVals = None
CoordClass, connect, addcart = CoordSysDict[coordsys.lower()]
Cons = None
def hyphenate_int_tuple(tup, offset=0):
return "{:d}".format(tup[0]+offset)+ ("-{:d}"*(len(tup)-1)).format(*([x+offset for x in tup[1:]]))
def oFF_key_in_IC_Hessian(index, Internals):
#off_str = hyphenate_int_tuple(index)
exists = False
debug = False
i = 0
n_atoms = len(index)
debug_lines = []
for i, Internal in enumerate(Internals):
hess_atoms = str(Internal).split()[1]
permutations = None
if(n_atoms == 2):
if(type(Internal) is not Distance):
continue
permutations = itertools.permutations(index)
elif(n_atoms == 3):
if(type(Internal) is not Angle):
continue
permutations = [[x[0],index[1],x[1]] for x in itertools.permutations([index[0],index[2]])]
elif(n_atoms == 4):
if(type(Internal) is OutOfPlane):
# geomeTRIC molecule puts the center atom as the first index
#hess_atoms[0], hess_atoms[1] = hess_atoms[1], hess_atoms[0]
reordered_index = tuple([index[1], index[0], index[2], index[3]])
permutations = [[reordered_index[0]] + list(x) for x in itertools.permutations(reordered_index[1:])]
elif(type(Internal) is Dihedral):
# cross our fingers that dihedrals are sequential
permutations = [index, index[::-1]]
else:
continue
else:
raise IndexError("Invalid number of atoms.")
for order_i in permutations:
candidate = hyphenate_int_tuple(order_i)
debug_lines.append("comparing oFF indices " + str(candidate) + " to Hessian " + str(hess_atoms) + " failed")
if(candidate == hess_atoms):
if(debug):
print("HIT comparing oFF indices", candidate, "to Hessian", hess_atoms )
return True, i
if(debug):
print("MISS for", index, "DEBUG:")
for line in debug_lines:
print(line)
print()
return False, -1
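# Hedged illustration (not part of the pipeline): the permutation logic above treats an
# angle i-j-k as equivalent to k-j-i, keeping the central atom fixed, e.g.
angle_index = (4, 2, 7)
equivalent_angles = [[x[0], angle_index[1], x[1]]
                     for x in itertools.permutations([angle_index[0], angle_index[2]])]
# equivalent_angles == [[4, 2, 7], [7, 2, 4]]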
def flatten_list(l):
return [val for vals in l for val in vals ]
def get_all_qcmolid_and_grads(td, minimum=False, get_energy=False, TorsionDrive=True, get_hessians=False, get_gradients=False):
hessian = None
ret_rec = None
ene = None
if(TorsionDrive):
angles = td.get_history(minimum=minimum).keys()
opt_rec_list = ([td.get_history(ang, minimum=minimum) for ang in angles])
if(minimum):
opt_rec = opt_rec_list
if(get_gradients):
ret_rec = ([opt.get_trajectory()[-1] for opt in opt_rec])
opt_ang = flatten_list(angles)
mol_id = [opt.get_final_molecule() for opt in opt_rec]
if(get_energy):
ene = [opt.get_final_energy() for opt in opt_rec]
else:
#TODO: Energy for intermediates
opt_rec = flatten_list(opt_rec_list)
ret_rec_list = ([opt.get_trajectory() for opt in opt_rec])
ret_rec = flatten_list(ret_rec_list)
ang_expand = [a * len(l) for a,l in zip(angles, opt_rec_list)]
ang_full = [[a]*l for a,l in zip(flatten_list(ang_expand),[len(x) for x in ret_rec_list])]
opt_ang = flatten_list(ang_full)
mol_id = [rec.molecule for rec in ret_rec]
if(get_energy):
ene = [opt.get_final_energy() for opt in opt_rec]
if(not get_gradients):
ret_rec = None
opt_ang = np.array(opt_ang)
srt = opt_ang.argsort()
mol_id = [mol_id[i] for i in srt]
if(get_gradients):
ret_rec = [ret_rec[i] for i in srt]
ene = np.array(ene)[srt]
opt_ang = opt_ang[srt]
info = {}
tddict = td.dict(encoding="json")
for key in ['keywords', 'id']:
info[key] = tddict[key]
tddict = None
else:
opt_rec = td
convert = hartree2kcalmol/bohr2angstrom**2
convert = 1
opt_ang = [None]
if(minimum):
if(get_gradients):
ret_rec = [opt.get_trajectory()[-1] for opt in opt_rec]
mol_id = [opt.get_final_molecule() for opt in opt_rec]
if(get_energy):
ene = [opt.get_final_energy() for opt in opt_rec]
if(get_hessians):
hessian = []
for i in mol_id:
try:
h = client.query_results(molecule=i.id,driver="hessian")[0].return_result
hessian.append(convert*np.array(h))
except (IndexError, TypeError):
print("No hessian for this geometry!")
hessian.append(None)
else:
ret_rec = [opt.get_trajectory() for opt in opt_rec]
if(get_energy):
ene = flatten_list([opt.energies for opt in opt_rec])
mol_id = [rec.molecule for a in ret_rec for rec in a]
if(get_hessians):
hessian = []
for i in mol_id:
try:
h = client.query_results(molecule=i.id,driver="hessian")[0].return_result
hessian.append(convert*np.array(h))
except (IndexError, TypeError):
print("No hessian for this geometry!")
hessian.append(None)
if(not get_gradients):
ret_rec = None
srt = np.arange(len(mol_id))
info = {}
tddict = td[0].dict(encoding="json")
for key in ['keywords', 'id']:
info[key] = tddict[key]
tddict = None
return mol_id, info, ret_rec, hessian, opt_ang, ene, srt
def get_qcmol_from_ids(client, mol_ids):
# one query per id; a single batched call would be client.query_molecules(mol_ids)
return [client.query_molecules(idx)[0] for idx in mol_ids]
def dump_qcmol_to_xyz(mols, fname, comment="", idx=None):
if(fname is None):
fid = None
else:
fid = open(fname,'w')
ret = np.array([qcmol2xyz(mol, fid, comment=comment, idx=idx) for mol in mols])
if(fid is not None):
fid.close()
return ret
def save_hessian(hessian, fid, comment="", map=None, idx=None):
#index_str = "all"
#if(idx is not None):
# index_str = str(idx)
# hessian = hessian[idx]
n = hessian.shape[0]//3
if(idx is None):
idx = range(n)
#atom_idx = idx
#idx = [(3*j + i) for j in idx for i in range(3)]
#print(hessian.shape)
#print(idx)
#if fid is not None:
# for line in hessian:
# line = [line[3*map[idx[i]] + j] for i in range(len(idx)) for j in range(3)]
# fid.write(("{:16.13f} "*len(line) + '\n').format(*(line)))
hess_mapped = np.empty((3*len(idx), 3*len(idx)))
# print(hess_mapped.shape, hessian.shape)
for (ii,jj),(i,j) in zip(itertools.product(range(len(idx)), range(len(idx))),itertools.product(idx, idx)):
hess_mapped[ii*3:ii*3+3,jj*3:jj*3+3] = hessian[map[idx[ii]]*3:map[idx[ii]]*3+3,map[idx[jj]]*3:map[idx[jj]]*3+3]
#idx = range(len(idx))
if(fid is not None):
fid.write("#{:s}\n{:d}\n".format(comment, len(idx)))
pairs = sorted(set([tuple(sorted(k)) for k in itertools.product(range(len(idx)), range(len(idx)))]))
for (i, j) in pairs:
atomi = i
atomj = j
if(atomj < atomi):
continue
# print("[", i,j,"] (",atomi, atomj,")", end=" ")
fid.write(("{:4d} {:4d}"+(" {:12.6f}"*9)+"\n").format(idx[atomi],idx[atomj],*hess_mapped[atomi*3:(atomi*3+3),atomj*3:(atomj*3+3)].reshape(-1)))
# print("\n")
return hess_mapped
def dump_hessians(hessians, fname, comment="", map=None, idx=None):
# print("Saving hessians to", fname)
if(fname is None):
fid = None
else:
fid = open(fname,'w')
ret = np.array([save_hessian(hessian_i, fid, comment=comment, map=map, idx=idx) for hessian_i in hessians])
if(fid is not None):
fid.close()
return ret
def qcmol2xyz(mol, fid, comment="", idx=None):
#fid.write(mol.to_string('xyz'))
#return
mol = dict(mol)
if(idx is None):
idx = np.arange(len(mol['symbols']))
syms = [mol['symbols'][i] for i in idx]
xyzs = (mol['geometry'] * bohr2angstrom)[idx]
struct = zip(syms, xyzs)
N = len(syms)
if(fid is not None):
fid.write("{:d}\n".format(N))
fid.write("{:s}\n".format(comment))
#print("***************")
#print(xyzs)
[fid.write(("{:s}"+"{:10.4f}"*3 + "\n").format(s,*xyz)) for s,xyz in struct]
return xyzs
def get_qcarchive_xyz_grad_ang(mol_smiles, ds, minimum=False, TorsionDrive=True, get_hessians=False, get_gradients=False, get_energy=False):
# get the torsiondrivedataset for some smiles pattern
#opts are optimizationdataset
isTD = (ds.data.collection == "torsiondrivedataset")
if(isTD):
main_rec = ds.data.records[mol_smiles]
td = ds.df.loc[main_rec.name, 'default']
else:
td = client.query_procedures(ds.data.records[mol_smiles].object_map["default"])
mol_ids, info, opt_recs, hessian, opt_ang, ene, srt = get_all_qcmolid_and_grads(td, minimum=minimum, get_energy=get_energy, TorsionDrive=isTD, get_hessians=get_hessians, get_gradients=get_gradients)
if(minimum):
mols = mol_ids
else:
mols = get_qcmol_from_ids(client, mol_ids)
return mols, info, opt_recs, hessian, opt_ang, ene, srt
def mm_potential(forcefield, top, xyz, charge=False):
if(charge):
system = forcefield.create_openmm_system(top)
else:
mols = [Molecule(mol.reference_molecule) for mol in top.topology_molecules]
for i,_ in enumerate(mols):
mols[i].partial_charges = simtk.unit.Quantity(np.zeros(mols[i].n_atoms), simtk.unit.elementary_charge)
system = forcefield.create_openmm_system(top, charge_from_molecules=mols)
integrator = openmm.VerletIntegrator(1.0 * simtk.unit.femtoseconds)
context = openmm.Context(system, integrator)
context.setPositions(xyz * angstrom2nm)
state = context.getState(getEnergy=True)
energy = state.getPotentialEnergy().in_units_of(simtk.unit.kilocalories_per_mole)
return energy
def something():
"""
Placeholder: intended to resolve the index-mapping problem between the
RDKit/openforcefield atom indices and the QCArchive atom indices, which
currently complicates the energy calculations.
"""
return None
def calc_mm_energy(forcefield, top, xyz, component=None, charge=False):
if(component is None):
ene = mm_potential(forcefield, top, xyz, charge=charge)
return ene
modff = copy.deepcopy(forcefield)
force = modff.get_parameter_handler(component)
for term in force.parameters:
if(component == "vdW"):
term.epsilon *= 0.0
if(component in ["Bonds", "Angles"]):
term.k *= 0.0
if(component in ["ProperTorsions", "ImproperTorsions"]):
for i,_ in enumerate(term.k):
term.k[i] *= 0.0
ene = mm_potential(modff, top, xyz, charge=charge)
return ene
def calc_vdw_direct(xyz, labels):
na = len(xyz)
atoms = range(na)
ene = 0.0
r = distance.cdist(xyz,xyz)
for i in atoms:
ii = (i,)
for j in atoms:
if(j >= i):
break
jj = (j,)
eps = np.sqrt(labels[ii].epsilon * labels[jj].epsilon) / labels[jj].epsilon.unit
rmin = (labels[ii].rmin_half + labels[jj].rmin_half)/2.0
rmin = rmin / rmin.unit
rij = r[i,j]
a = rmin/rij
a = a**6
ene += eps * (a**2 - 2*a)
#print(i,j,"r", rij, "ene", ene, "rmin", rmin, "eps", eps, "a", a)
return ene * simtk.unit.kilocalorie_per_mole
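# Hedged aside (plain floats, no simtk units; not part of the original module): the 12-6
# form summed above is E(r) = eps * ((rmin/r)**12 - 2*(rmin/r)**6), minimized at r = rmin.
def lj_pair_energy(r, eps, rmin):
    a = (rmin / r) ** 6
    return eps * (a * a - 2.0 * a)
# lj_pair_energy(rmin, eps, rmin) evaluates to -eps at the minimum.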
def get_frag_matches(frag, ds):
#targets = [i.name for i in ds.data.records.values()]
targets = ds.data.records.keys()
#with open('test2.out', 'w') as fid:
# [fid.write(str(i) + '\n') for i in targets]
#test = ds.df.index
#with open('test3.out', 'w') as fid:
# [fid.write(str(i) + '\n') for i in test]
#test = [i.name for j,i in ds.data.records.items()]
#with open('test4.out', 'w') as fid:
# [fid.write(str(i) + '\n') for i in test]
p = FragmentMatcher.FragmentMatcher()
p.Init(frag)
matches = {}
hits = 0
for mol_smiles in targets:
smiles_pattern = ds.data.records[mol_smiles].name
smiles_pattern = ds.data.records[mol_smiles].attributes['canonical_isomeric_explicit_hydrogen_mapped_smiles']
mol = Chem.MolFromSmiles(smiles_pattern)
mol = Chem.AddHs(mol)
if(p.HasMatch(mol)):
for match in p.GetMatches(mol, uniquify=0): # since oFF is a redundant set
# deg = mol.GetAtomWithIdx(match[0]).GetDegree()
if(not (mol_smiles in matches)):
matches[mol_smiles] = []
matches[mol_smiles].append(list(match))
hits += 1
print("Found", len(matches.keys()), "molecules with", hits, "hits")
return matches
def split_frag_matches(matches, splits):
split_matches = {}
for split in splits:
frags_from_split = {}
for mol,match in matches.items():
#frags_from_split[mol] = [list(np.asarray(frag)[split]) for frag in match]
#split_matches[tuple(split)] = frags_from_split
split_matches.setdefault(mol, {})[tuple(split)] = [list(np.asarray(frag)[split]) for frag in match]
return split_matches
def argsort_labels(l):
letter = [i[0] for i in l]
num = [int(i[1:]) for i in l]
return np.lexsort((num, letter))
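# Hedged usage note (illustrative): argsort_labels orders oFF labels by letter prefix and
# then numerically, e.g. ['b10', 'a2', 'b2'] sorts as a2, b2, b10.
example_label_order = argsort_labels(['b10', 'a2', 'b2'])  # -> array([1, 2, 0])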
def measure_parameters(job_spec, ds,
log=sys.stdout, db_out=None, append=False, skip_if_present=False,
minimum=True, oFF_label=True, empty=False,
out_dir=None, save_mol_xyz=False, save_mol_frag=False,
get_gradients=False, get_qm_energy=False, get_mm_energy=False, gen_MM_charge=False,
get_hessians=False, save_hessian=False, save_hessian_frag=False,
stepTiming=False, totalTiming=False,
save_mol_xyz_debug=False, verbose=True):
valence_params = ["Bonds", "Angles", "ProperTorsions", "ImproperTorsions"]
measure_vtable = {"Bonds": calc_bond_length,
"Angles": calc_angle_degree,
"ProperTorsions": calc_proper_torsion,
"ImproperTorsions": calc_improper_torsion}
if(out_dir is None):
out_dir = os.getcwd()
#TODO: check each job_spec for valid
#calcs = parameters
#matches = match_db[match_key]
#for atom_req, measure_key in zip([2,3,4,4], valence_params):
# if(not (measure_key in calcs)):
# continue
# for smiles,match in matches.items():
# n = len(match)
# for n in [m for m in match]:
# if (len(n) < atom_req):
# log.write( "{:s} calculation requested but smiles {:s} match {:d} has too few atoms\n\n".format(calc, smiles, n))
# return
ffversion = 'smirnoff99Frosst-1.0.9.offxml'
db = None
if (db_out is not None):
if(os.path.exists(os.path.join(out_dir,db_out)) and append):
if(db_out.split('.')[-1] == "npz"):
db = dict(np.load(db_out, allow_pickle=True))
else:
with open(os.path.join(out_dir,db_out),'rb') as fid:
db = pickle.load(fid)
if(db is None):
db = {'frag_info': [],
'oFF': {"ff_version": ffversion, 'oFF_version': openforcefield.__version__} }
db['qca_info'] = { "collection_type": ds.data.collection,
"collection_name": ds.data.name }
db['oFF'].setdefault("a0", {})
db['oFF'].setdefault("b0", {})
db['oFF'].setdefault("i0", {})
db['oFF'].setdefault("t0", {})
db.setdefault("mol_data", {})
# idea here is to only store unique torsiondrive angles once, use hash to determine which td_ang to use
db.setdefault("td_ang", {})
if(oFF_label):
forcefield = ForceField(os.path.join(ff.get_forcefield_dirs_paths()[0],ffversion), disable_version_check=True)
# for mol_n, (target, idx_list) in enumerate(matches.items()):
totaltime = time.time()
log.write("== Dataset {:s}\n".format(ds.data.name))
for mol_n, (target, config) in enumerate(job_spec.items()):
#print(target)
mapped = ds.data.records[target].attributes['canonical_isomeric_explicit_hydrogen_mapped_smiles']
real_smiles = ds.data.records[target].name
target_smiles = mapped #ds.data.records[target].name
if(real_smiles in db['mol_data']):
mol_data = db['mol_data'][real_smiles]
if(skip_if_present):
continue
else:
mol_data = {'qca_key': target }
mol = Chem.MolFromSmiles(target_smiles)
n_heavy = mol.GetNumAtoms()
mol = Chem.AddHs(mol)
n_total = mol.GetNumAtoms()
n_hydrogen = n_total - n_heavy
ids = AllChem.EmbedMultipleConfs(mol, numConfs=1, params=AllChem.ETKDG())
Chem.rdmolops.AssignStereochemistryFrom3D(mol)
superdebug = False
if(superdebug):
debugfid = open("debug.out", 'w')
syms = [a.GetSymbol() for a in mol.GetAtoms()]
mol2 = mol_to_map_ordered_qcschema(mol, mapped)
orig_xyz = np.array(mol.GetConformer(0).GetPositions())/bohr2angstrom
mapped_xyz = np.array(mol2['geometry']).reshape(-1,3)
mapped_idx = distance.cdist(orig_xyz, mapped_xyz).argmin(axis=1)
if(superdebug):
debugfid.write("From smiles built mol\n")
[debugfid.write(str(i)+'\n') for i in enumerate(orig_xyz)]
debugfid.write("Index map (rd to qc?)\n")
[debugfid.write(str(i)+'\n') for i in enumerate(mapped_idx)]
debugfid.write("From remapped mol from cmiles\n")
[debugfid.write(str(i)+'\n') for i in enumerate(mapped_xyz)]
debugfid.write("\n")
# applying this map to the QCMol indices gives the index used in the analyses (e.g. torsiondrive indices)
# qcmol data is 0-based, so is this map; but note that hessian labels are 1-based
log.write("Result= {:8d} Molecule= {:50s} ".format(mol_n, real_smiles, ))
log.flush()
result_time=time.time()
elapsed=time.time()
try:
mols, info, opts, hessian, ang, ene, srt = get_qcarchive_xyz_grad_ang(target, ds, \
minimum=minimum, get_hessians=get_hessians, get_gradients=get_gradients, get_energy=get_qm_energy)
except (ValidationError, TypeError) as e:
log.write("ERROR: QCArchive returned nothing\n\n")
continue
log.write("\nQCID= {:>10s} QCDataRecord= {:50s} \n".format(ds.data.records[target].object_map['default'], target))
minstr = ".min" if minimum else ".all"
mol_out_fname = os.path.join(out_dir,"mol_"+str(mol_n)+minstr+".xyz") if save_mol_xyz else None
atoms = dump_qcmol_to_xyz(mols, mol_out_fname, comment=real_smiles, idx=mapped_idx)
if(superdebug):
debugfid.write("from qcdata base\n")
[debugfid.write(str(j[0]) + " " + str(i[0]) + " " + \
str(i[1]*bohr2angstrom)+'\n') \
for j in enumerate(mols) \
for i in enumerate(dict(j[1])['geometry'])]
if('from_qcmol_to_label_map' not in mol_data):
mapped_idx_inv = distance.cdist(orig_xyz, mapped_xyz).argmin(axis=0)
mol_data['from_qcmol_to_label_map'] = mapped_idx_inv
if(superdebug):
debugfid.write("Index map (qc to rd?)\n")
[debugfid.write(str(i)+'\n') for i in enumerate(mapped_idx_inv)]
debugfid.write("mapped from qcdatabase using mapped_idx_inv\n")
[debugfid.write(str(j[0]) + " " + str(i[0]) + " " + \
str(j[1].geometry[i[1]] *bohr2angstrom)+'\n') \
for j in enumerate(mols) \
for i in enumerate(mapped_idx_inv)]
if(superdebug):
debugfid.write("mapped from qcdatabase using mapped_idx\n")
[debugfid.write(str(j[0]) + " " + str(i[0]) + " " + \
str(j[1].geometry[i[1]] *bohr2angstrom)+'\n') \
for j in enumerate(mols) \
for i in enumerate(mapped_idx)]
debugfid.write("first record in atoms (should be same as above\n")
[debugfid.write(str(j[0]) + " " + str(i[0]) + " " + \
str(i[1]*bohr2angstrom)+'\n') \
for j in enumerate(mols) \
for i in enumerate(atoms[j[0]])]
debugfid.close()
queryelapsed=str(timedelta(seconds=time.time() - elapsed))
if(len(mols) == 0):
if(stepTiming):
log.write("\n QueryTime= {:s}\n".format(queryelapsed))
log.write("ERROR: QCArchive returned nothing\n\n")
continue
elapsed=time.time()
if('info' not in mol_data):
mol_data['info'] = info
if('n_hydrogen' not in mol_data):
mol_data['n_hydrogen'] = n_hydrogen
if('n_heavy' not in mol_data):
mol_data['n_heavy'] = n_heavy
ang_hash = hash(tuple(ang))
if(ang_hash not in db["td_ang"]):
db["td_ang"][ang_hash] = ang
if('td_ang' not in mol_data):
mol_data["td_ang"] = ang_hash
ic_hess = None
IC = None
if(hessian is not None):
if("hessian" not in mol_data):
mol_data["hessian"] = {}
mol_data["hessian"]["cartesian"] = {}
hessian = np.array([hess_i.reshape(len(syms)*3, len(syms)*3) for hess_i in hessian])
if(save_mol_xyz):
IC = CoordClass(geometric_mol(mol_out_fname), build=True,
connect=connect, addcart=addcart, constraints=Cons,
cvals=CVals[0] if CVals is not None else None )
convert = hartree2kcalmol/bohr2angstrom**2
ic_hess = CalcInternalHess(orig_xyz[mapped_idx]/bohr2angstrom,
np.array(opts[0].return_result).reshape(-1,3)[mapped_idx].flatten(),
hessian[0]/convert, IC)
#ic_hess *= (hartree2kcalmol/bohr2angstrom**2)
hessian_out_fname = os.path.join(out_dir,"mol_"+str(mol_n)+minstr+".hessian.nxmxmx9.dat") \
if save_hessian else None
atom_hessian = dump_hessians(hessian, hessian_out_fname, comment=real_smiles, map=mapped_idx ,idx=None)
if(save_mol_xyz_debug):
rdmol_out_fname = os.path.join(out_dir,"rdmol_"+str(mol_n)+minstr+".xyz")
with open(rdmol_out_fname,'w') as fid:
qcmol2xyz({'symbols':syms, "geometry":orig_xyz}, fid, comment="rdkit conformer "+ real_smiles)
if(len(syms) != len(mols[0].symbols)):
log.write("\nERROR: conformer has different number of atoms than QCMolecule.\n")
log.write("Likely H assignment incorrect. Check the mol and rdmol xyz files. Skipping.\n")
continue
if(get_qm_energy):
if('energy' not in mol_data):
mol_data['energy'] = {}
mol_data['energy']['qm'] = ene
if(oFF_label):
mmol = Molecule.from_rdkit(mol, allow_undefined_stereo=True)
top = Topology().from_molecules(mmol)
#topapp = Topology().from_molecules(mmol)
# these will use the unmapped indices, which are the indices from the matches
labels = forcefield.label_molecules(top)[0]
if(get_mm_energy):
if('energy' not in mol_data):
mol_data['energy'] = {'oFF': {} }
elif('oFF' not in mol_data['energy']):
mol_data['energy']['oFF'] = {}
n_qcmol = len(mols)
#reporter = openmm.app.pdbreporter.PDBReporter("min_ene.pdb", 1, False)
#systemapp = forcefield.create_openmm_system(topapp)
#integratorapp = openmm.VerletIntegrator(1.0 * simtk.unit.femtoseconds)
#sim = openmm.app.Simulation(topapp.to_openmm(), systemapp, integratorapp)
for qcmol_i,qcmol in enumerate(atoms):
#xyz = dict(qcmol)['geometry']
#mmol.conformers[0] = qcmol
xyz = qcmol
#print("raw0", orig_xyz[0:2], xyz[0:2], labels['vdW'][(0,)], labels['vdW'][(1,)])
#print("raw", orig_xyz[0], xyz[0]* bohr2angstrom)
#xyz2 = np.array(xyz)[mapped_idx] * bohr2angstrom
#print("real", orig_xyz[0], xyz2[0])
#xyz = np.array(xyz)[mapped_idx_inv] * bohr2angstrom
#print("real", orig_xyz[0], xyz[0])
#print("real", orig_xyz[0], mapped_xyz[0])
total_ene = calc_mm_energy(forcefield, top, xyz, charge=gen_MM_charge)
log.write(" Conformation energy {:4d}/{:4d}\n".format(qcmol_i+1,n_qcmol))
log.flush()
if(get_qm_energy):
ene_str = "{:13.8f} a.u.".format(ene[qcmol_i])
log.write(" QM {:18s}= {:s} \n".format("Energy", ene_str))
log.flush()
ene_name_str = "Energy" if gen_MM_charge else "EnergyNoElec"
log.write(" MM {:18s}= {:10.5f} {:s}\n".format(ene_name_str,
(total_ene/total_ene.unit ),
str(total_ene.unit)))
log.flush()
#sim.context.setPositions(xyz * angstrom2nm)
#state = sim.context.getState(getEnergy = True, getPositions=True)
#sim_ene = state.getPotentialEnergy()
#reporter.report(sim, state)
#log.write(" MM {:18s}= {:9.4f} {:s}\n\n".format("App Energy",
# (sim_ene.value_in_unit(simtk.unit.kilocalorie_per_mole)),
# str(simtk.unit.kilocalorie_per_mole)))
#log.flush()
#sim.minimizeEnergy()
#state = sim.context.getState(getEnergy = True, getPositions=True)
#sim_min_ene = state.getPotentialEnergy()
#reporter.report(sim, state)
#log.write(" MM {:18s}= {:9.4f} {:s}\n\n".format("App Min Energy",
# (sim_min_ene.value_in_unit(simtk.unit.kilocalorie_per_mole)),
# str(simtk.unit.kilocalorie_per_mole)))
#log.flush()
ene_sum = total_ene
#if('direct_vdw' not in mol_data['energy']['oFF']):
# mol_data['energy']['oFF']['direct_vdw'] = []
#mol_data['energy']['oFF']['direct_vdw'].append(calc_vdw_direct(xyz, labels['vdW']))
#print(mol_data['energy']['oFF']['direct_vdw'])
if('epot' not in mol_data['energy']['oFF']):
mol_data['energy']['oFF']['epot'] = []
mol_data["energy"]['oFF']['epot'].append(total_ene)
for component in ['vdW' ] + valence_params:
energy_sans_component = calc_mm_energy(forcefield, top, xyz, component=component, charge=gen_MM_charge)
energy_component = total_ene - energy_sans_component
if(component not in mol_data["energy"]['oFF']):
mol_data["energy"]['oFF'][component] = []
mol_data["energy"]['oFF'][component].append(energy_component)
ene_sum -= energy_component
log.write(" MM {:18s}= {:10.5f} {:s}\n".format(component,
(energy_component/energy_component.unit ),
str(energy_component.unit)))
log.flush()
if(gen_MM_charge):
if('Electrostatics' not in mol_data['energy']['oFF']):
mol_data['energy']['oFF']['Electrostatics'] = []
mol_data['energy']['oFF']['Electrostatics'].append(ene_sum)
log.write(" MM {:18s}= {:10.5f} {:s}\n\n".format("Electrostatics",
(ene_sum/ene_sum.unit ),
str(ene_sum.unit)))
#log.write(" MM {:18s}= {:10.5f} {:s}\n\n".format("Direct vdW",
# (mol_data['energy']['oFF']['direct_vdw'][qcmol_i] / mol_data['energy']['oFF']['direct_vdw'][qcmol_i].unit ),
# str(mol_data['energy']['oFF']['direct_vdw'][qcmol_i].unit)))
log.flush()
log.write("\n Conformation energy summary\n")
if(len(ene) > 1):
if(get_qm_energy):
ene_str = "{:13.8f} +- {:13.8f} a.u.".format(np.mean(ene), np.std(ene))
log.write(" QM {:18s}= {:s} \n".format("Energy", ene_str))
log.write(" MM {:18s}= {:10.5f} +- {:10.5f} {:s}\n\n".format(ene_name_str,
np.mean([i/i.unit for i in mol_data["energy"]['oFF']['epot']]),
np.std([i/i.unit for i in mol_data["energy"]['oFF']['epot']]),
str(mol_data["energy"]['oFF']['epot'][0].unit)))
components_to_print = ['vdW' ] + valence_params
if(gen_MM_charge):
components_to_print += ['Electrostatics']
for component in components_to_print:
log.write(" MM {:18s}= {:10.5f} +- {:10.5f} {:s}\n".format(component,
np.mean([i/i.unit for i in mol_data["energy"]['oFF'][component]]),
np.std([i/i.unit for i in mol_data["energy"]['oFF'][component]]),
str(mol_data["energy"]['oFF'][component][0].unit)))
#for x in labels.items():
# print(list(x[1]))
column_index = {}
hits = 0
if(stepTiming and get_mm_energy):
elapsed=str(timedelta(seconds=time.time() - elapsed))
log.write("\n EnergyTime= {:s}\n\n".format(elapsed))
elapsed=time.time()
for job in config:
parameters = job['measure']
query = job['query']
#log.write("==== Fragment {:s}\n".format(query))
db['frag_info'].append({ "query": query, "map": job['splits'], "measurements": parameters, "minimum": minimum })
# job[mol].append({'query': smiles,
# 'measure': parameters,
# 'splits': splits,
# 'matches': split_matches[mol] })
for complete_frag in job['matches']:
for split in job['splits']:
#for split in job['splits']:
match = [complete_frag[i] for i in split]
#print("Working on", match)
remapped = mapped_idx[match]
# print(mol_n, "\nMATCH", match)
#calculate the requested measurements
for measure_key in parameters:
measure_fn = measure_vtable[measure_key]
#if(measure_key == "Bonds" and measure_key in column_index):
# print("COL KEY BEFORE MOD:", column_index[measure_key])
if(not (measure_key in mol_data)):
mol_data[measure_key] = {}
mol_data[measure_key]["indices"] = {}
column_index[measure_key] = 0
elif(not empty):
column_index[measure_key] = max([x['column_idx'] for x in mol_data[measure_key]["indices"].values()]) + 1
#if(measure_key == "Bonds"):
# print("COL KEY AFTER MOD:", column_index[measure_key])
measure_db, label_db = build_measurement_db(atoms, match, measure_fn, \
oFF_label=True, labels=labels, measure_key=measure_key, \
improper_flag=("ImproperTorsions" in parameters ), empty=empty)
db["oFF"].update(label_db)
param_uniq = []
index_uniq = []
# add measurement to molecules data
if ("values" not in mol_data[measure_key]):
if(not empty):
mol_data[measure_key]["values"] = measure_db["values"]
index_uniq = measure_db['indices'].keys()
else:
for param in measure_db["indices"].keys():
#print("Considering", param)
n_atoms = len(param)
permutations = None
if(n_atoms == 2):
if(measure_key != "Bonds"):
continue
permutations = itertools.permutations(param)
elif(n_atoms == 3):
if(measure_key != "Angles"):
continue
permutations = [[x[0],param[1],x[1]] for x in itertools.permutations([param[0],param[2]])]
elif(n_atoms == 4):
if(measure_key == "ImproperTorsions"):
permutations = [[x[0],param[1],x[1],x[2]] for x in itertools.permutations([param[0]]+list(param[2:]))]
elif(measure_key == "ProperTorsions"):
# cross our fingers that dihedrals are sequential
permutations = [param, param[::-1]]
else:
continue
exists = False
for order_i in permutations:
candidate = tuple(order_i)
if(candidate in mol_data[measure_key]["indices"] ):
exists=True
break
if(not exists):
if(not empty):
param_uniq.append(measure_db["indices"][param]["column_idx"])
#print("Not present", param)
index_uniq.append(param)
#measure_db["indices"][param]["column_idx"] = len(param_uniq) - 1
#else:
#print("Already contained, not adding")
if(not empty):
#for uniq in param_uniq:
#print(mol_data[measure_key]["values"].shape, np.atleast_2d(measure_db["values"][:,param_uniq]).shape)
mol_data[measure_key]["values"] = np.hstack((mol_data[measure_key]["values"], np.atleast_2d(measure_db["values"][:,param_uniq])))
#print("index_uniq is", index_uniq)
add_data = 0
for index_key in index_uniq:
if(index_key[::-1] in mol_data[measure_key]["indices"]):
index_key = index_key[::-1]
mol_data[measure_key]["indices"][index_key] = measure_db["indices"][index_key]
mol_data[measure_key]["indices"][index_key].setdefault("oFF", None)
if(not empty):
mol_data[measure_key]["indices"][index_key]["column_idx"] = column_index[measure_key]
column_index[measure_key] += 1
if(hessian is not None):
for (i,j) in itertools.product(*[index_key]*2):
if((i,j) not in mol_data["hessian"]["cartesian"]):
mol_data["hessian"]["cartesian"][(i,j)] = hessian[:,(i*3):(i*3+3),(j*3):(j*3+3)] #.reshape(-1,9)
prefix="frag"
if(oFF_label and index_key in measure_db["indices"]):
prefix=measure_db["indices"][index_key]['oFF']
frag_fname= os.path.join(out_dir,"mol_"+str(mol_n)+"."+prefix+"_"+hyphenate_int_tuple(index_key,1)+minstr+".hessian.nx9.dat") if save_hessian_frag else None
#dump_hessians(hessian, frag_fname, comment=str(remapped[data_key]), idx=remapped[data_key])
#print("idx_key", index_key, "mapped_idx", mapped_idx, "remapped", remapped, "match", match)
dump_hessians(hessian, frag_fname, comment=str(index_key), map=mapped_idx, idx=index_key )
#dump_hessians(hessian, frag_fname, comment=str(index_key), idx=index_key)
#dump_hessians(hessian, frag_fname, comment=str(remapped), idx=remapped)
##############################################
########################################################################
prefix="frag"
if(oFF_label):
prefix=measure_db["indices"][index_key]['oFF']
#frag_fname= "mol_"+str(mol_n)+"."+prefix+"_"+"{:d}".format(match[0])+("-{:d}"*(len(match)-1)).format(*(match[1:]))+minstr+".xyz"
frag_fname= os.path.join(out_dir,"mol_"+str(mol_n)+"."+prefix+"_"+hyphenate_int_tuple(index_key,1)+minstr+".xyz") if save_mol_frag else None
#dump_qcmol_to_xyz(mols, frag_fname, comment=str(remapped), idx=remapped)
dump_qcmol_to_xyz(mols, frag_fname, comment=str(index_key), idx=[mapped_idx[i] for i in index_key])
hessian_hits = []
if(oFF_label and (IC is not None)):
no_hess_to_label_match = set()
hits = 0
#print("starting IC labeling..")
if(coordsys not in mol_data["hessian"]):
mol_data["hessian"][coordsys] = {}
mol_data["hessian"][coordsys]['IC'] = IC
mol_data["hessian"][coordsys]['values'] = {}
prune_hessian = False
if(prune_hessian):
for measure_key in valence_params:
if(measure_key not in mol_data):
continue
for (idx_i,index_key_i) in enumerate(mol_data[measure_key]['indices'].keys()):
print("trying", index_key_i, mol_data[measure_key]['indices'][index_key_i]['oFF'], end=" ")
hess_key_i = tuple([x+1 for x in index_key_i])
exists, i = oFF_key_in_IC_Hessian(hess_key_i, IC.Internals)
if(exists):
hessian_hits.append(i)
hits+=1
print("HIT", i, "total", hits)
if(index_key_i not in mol_data["hessian"][coordsys]['values']):
mol_data["hessian"][coordsys]['values'][index_key_i] = {}
mol_data["hessian"][coordsys]['values'][index_key_i][index_key_i] = ic_hess[i, i]
for j,Internal in enumerate(IC.Internals):
hess_key_j = tuple(str(Internal).split()[1].split("-"))
index_key_j = tuple([int(x)-1 for x in hess_key_j])
mol_data["hessian"][coordsys]['values'][index_key_i][index_key_j] = ic_hess[i, j]
if(index_key_j not in mol_data["hessian"][coordsys]['values']):
mol_data["hessian"][coordsys]['values'][index_key_j] = {}
mol_data["hessian"][coordsys]['values'][index_key_j][index_key_j] = ic_hess[j, j]
mol_data["hessian"][coordsys]['values'][index_key_j][index_key_i] = ic_hess[j, i]
else:
#print("MISS", i)
no_hess_to_label_match.add(hess_key_i)
else:
for i,Internal in enumerate(IC.Internals):
hess_key_i = tuple(str(Internal).split()[1].split("-"))
index_key_i = tuple([int(x)-1 for x in hess_key_i])
hessian_hits.append(i)
if(index_key_i not in mol_data["hessian"][coordsys]['values']):
mol_data["hessian"][coordsys]['values'][index_key_i] = {}
for j,Internal in enumerate(IC.Internals):
hess_key_j = tuple(str(Internal).split()[1].split("-"))
index_key_j = tuple([int(x)-1 for x in hess_key_j])
mol_data["hessian"][coordsys]['values'][index_key_i][index_key_j] = ic_hess[i, j]
if(index_key_j not in mol_data["hessian"][coordsys]['values']):
mol_data["hessian"][coordsys]['values'][index_key_j] = {}
mol_data["hessian"][coordsys]['values'][index_key_j][index_key_j] = ic_hess[j, j]
mol_data["hessian"][coordsys]['values'][index_key_j][index_key_i] = ic_hess[j, i]
# print the parameter search results
if(oFF_label):
total_keys_with_labels = 0
total_keys_any_labels = 0
for measure_key in valence_params:
counts = {}
if(measure_key not in mol_data):
continue
all_params = [param['oFF'] for param in list(mol_data[measure_key]['indices'].values())]
for param in all_params:
if(param is None):
param = "None"
counts.setdefault(param, 0)
counts[param] += 1
if(param[1] != "0"):
total_keys_with_labels += 1
total_keys_any_labels += 1
found_labels = list(counts.keys())
label_sort_idx = argsort_labels(found_labels)
label_str = ("{:3d} {:3s} "*len(counts.keys())).format(*flatten_list([(counts[found_labels[i]],found_labels[i]) for i in label_sort_idx]))
log.write(" {:18s}= {:3d} | {:s}\n".format(measure_key,
sum([val for val in counts.values()]), label_str))
all_labels = 0
for x in valence_params:
if(x in labels):
all_labels += sum([1 for l in labels[x]])
log.write("\n oFF label coverage: {:8d}/{:8d}\n".format(total_keys_with_labels, all_labels))
if( total_keys_with_labels < all_labels):
if(verbose):
log.write(" oFF terms not mapped:\n")
for param in valence_params:
for term in labels[param]:
if(param in mol_data):
#print(term, term in mol_data[param]["indices"].keys() )
#print(term[::-1], term[::-1] in mol_data[param]["indices"].keys() )
if(term in mol_data[param]["indices"]):
continue
elif(term[::-1] in mol_data[param]["indices"]):
continue
term_str=hyphenate_int_tuple(term, 1)
log.write(" {:5s} {:s}\n".format(param, term_str))
if(IC is not None):
log.write("\n oFF map to IC Hessian: {:8d}/{:8d}\n".format(len(hessian_hits), len(IC.Internals)))
if(len(hessian_hits) < len(IC.Internals)):
#print("hessian_hits", sorted(hessian_hits))
hessian_misses = [x for x in range(len(IC.Internals)) if x not in hessian_hits]
#print("hessian_misses", hessian_misses)
#[print(str(internal)) for internal in IC.Internals]
log.write(" IC Hessian terms not mapped: {:d}\n".format(len(hessian_misses)))
if(verbose):
[log.write(" {:s}\n".format(str(IC.Internals[term]))) for term in hessian_misses]
log.write("\n")
# add this mol to the main db and save to disk
db["mol_data"][real_smiles] = mol_data
if(db_out is not None):
if(db_out.split('.')[-1] == "npz"):
np.savez(os.path.join(out_dir,db_out), **db)
else:
with open(os.path.join(out_dir,db_out),'wb') as fid:
pickle.dump(db, fid)
elapsed=str(timedelta(seconds=time.time() - elapsed))
result_time = str(timedelta(seconds=time.time() - result_time))
if(stepTiming):
log.write(" QueryTime= {:s}\n".format(queryelapsed))
log.write(" AnalysisTime= {:s}\n".format(elapsed))
log.write(" ResultTime= {:s}\n\n".format(result_time))
else:
log.write("\n\n")
totaltime=str(timedelta(seconds=time.time() - totaltime))
if(totalTiming):
log.write("TotalTime= {:s}\n".format(totaltime))
return db
def calc_bond_length(atoms, idx):
"""calculates distance from first atom to remaining atoms"""
return np.linalg.norm(atoms[:,idx[1],:] - atoms[:,idx[0],:], axis=1)
def calc_angle_degree(atoms, idx):
"""calculates angle between origin and consecutive atom pairs"""
mags = np.linalg.norm(atoms[:,[idx[0],idx[2]],:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,[idx[0],idx[2]],:] / mags[:,:,np.newaxis]
costheta = (unit[:,0,:] * unit[:,1,:]).sum(axis=1)
np.clip(costheta, -1.0, 1.0, out=costheta)
return np.arccos(costheta)*180/np.pi
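# Hedged check (illustrative): three points forming a right angle at the central atom
# give 90 degrees; atoms has shape (n_frames, n_atoms, 3).
_frame = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
_right_angle = calc_angle_degree(_frame, [0, 1, 2])  # -> array([90.])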
def calc_proper_torsion(atoms, idx):
"""calculates proper torsion of [i, j, k, l]"""
noncenter = [idx[0]]+idx[2:]
mags = np.linalg.norm(atoms[:,noncenter,:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,noncenter,:] / mags[:,:,np.newaxis]
#these are all Nx3
v0 = -unit[:,0,:]
v1 = unit[:,1,:]
v2 = unit[:,2,:]-unit[:,1,:]
w1 = np.cross(v0,v1)
w2 = np.cross(v1,v2)
w1_mag = np.linalg.norm(w1,axis=1)
w2_mag = np.linalg.norm(w2,axis=1)
mask = (w1_mag * w2_mag) > 0
# should be Nx1 costhetas
costheta = np.ones((atoms.shape[0]))
costheta[mask]= (w1[mask] * w2[mask]).sum(axis=1) / (w1_mag[mask]*w2_mag[mask])
np.clip(costheta, -1.0, 1.0, out=costheta)
theta = np.arccos(costheta)*180/np.pi
#distance = np.zeros((atoms.shape[0]))
#distance[mask] = ((w2[mask]*v0[mask]).sum(axis=1)/w2_mag[mask])
##theta[distance > 0] = 180 - theta[distance > 0]
theta[np.abs(theta) >= 180] %= 180.0
return theta
def calc_improper_torsion(atoms, idx, match_geometric=True):
"""calculates improper torsion of [i, center, j, k]"""
noncenter = [idx[0]]+idx[2:]
mags = np.linalg.norm(atoms[:,noncenter,:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,noncenter,:] / mags[:,:,np.newaxis]
#these are all Nx3
v0 = -unit[:,0,:]
v1 = unit[:,1,:]-unit[:,0,:]
v2 = unit[:,1,:]-unit[:,2,:]
w1 = np.cross(v0,v1)
w2 = np.cross(v1,v2)
w1_mag = np.linalg.norm(w1,axis=1)
w2_mag =
|
np.linalg.norm(w2,axis=1)
|
numpy.linalg.norm
|
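# Hedged aside (standalone illustration of the masked step above, not part of the
# original module): the torsion functions measure the angle between the plane normals
# w1 and w2, normalized by their vector norms, e.g.
import numpy as np

w1 = np.array([[0.0, 0.0, 1.0]])
w2 = np.array([[0.0, 1.0, 1.0]])
cos_angle = (w1 * w2).sum(axis=1) / (np.linalg.norm(w1, axis=1) * np.linalg.norm(w2, axis=1))
angle_deg = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))  # -> array([45.])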
"""
Path Planning Sample Code with Closed loop RRT for car like robot.
author: AtsushiSakai(@Atsushi_twi)
"""
import random
import math
import copy
import numpy as np
import pure_pursuit
import matplotlib.pyplot as plt
import sys
sys.path.append("../ReedsSheppPath/")
try:
import reeds_shepp_path_planning
import unicycle_model
except:
raise
show_animation = True
target_speed = 10.0 / 3.6
STEP_SIZE = 0.1
class RRT():
"""
Class for RRT Planning
"""
def __init__(self, start, goal, obstacleList, randArea,
maxIter=200):
"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
randArea: Random Sampling Area [min, max]
"""
self.start = Node(start[0], start[1], start[2])
self.end = Node(goal[0], goal[1], goal[2])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.obstacleList = obstacleList
self.maxIter = maxIter
def try_goal_path(self):
goal = Node(self.end.x, self.end.y, self.end.yaw)
newNode = self.steer(goal, len(self.nodeList) - 1)
if newNode is None:
return
if self.CollisionCheck(newNode, self.obstacleList):
# print("goal path is OK")
self.nodeList.append(newNode)
def Planning(self, animation=True):
"""
Pathplanning
animation: flag for animation on or off
"""
self.nodeList = [self.start]
self.try_goal_path()
for i in range(self.maxIter):
rnd = self.get_random_point()
nind = self.GetNearestListIndex(self.nodeList, rnd)
newNode = self.steer(rnd, nind)
# print(newNode.cost)
if newNode is None:
continue
if self.CollisionCheck(newNode, self.obstacleList):
nearinds = self.find_near_nodes(newNode)
newNode = self.choose_parent(newNode, nearinds)
if newNode is None:
continue
self.nodeList.append(newNode)
self.rewire(newNode, nearinds)
self.try_goal_path()
if animation and i % 5 == 0:
self.DrawGraph(rnd=rnd)
# generate course
path_indexs = self.get_best_last_indexs()
flag, x, y, yaw, v, t, a, d = self.search_best_feasible_path(
path_indexs)
return flag, x, y, yaw, v, t, a, d
def search_best_feasible_path(self, path_indexs):
print("Start search feasible path")
best_time = float("inf")
fx = None
# pure pursuit tracking
for ind in path_indexs:
path = self.gen_final_course(ind)
flag, x, y, yaw, v, t, a, d = self.check_tracking_path_is_feasible(
path)
if flag and best_time >= t[-1]:
print("feasible path is found")
best_time = t[-1]
fx, fy, fyaw, fv, ft, fa, fd = x, y, yaw, v, t, a, d
print("best time is")
print(best_time)
if fx:
fx.append(self.end.x)
fy.append(self.end.y)
fyaw.append(self.end.yaw)
return True, fx, fy, fyaw, fv, ft, fa, fd
return False, None, None, None, None, None, None, None
def calc_tracking_path(self, path):
path = np.array(path[::-1])
ds = 0.2
for i in range(10):
lx = path[-1, 0]
ly = path[-1, 1]
lyaw = path[-1, 2]
move_yaw = math.atan2(path[-2, 1] - ly, path[-2, 0] - lx)
if abs(lyaw - move_yaw) >= math.pi / 2.0:
print("back")
ds *= -1
lstate = np.array(
[lx + ds * math.cos(lyaw), ly + ds * math.sin(lyaw), lyaw])
# print(lstate)
path =
|
np.vstack((path, lstate))
|
numpy.vstack
|
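# Hedged aside (standalone illustration of the masked step above): np.vstack appends the
# extrapolated [x, y, yaw] state as a new row of the path array, e.g.
import numpy as np

path = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
lstate = np.array([1.2, 0.0, 0.0])
path = np.vstack((path, lstate))  # path.shape -> (3, 3)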
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from scipy import signal
import math
from matplotlib import rc
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
##########################
#### THIS IS THE ONE USED ####
##########################
# auxiliar function for plot ticks of equal length in x and y axis despite its scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
def sample_crosscorrelation(u, x, p):
n = len(x)
r = np.zeros(p+1)
for i in np.arange(p+1):
r[i] = np.mean(u[:n-i] * x[i:])
return r
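# Hedged note (illustrative, not part of the original script): sample_crosscorrelation
# averages u[n] * x[n+k] over the n-k overlapping samples for each lag k = 0..p, e.g.
_u_demo = np.array([1.0, 2.0, 3.0, 4.0])
_r_demo = sample_crosscorrelation(_u_demo, _u_demo, 1)  # -> array([7.5, 20/3]), lags 0 and 1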
#### Problem parameters ####
# Order of the FIR filter to identify
p = 11
# Length of the input signal
N = 1000
# Number of experiments used to compute the mean squared error of the estimate
nexp = 10000
# Power of the input signal
pot_u1 = 1
# Filter to estimate (FIR with triangular h[n])
h = signal.triang(p)
# Power of the filter output signals
pot_x1 = pot_u1 * np.sum(np.square(h))
# Power of the additive noise
pot_w = pot_x1 # for SNR = 0 dB
# pot_w = 1
# Coefficients of the second-order IIR filter used to generate the colored noise.
b = [1]
a = [1, -0.8, 0.64] # poles of magnitude 0.8 and phase pi/3
#a = [1, -0.9, 0.81] # poles of magnitude 0.9 and phase pi/3
# Power of the colored noise (AR(2) process)
pot_u2 = (1 + a[2]) / ((1 - a[2]) * ((1 + a[2])**2 - a[1]**2))
# Mean squared error of the estimate of each coefficient
mse_h1 = np.zeros(p)
mse_h2 = np.zeros(p)
r_uu2_accum = np.zeros(p)
#np.random.seed(42)
np.random.seed(46)
for i in np.arange(nexp):
###### Inputs
### 1 - WGN.
u1 = math.sqrt(pot_u1) * np.random.randn(N)
### 2 - Colored noise, obtained by filtering white noise with a second-order IIR filter (second-order
### autoregressive process, AR(2))
# Filtered white noise
u2 = signal.lfilter(b, a, np.random.randn(N))
# normalization to power pot_u1
u2 = math.sqrt(pot_u1 / pot_u2) * u2
###### Output of the system to identify
x1 = signal.lfilter(h, 1, u1)
x2 = signal.lfilter(h, 1, u2)
###### Observed outputs contaminated with white noise, SNR = 0 dB
# Additive noise
w = math.sqrt(pot_w) * np.random.randn(N)
# Observed outputs
x1_obs = x1 + w
x2_obs = x2 + w
###### Computation of the MVU estimators of the unknown system coefficients
###### Autocorrelation of the input and cross-correlation between the input and the output in each case
### Input u1[n]
### Autocorrelation
r_uu1 = sample_crosscorrelation(u1, u1, p-1)
# Cross-correlation between u1[n] and x[n]
r_u1x = sample_crosscorrelation(u1, x1_obs, p-1)
# Autocorrelation matrix
Ru1 = linalg.toeplitz(r_uu1)
# Estimator of the coefficients
h1_est = linalg.solve(Ru1, r_u1x)
### Input u2[n]
### Autocorrelation
r_uu2 = sample_crosscorrelation(u2, u2, p-1)
# Cross-correlation between u2[n] and x[n]
r_u2x = sample_crosscorrelation(u2, x2_obs, p-1)
# Autocorrelation matrix
Ru2 = linalg.toeplitz(r_uu2)
# Estimator of the coefficients
h2_est = linalg.solve(Ru2, r_u2x)
# Computation of the mean squared error of the estimate
mse_h1 = mse_h1 + np.square(h1_est - h)
mse_h2 = mse_h2 + np.square(h2_est - h)
# Computation of the autocorrelation of u2 as the average over all experiments. Not used; for verification only.
r_uu2_accum = r_uu2_accum + r_uu2
mse_h1 = mse_h1 / nexp
mse_h2 = mse_h2 / nexp
r_uu2_accum = r_uu2_accum / nexp
###### Analytical computation of the PSD and the autocorrelation of the colored noise
# number of FFT samples
nfft = 1024
# Frequency response of the filter that generates the colored noise
w, U2 = signal.freqz(b, a, worN=nfft, whole=True)
# PSD of the colored noise
psd_u2 = np.square(np.absolute(U2))
# Autocorrelation of the colored noise
r_uu2_analitic = np.real(np.fft.ifft(psd_u2))
pot_u2_alt = r_uu2_analitic[0]
r_uu2_analitic = r_uu2_analitic / pot_u2_alt
# Normalization to unit power
psd_u2 = psd_u2[:nfft//2] / pot_u2_alt
w = w[:nfft//2]
## Plots
# comparison of the analytical and sample autocorrelation (for verification)
plt.figure(0)
plt.plot(np.arange(p), r_uu2_analitic[:p], 'sk')
plt.plot(np.arange(p), r_uu2_accum[:p], 'sr')
plt.title("Autocorrelación muestral y autocorrelación analítica")
plt.xlabel("Retardo (muestras)")
plt.ylabel("Amplitud")
plt.legend(["analítica", "muestral"])
### Impulse response of the filter to identify
nmin = 0
nmax = p-1
ymin = 0
ymax = np.amax(h)
delta_n = 1
nmin_ax = nmin - delta_n
nmax_ax = nmax + 2 * delta_n
delta_y = 0.3
ymax_ax = ymax + delta_y
ymin_ax = ymin - delta_y
n = np.arange(nmin, nmax + 1)
baseline = -0.25
fontsize1 = 12
fontsize2 = 14
yt_sep = 0.12
fig = plt.figure(1, figsize=(6, 2), frameon=False)
plt.xlim(nmin_ax, nmax_ax)
plt.ylim(ymin_ax, ymax_ax)
plt.annotate("", xytext=(nmin_ax, 0), xycoords='data', xy=(nmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
(markers, stemlines, bl) = plt.stem(n, h, linefmt='k', markerfmt='sk', use_line_collection=True)
plt.setp(markers, markersize=4.5)
plt.setp(bl, visible=False)
plt.text(nmax_ax, baseline, '$k$', fontsize=fontsize2, ha='right', va='baseline')
for i in np.arange(1, p):
plt.text(i, baseline, '${}$'.format(i), fontsize=fontsize1, ha='center', va='baseline')
i = 0
plt.text(i-yt_sep, baseline, '${}$'.format(i), fontsize=fontsize1, ha='right', va='baseline')
plt.text(0.3, ymax_ax, '$h[k]$', fontsize=fontsize2, ha='left', va='center')
plt.plot([0, 0.15], [1, 1], 'k')
plt.text(-yt_sep, 1, '$1$', fontsize=fontsize1, ha='right', va='center')
plt.axis('off')
plt.savefig('system_identification_impulse_response.pdf', bbox_inches='tight')
# length of the ticks for all plots (6 pixels)
display_length = 6 # in pixels
### PSD and autocorrelation of the input u2[n] (colored noise)
fig = plt.figure(2, figsize=(6, 5), frameon=False)
# PSD of u2[n]
xmin = 0
xmax = math.pi
ymin = 0
ymax = np.amax(psd_u2)
delta_x = 0.35
xmin_ax = xmin - 0.1
xmax_ax = xmax + delta_x
delta_y = 1.2
ymin_ax = ymin - delta_y
ymax_ax = ymax + delta_y
baseline = -1
ylm = -0.07
ax = plt.subplot2grid((4, 1), (0, 0), rowspan=2, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.plot(w, psd_u2, color='k', linewidth=2)
plt.text(xmax_ax, baseline, '$\omega\,\mathrm{(rad)}$', fontsize=fontsize2, ha='center', va='baseline')
# xticks
plt.plot([math.pi/3, math.pi/3], [0, xtl], 'k')
plt.plot([math.pi/2, math.pi/2], [0, xtl], 'k')
plt.plot([math.pi, math.pi], [0, xtl], 'k')
plt.plot([math.pi/3, math.pi/3], [0, ymax], 'k--', lw=1)
# xticks labels
plt.text(math.pi/3, baseline, '$\dfrac{\pi}{3}$', fontsize=fontsize2, ha='center', va='baseline')
plt.text(math.pi/2, baseline, '$\dfrac{\pi}{2}$', fontsize=fontsize2, ha='center', va='baseline')
plt.text(math.pi, baseline, '$\pi$', fontsize=fontsize2, ha='center', va='baseline')
plt.text(-ylm/2, baseline, '$0$', fontsize=fontsize2, ha='left', va='baseline')
# yticks and labels
for i in np.arange(0, ymax, 4):
plt.plot([0, ytl], [i, i], 'k')
plt.text(ylm, i, '${:.0f}$'.format(i), fontsize=fontsize2, ha='right', va='center')
# y axis label
plt.text(-ylm, ymax_ax, '$S_{u_2u_2}(\omega)}$', fontsize=fontsize2, ha='left', va='center')
plt.axis('off')
# Autocorrelation of u2[n]
# the same scale as the PSD is used so the plots are consistent with each other.
ymax = ymax / 2
ymax_ax = (ymax_ax - ymin_ax) / 2
ymin_ax = -ymax_ax
baseline = -1.1
ax = plt.subplot2grid((4, 1), (2, 0), rowspan=2, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# number of samples for the plots
M = 31
n = np.linspace(0, xmax, M)
# normalization factor of the autocorrelation
fnorm = ymax / np.amax(np.absolute(r_uu2_analitic))
(markers, stemlines, bl) = plt.stem(n, r_uu2_analitic[:M] * fnorm, linefmt='k', markerfmt='sk',
use_line_collection=True)
plt.setp(markers, markersize=4.5)
plt.setp(bl, visible=False)
plt.text(xmax_ax, baseline, '$k$', fontsize=fontsize2, ha='right', va='baseline')
# plt.plot([math.pi, math.pi], [0, xtl], 'k')
# xtick and xticks labels
i = [10, 20, 30]
for ii in i:
plt.text(ii * xmax / (M - 1), baseline, '${:.0f}$'.format(ii), fontsize=fontsize2, ha='center', va='baseline')
tt = ii * math.pi / 30
plt.plot([tt, tt], [0, xtl], 'k')
# yticks and labels
for i in np.arange(-1, 2):
plt.plot([0, ytl], [i * fnorm, i * fnorm], 'k')
plt.text(ylm, i * fnorm, '${:.0f}$'.format(i), fontsize=fontsize2, ha='right', va='center')
# y axis label
plt.text(-ylm, ymax_ax, '$r_{u_2u_2}[k]$', fontsize=fontsize2, ha='left', va='center')
plt.axis('off')
plt.savefig('system_identification_u2_input_v2.pdf', bbox_inches='tight')
### Realization of the inputs u1[n] and u2[n]
fig = plt.figure(3, figsize=(9, 5), frameon=False)
# u1[n]
# number of samples for the plots
M = 101
xmin = 0
xmax = M-1
ymin = -3
ymax = 3
delta_x = 7
xmin_ax = xmin - 2
xmax_ax = xmax + delta_x
delta_y = 0
ymin_ax = ymin - delta_y
ymax_ax = ymax + delta_y
baseline = -0.7
ylm = -1.5
ax = plt.subplot2grid((4, 1), (0, 0), rowspan=2, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
(markers, stemlines, bl) = plt.stem(np.arange(M), u1[:M], linefmt='k', markerfmt='sk', use_line_collection=True)
plt.setp(markers, markersize=3.5)
plt.setp(stemlines, linewidth=1)
plt.setp(bl, visible=False)
for i in np.arange(-2, 3):
plt.plot([0, ytl], [i, i], 'k')
plt.text(ylm, i, '${:.0f}$'.format(i), fontsize=fontsize1, ha='right', va='center')
plt.text(xmax_ax, baseline, '$n$', fontsize=fontsize2, ha='right', va='baseline')
plt.text(-ylm, ymax_ax, '$u_1[n]$', fontsize=fontsize2, ha='left', va='center')
plt.axis('off')
ax = plt.subplot2grid((4, 1), (2, 0), rowspan=2, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
(markers, stemlines, bl) = plt.stem(np.arange(M), u2[:M], linefmt='k', markerfmt='sk', use_line_collection=True)
plt.setp(markers, markersize=3.5)
plt.setp(stemlines, linewidth=1)
plt.setp(bl, visible=False)
for i in np.arange(-2, 3):
plt.plot([0, ytl], [i, i], 'k')
plt.text(ylm, i, '${:.0f}$'.format(i), fontsize=fontsize1, ha='right', va='center')
plt.text(xmax_ax, baseline, '$n$', fontsize=fontsize2, ha='right', va='baseline')
plt.text(-ylm, ymax_ax, '$u_2[n]$', fontsize=fontsize2, ha='left', va='center')
plt.axis('off')
plt.savefig('system_identification_inputs_v2.pdf', bbox_inches='tight')
### Realization of the outputs x1[n] and x2[n]
fig = plt.figure(4, figsize=(9, 5), frameon=False)
ymin = -8
ymax = 8
delta_y = 0
ymin_ax = ymin - delta_y
ymax_ax = ymax + delta_y
baseline = -0.7 * 8 / 3
ylm = -1.5
ax = plt.subplot2grid((4, 1), (0, 0), rowspan=2, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
(markers, stemlines, bl) = plt.stem(np.arange(M), x1[:M], linefmt='k', markerfmt='sk', use_line_collection=True)
plt.setp(markers, markersize=3.5)
plt.setp(stemlines, linewidth=1)
plt.setp(bl, visible=False)
(markers, stemlines, bl) = plt.stem(
|
np.arange(M)
|
numpy.arange
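The plotting code above scales the analytic autocorrelation so its peak matches the plot's y-range before stemming it against an np.arange lag axis. A minimal, self-contained sketch of that pattern (the array r below is only a stand-in for r_uu2_analitic, which is presumably computed earlier in the source script):
import numpy as np
import matplotlib.pyplot as plt

# stand-in autocorrelation sequence (placeholder for r_uu2_analitic)
r = 0.8 ** np.arange(31)
ymax = 4
fnorm = ymax / np.amax(np.abs(r))   # scale so the peak reaches the top of the axis
k = np.arange(len(r))               # lag axis
markers, stemlines, bl = plt.stem(k, r * fnorm)
plt.setp(bl, visible=False)         # hide the baseline, as in the figure above
plt.show()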
|
import networkx as nx
import numpy as np
import scipy.sparse.linalg as lg
from gem.embedding.static_graph_embedding import StaticGraphEmbedding
class LaplacianEigenmaps(StaticGraphEmbedding):
hyper_params = {
'method_name': 'lap_eigmap_svd'
}
def __init__(self, *args, **kwargs):
""" Initialize the LaplacianEigenmaps class
Args:
d: dimension of the embedding
"""
super(LaplacianEigenmaps, self).__init__(*args, **kwargs)
def learn_embedding(self, graph=None,
is_weighted=False, no_python=False):
if not graph:
raise ValueError('graph needed')
graph = graph.to_undirected()
l_sym = nx.normalized_laplacian_matrix(graph)
w, v = lg.eigs(l_sym, k=self._d + 1, which='SM')
idx = np.argsort(w) # sort eigenvalues
w = w[idx]
v = v[:, idx]
self._X = v[:, 1:]
p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
eig_err = np.linalg.norm(p_d_p_t - l_sym)
print('Laplacian matrix recon. error (low rank): %f' % eig_err)
return self._X.real
def get_edge_weight(self, i, j):
return np.exp(
-np.power(
|
np.linalg.norm(self._X[i, :] - self._X[j, :])
|
numpy.linalg.norm
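get_edge_weight above reconstructs a similarity from the distance between two embedding rows; the exponent of np.power is cut off by the row boundary, so the sketch below assumes a squared Euclidean distance (a heat-kernel-style choice) purely for illustration:
import numpy as np

def edge_weight(X, i, j):
    # heat-kernel style weight from embedding rows i and j (the exponent of 2 is an assumption)
    d = np.linalg.norm(X[i, :] - X[j, :])
    return np.exp(-d ** 2)

X = np.random.default_rng(0).normal(size=(5, 3))   # toy 5-node embedding in 3 dimensions
print(edge_weight(X, 0, 1))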
|
import numpy as np
import scipy
from scipy.interpolate import BSpline
def main():
#test use
rmin = 0
rmax = 4
nbins = 5
order = 2
bases = get_bases(rmin, rmax, nbins, order)
def write_bases(rmin, rmax, nbins, saveto, ncont=1000, **kwargs):
bases = get_bases(rmin, rmax, nbins, ncont=ncont, **kwargs)
np.savetxt(saveto, bases.T)
return saveto
# get knot vectors
def get_kvs(rmin, rmax, nbins, order):
nknots = order+2
kvs = np.empty((nbins, nknots))
width = (rmax-rmin)/(nbins-order)
for i in range(order):
val = i+1
kvs[i,:] = np.concatenate((np.full(nknots-val, rmin), np.linspace(rmin+width, rmin+width*val, val)))
kvs[nbins-i-1] = np.concatenate((np.linspace(rmax-width*val, rmax-width, val), np.full(nknots-val, rmax)))
for j in range(nbins-2*order):
idx = j+order
kvs[idx] = rmin+width*j + np.arange(0,nknots)*width
return kvs
def get_bases(rmin, rmax, nbins, order=None, ncont=1000):
    if not order:
        print("No order given, defaulting to 1 (linear)")
        order = 1
if nbins<order*2:
# does it have to be 2*order + 1? seems fine for piecewise, but for higher orders?
raise ValueError("nbins must be at least twice the order")
kvs = get_kvs(rmin, rmax, nbins, order)
rcont =
|
np.linspace(rmin, rmax, ncont)
|
numpy.linspace
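get_bases evaluates each basis function on a dense np.linspace grid built from its knot vector. A small sketch of that evaluation step using scipy.interpolate.BSpline.basis_element (the knot values here are illustrative, not the ones get_kvs would produce):
import numpy as np
from scipy.interpolate import BSpline

kv = np.array([0.0, 1.0, 2.0, 3.0])        # knots for a single quadratic basis element
basis = BSpline.basis_element(kv, extrapolate=False)
rcont = np.linspace(kv[0], kv[-1], 1000)   # dense evaluation grid
vals = basis(rcont)
print(np.nanmax(vals))                     # peak value of the basis function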
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy as np
import os
import mdtraj
from itertools import combinations, product
# from pyemma.coordinates.data import featurizer as ft
from pyemma.coordinates.data.featurizer import MDFeaturizer, CustomFeature, _parse_pairwise_input
# from pyemma.coordinates.tests.test_discretizer import create_water_topology_on_disc
path = os.path.join(os.path.split(__file__)[0], 'data')
xtcfile = os.path.join(path, 'bpti_mini.xtc')
pdbfile = os.path.join(path, 'bpti_ca.pdb')
asn_leu_pdb = """
ATOM 559 N ASN A 69 19.168 -0.936 -10.274 1.00 27.50 N
ATOM 560 CA ASN A 69 20.356 -0.049 -10.419 1.00 25.52 C
ATOM 561 C ASN A 69 21.572 -0.418 -9.653 1.00 24.26 C
ATOM 562 O ASN A 69 22.687 -0.336 -10.171 1.00 24.33 O
ATOM 563 CB ASN A 69 19.965 1.410 -10.149 1.00 26.49 C
ATOM 564 CG ASN A 69 18.932 1.881 -11.124 1.00 26.35 C
ATOM 565 OD1 ASN A 69 18.835 1.322 -12.224 1.00 26.77 O
ATOM 566 ND2 ASN A 69 18.131 2.864 -10.745 1.00 24.85 N
ATOM 567 N LEU A 70 21.419 -0.824 -8.404 1.00 23.02 N
ATOM 568 CA LEU A 70 22.592 -1.275 -7.656 1.00 23.37 C
ATOM 569 C LEU A 70 23.391 -2.325 -8.448 1.00 25.78 C
ATOM 570 O LEU A 70 24.647 -2.315 -8.430 1.00 25.47 O
ATOM 571 CB LEU A 70 22.202 -1.897 -6.306 1.00 22.17 C
ATOM 572 CG LEU A 70 23.335 -2.560 -5.519 1.00 22.49 C
ATOM 573 CD1 LEU A 70 24.578 -1.665 -5.335 1.00 22.56 C
ATOM 574 CD2 LEU A 70 22.853 -3.108 -4.147 1.00 24.47 C
""" *2 ### asn-leu-asn-leu
def verbose_assertion_minrmsd(ref_Y, test_Y, test_obj):
for jj in np.arange(test_Y.shape[1]):
ii = np.argmax(np.abs(ref_Y-test_Y[:,jj]))
assert np.allclose(ref_Y, test_Y[:,jj], atol=test_obj.atol), \
'Largest discrepancy between reference (ref_frame %u)' \
' and test: %8.2e, for the pair %f, %f at frame %u'%\
(test_obj.ref_frame,
(ref_Y-test_Y[:,jj])[ii],
ref_Y[ii], test_Y[ii,jj], ii)
class TestFeaturizer(unittest.TestCase):
@classmethod
def setUpClass(cls):
import tempfile
cls.asn_leu_pdbfile = tempfile.mkstemp(suffix=".pdb")[1]
with open(cls.asn_leu_pdbfile, 'w') as fh:
fh.write(asn_leu_pdb)
cls.asn_leu_traj = tempfile.mktemp(suffix='.xtc')
# create traj for asn_leu
n_frames = 4001
traj = mdtraj.load(cls.asn_leu_pdbfile)
ref = traj.xyz
new_xyz = np.empty((n_frames, ref.shape[1], 3))
noise = np.random.random(new_xyz.shape)
        new_xyz[:, :, :] = noise + ref
traj.xyz=new_xyz
traj.time=np.arange(n_frames)
traj.save(cls.asn_leu_traj)
super(TestFeaturizer, cls).setUpClass()
@classmethod
def tearDownClass(cls):
try:
os.unlink(cls.asn_leu_pdbfile)
except EnvironmentError:
pass
super(TestFeaturizer, cls).tearDownClass()
def setUp(self):
self.pdbfile = pdbfile
self.traj = mdtraj.load(xtcfile, top=self.pdbfile)
self.feat = MDFeaturizer(self.pdbfile)
self.atol = 1e-5
self.ref_frame = 0
self.atom_indices = np.arange(0, self.traj.n_atoms/2)
def test_select_backbone(self):
inds = self.feat.select_Backbone()
def test_select_all(self):
self.feat.add_all()
assert (self.feat.dimension() == self.traj.n_atoms * 3)
refmap = np.reshape(self.traj.xyz, (len(self.traj), self.traj.n_atoms * 3))
assert (np.all(refmap == self.feat.map(self.traj)))
def test_select(self):
sel = np.array([1, 2, 5, 20], dtype=int)
self.feat.add_selection(sel)
assert (self.feat.dimension() == sel.shape[0] * 3)
refmap = np.reshape(self.traj.xyz[:, sel, :], (len(self.traj), sel.shape[0] * 3))
assert (np.all(refmap == self.feat.map(self.traj)))
def test_distances(self):
sel = np.array([1, 2, 5, 20], dtype=int)
pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
pairs = self.feat.pairs(sel, excluded_neighbors=2)
assert(pairs.shape == pairs_expected.shape)
assert(np.all(pairs == pairs_expected))
self.feat.add_distances(pairs, periodic=False) # unperiodic distances such that we can compare
assert(self.feat.dimension() == pairs_expected.shape[0])
X = self.traj.xyz[:, pairs_expected[:, 0], :]
Y = self.traj.xyz[:, pairs_expected[:, 1], :]
D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
assert(np.allclose(D, self.feat.map(self.traj)))
def test_inverse_distances(self):
sel = np.array([1, 2, 5, 20], dtype=int)
pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
pairs = self.feat.pairs(sel, excluded_neighbors=2)
assert(pairs.shape == pairs_expected.shape)
assert(np.all(pairs == pairs_expected))
self.feat.add_inverse_distances(pairs, periodic=False) # unperiodic distances such that we can compare
assert(self.feat.dimension() == pairs_expected.shape[0])
X = self.traj.xyz[:, pairs_expected[:, 0], :]
Y = self.traj.xyz[:, pairs_expected[:, 1], :]
Dinv = 1.0/np.sqrt(np.sum((X - Y) ** 2, axis=2))
assert(np.allclose(Dinv, self.feat.map(self.traj)))
def test_ca_distances(self):
sel = self.feat.select_Ca()
assert(np.all(sel == range(self.traj.n_atoms))) # should be all for this Ca-traj
pairs = self.feat.pairs(sel, excluded_neighbors=0)
self.feat.add_distances_ca(periodic=False) # unperiodic distances such that we can compare
assert(self.feat.dimension() == pairs.shape[0])
X = self.traj.xyz[:, pairs[:, 0], :]
Y = self.traj.xyz[:, pairs[:, 1], :]
D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
assert(np.allclose(D, self.feat.map(self.traj)))
def test_contacts(self):
sel = np.array([1, 2, 5, 20], dtype=int)
pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
pairs = self.feat.pairs(sel, excluded_neighbors=2)
assert(pairs.shape == pairs_expected.shape)
assert(np.all(pairs == pairs_expected))
self.feat.add_contacts(pairs, threshold=0.5, periodic=False) # unperiodic distances such that we can compare
assert(self.feat.dimension() == pairs_expected.shape[0])
X = self.traj.xyz[:, pairs_expected[:, 0], :]
Y = self.traj.xyz[:, pairs_expected[:, 1], :]
D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
C = np.zeros(D.shape)
I = np.argwhere(D <= 0.5)
C[I[:, 0], I[:, 1]] = 1.0
assert(np.allclose(C, self.feat.map(self.traj)))
def test_angles(self):
sel = np.array([[1, 2, 5],
[1, 3, 8],
[2, 9, 10]], dtype=int)
self.feat.add_angles(sel)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_angles_deg(self):
sel = np.array([[1, 2, 5],
[1, 3, 8],
[2, 9, 10]], dtype=int)
self.feat.add_angles(sel, deg=True)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -180.0))
assert(np.alltrue(Y <= 180.0))
def test_angles_cossin(self):
sel = np.array([[1, 2, 5],
[1, 3, 8],
[2, 9, 10]], dtype=int)
self.feat.add_angles(sel, cossin=True)
assert(self.feat.dimension() == 2 * sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_dihedrals(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_dihedrals_deg(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel, deg=True)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -180.0))
assert(np.alltrue(Y <= 180.0))
self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_dihedrials_cossin(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel, cossin=True)
assert(self.feat.dimension() == 2 * sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrals(self):
self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
self.feat.add_backbone_torsions()
traj = mdtraj.load(self.asn_leu_pdbfile)
Y = self.feat.map(traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrals_deg(self):
self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
self.feat.add_backbone_torsions(deg=True)
traj = mdtraj.load(self.asn_leu_pdbfile)
Y = self.feat.map(traj)
assert(np.alltrue(Y >= -180.0))
assert(np.alltrue(Y <= 180.0))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrals_cossin(self):
self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
self.feat.add_backbone_torsions(cossin=True)
traj = mdtraj.load(self.asn_leu_traj, top=self.asn_leu_pdbfile)
Y = self.feat.map(traj)
self.assertEqual(Y.shape, (len(traj), 3*4)) # (3 phi + 3 psi)*2 [cos, sin]
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
assert "COS" in desc[0]
assert "SIN" in desc[1]
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrials_chi(self):
self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
self.feat.add_chi1_torsions()
traj = mdtraj.load(self.asn_leu_pdbfile)
Y = self.feat.map(traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrials_chi_cossin(self):
self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
self.feat.add_chi1_torsions(cossin=True)
traj = mdtraj.load(self.asn_leu_pdbfile)
Y = self.feat.map(traj)
assert(np.alltrue(Y >= -np.pi))
assert(
|
np.alltrue(Y <= np.pi)
|
numpy.alltrue
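np.alltrue, used throughout these tests, is a long-deprecated alias of np.all and was removed in NumPy 2.0; on current NumPy the same range checks read as below.
import numpy as np

Y = np.random.default_rng(0).uniform(-np.pi, np.pi, size=(10, 6))
# np.all is the supported spelling of the np.alltrue checks above
assert np.all(Y >= -np.pi)
assert np.all(Y <= np.pi)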
|
# -*- coding: UTF-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from ..meas_info import create_info
from ...transforms import rotation3d_align_z_axis
from ...channels import make_dig_montage
from ..constants import FIFF
from ...utils import warn, _check_pandas_installed
from ..pick import pick_info
_supported_megs = ['neuromag306']
_unit_dict = {'m': 1,
'cm': 1e-2,
'mm': 1e-3,
'V': 1,
'mV': 1e-3,
'uV': 1e-6,
'T': 1,
'T/m': 1,
'T/cm': 1e2}
NOINFO_WARNING = 'Importing FieldTrip data without an info dict from the ' \
'original file. Channel locations, orientations and types ' \
'will be incorrect. The imported data cannot be used for ' \
'source analysis, channel interpolation etc.'
def _validate_ft_struct(ft_struct):
"""Run validation checks on the ft_structure."""
if isinstance(ft_struct, list):
raise RuntimeError('Loading of data in cell arrays is not supported')
def _create_info(ft_struct, raw_info):
"""Create MNE info structure from a FieldTrip structure."""
if raw_info is None:
warn(NOINFO_WARNING)
sfreq = _set_sfreq(ft_struct)
ch_names = ft_struct['label']
if raw_info:
info = raw_info.copy()
missing_channels = set(ch_names) - set(info['ch_names'])
if missing_channels:
warn('The following channels are present in the FieldTrip data '
'but cannot be found in the provided info: %s.\n'
'These channels will be removed from the resulting data!'
% (str(missing_channels), ))
missing_chan_idx = [ch_names.index(ch) for ch in missing_channels]
new_chs = [ch for ch in ch_names if ch not in missing_channels]
ch_names = new_chs
ft_struct['label'] = ch_names
if 'trial' in ft_struct:
if ft_struct['trial'].ndim == 2:
ft_struct['trial'] = np.delete(ft_struct['trial'],
missing_chan_idx,
axis=0)
if 'avg' in ft_struct:
if ft_struct['avg'].ndim == 2:
ft_struct['avg'] = np.delete(ft_struct['avg'],
missing_chan_idx,
axis=0)
info['sfreq'] = sfreq
ch_idx = [info['ch_names'].index(ch) for ch in ch_names]
pick_info(info, ch_idx, copy=False)
else:
montage = _create_montage(ft_struct)
info = create_info(ch_names, sfreq)
info.set_montage(montage)
chs = _create_info_chs(ft_struct)
info['chs'] = chs
info._update_redundant()
return info
def _create_info_chs(ft_struct):
"""Create the chs info field from the FieldTrip structure."""
all_channels = ft_struct['label']
ch_defaults = dict(coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1.0,
range=1.0,
unit_mul=FIFF.FIFF_UNITM_NONE,
loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]),
unit=FIFF.FIFF_UNIT_V)
try:
elec = ft_struct['elec']
except KeyError:
elec = None
try:
grad = ft_struct['grad']
except KeyError:
grad = None
if elec is None and grad is None:
warn('The supplied FieldTrip structure does not have an elec or grad '
             'field. No channel locations will be extracted and the kind of '
'channel might be inaccurate.')
chs = list()
for idx_chan, cur_channel_label in enumerate(all_channels):
cur_ch = ch_defaults.copy()
cur_ch['ch_name'] = cur_channel_label
cur_ch['logno'] = idx_chan + 1
cur_ch['scanno'] = idx_chan + 1
if elec and cur_channel_label in elec['label']:
cur_ch = _process_channel_eeg(cur_ch, elec)
elif grad and cur_channel_label in grad['label']:
cur_ch = _process_channel_meg(cur_ch, grad)
else:
if cur_channel_label.startswith('EOG'):
cur_ch['kind'] = FIFF.FIFFV_EOG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
elif cur_channel_label.startswith('ECG'):
cur_ch['kind'] = FIFF.FIFFV_ECG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
elif cur_channel_label.startswith('STI'):
cur_ch['kind'] = FIFF.FIFFV_STIM_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
else:
warn('Cannot guess the correct type of channel %s. Making '
'it a MISC channel.' % (cur_channel_label,))
cur_ch['kind'] = FIFF.FIFFV_MISC_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
chs.append(cur_ch)
return chs
def _create_montage(ft_struct):
"""Create a montage from the FieldTrip data."""
# try to create a montage
montage_pos, montage_ch_names = list(), list()
for cur_ch_type in ('grad', 'elec'):
if cur_ch_type in ft_struct:
cur_ch_struct = ft_struct[cur_ch_type]
available_channels = np.where(np.in1d(cur_ch_struct['label'],
ft_struct['label']))[0]
tmp_labels = cur_ch_struct['label']
if not isinstance(tmp_labels, list):
tmp_labels = [tmp_labels]
cur_labels = np.asanyarray(tmp_labels)
montage_ch_names.extend(
cur_labels[available_channels])
montage_pos.extend(
cur_ch_struct['chanpos'][available_channels])
montage = None
if (len(montage_ch_names) > 0 and len(montage_pos) > 0 and
len(montage_ch_names) == len(montage_pos)):
montage = make_dig_montage(
ch_pos=dict(zip(montage_ch_names, montage_pos)),
# XXX: who grants 'head'?? this is BACKCOMPAT but seems a BUG
coord_frame='head',
)
return montage
def _set_sfreq(ft_struct):
"""Set the sample frequency."""
try:
sfreq = ft_struct['fsample']
except KeyError:
try:
t1 = ft_struct['time'][0]
t2 = ft_struct['time'][1]
difference = abs(t1 - t2)
sfreq = 1 / difference
except KeyError:
raise ValueError('No Source for sfreq found')
return sfreq
def _set_tmin(ft_struct):
"""Set the start time before the event in evoked data if possible."""
times = ft_struct['time']
time_check = all(times[i][0] == times[i - 1][0]
for i, x in enumerate(times))
if time_check:
tmin = times[0][0]
else:
tmin = None
return tmin
def _create_events(ft_struct, trialinfo_column):
"""Create an event matrix from the FieldTrip structure."""
event_type = ft_struct['trialinfo']
event_number = range(len(event_type))
if trialinfo_column < 0:
raise ValueError('trialinfo_column must be positive')
available_ti_cols = 1
if event_type.ndim == 2:
available_ti_cols = event_type.shape[1]
if trialinfo_column > (available_ti_cols - 1):
        raise ValueError('trialinfo_column is higher than the amount of '
                         'columns in trialinfo.')
event_trans_val = np.zeros(len(event_type))
if event_type.ndim == 2:
event_type = event_type[:, trialinfo_column]
events = np.vstack([np.array(event_number), event_trans_val,
event_type]).astype('int').T
return events
def _create_event_metadata(ft_struct):
"""Create event metadata from trialinfo."""
pandas = _check_pandas_installed(strict=False)
if not pandas:
warn('The Pandas library is not installed. Not returning the original '
'trialinfo matrix as metadata.')
return None
metadata = pandas.DataFrame(ft_struct['trialinfo'])
return metadata
def _process_channel_eeg(cur_ch, elec):
"""Convert EEG channel from FieldTrip to MNE.
Parameters
----------
cur_ch: dict
Channel specific dictionary to populate.
elec: dict
elec dict as loaded from the FieldTrip structure
Returns
-------
cur_ch: dict
The original dict (cur_ch) with the added information
"""
all_labels =
|
np.asanyarray(elec['label'])
|
numpy.asanyarray
|
from __future__ import division
import numpy as np
from .fft import fftfreq, fft, ifft
try:
from accelerate.mkl.fftpack import fft, ifft
except ImportError:
try:
from pyfftw.interfaces.numpy_fft import fft, ifft
except ImportError:
from numpy.fft import fft, ifft
__authors__ = "<NAME>, <NAME>"
__all__ = ['gaussian', 'hamming', 'hilbert_transform']
def gaussian(X, rate, center, sd):
time = X.shape[-1]
freq = fftfreq(time, 1./rate)
k = np.exp((-(np.abs(freq) - center)**2)/(2 * (sd**2)))
k /= np.linalg.norm(k)
return k
def hamming(X, rate, min_freq, max_freq):
time = X.shape[-1]
freq = fftfreq(time, 1./rate)
pos_in_window = np.logical_and(freq >= min_freq, freq <= max_freq)
neg_in_window = np.logical_and(freq <= -min_freq, freq >= -max_freq)
k = np.zeros(len(freq))
window_size = np.count_nonzero(pos_in_window)
window = np.hamming(window_size)
k[pos_in_window] = window
window_size = np.count_nonzero(neg_in_window)
window = np.hamming(window_size)
k[neg_in_window] = window
k /= np.linalg.norm(k)
return k
def hilbert_transform(X, rate, filters=None, phase=None, X_fft_h=None):
"""
Apply bandpass filtering with Hilbert transform using
a prespecified set of filters.
Parameters
----------
X : ndarray (n_channels, n_time)
Input data, dimensions
rate : float
Number of samples per second.
filters : filter or list of filters (optional)
One or more bandpass filters
Returns
-------
Xh : ndarray, complex
Bandpassed analytic signal
"""
if not isinstance(filters, list):
filters = [filters]
time = X.shape[-1]
freq = fftfreq(time, 1. / rate)
    Xh = np.zeros((len(filters),) + X.shape, dtype=complex)
if X_fft_h is None:
# Heavyside filter
h = np.zeros(len(freq))
h[freq > 0] = 2.
h[0] = 1.
h = h[np.newaxis, :]
X_fft_h =
|
fft(X)
|
numpy.fft.fft
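hilbert_transform builds the analytic signal in the frequency domain: the Heaviside-style weighting h keeps DC, doubles the positive frequencies, and zeroes the negative ones before the inverse FFT. A standalone sketch of that construction, checked against scipy.signal.hilbert:
import numpy as np
from scipy.signal import hilbert

x = np.random.default_rng(0).normal(size=256)
N = x.size
X = np.fft.fft(x)
h = np.zeros(N)
h[0] = 1.0                  # keep DC
h[1:N // 2] = 2.0           # double the positive frequencies
h[N // 2] = 1.0             # keep the Nyquist bin (even-length signal)
xa = np.fft.ifft(X * h)     # analytic signal
assert np.allclose(xa, hilbert(x))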
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 14:47:27 2021
@author: vader
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm_notebook as tqdm
import itertools
import pandas as pd
import tifffile
from tifffile import memmap
import imageio as io
import os
import shutil
def min_max(img_list):
min_val=np.ones((img_list[0].shape[-1]))*999999
max_val=np.zeros((img_list[0].shape[-1]))
for img in img_list:
mx=np.max(img,axis=(0,1))
mn=np.min(img,axis=(0,1))
min_val=np.minimum(min_val,mn)
max_val=np.maximum(max_val,mx)
return min_val,max_val
def norm_img(img,min_val,max_val):
img=img.astype(np.float32)
# for i in range(img[0].shape[-1]):
# img[:,:,i] = (img[:,:,i] - min_val[i]) /(max_val[i] - min_val[i])
img = (img - min_val) /(max_val - min_val)
return img
def store_to_disk(x,name):
# shutil.copyfile(path,name)
# return memmap(name, mode='r+',dtype=np.float32)
if type(x)==str:
x=memmap(x, mode='r')
x=x.astype(np.float32)
np.save(name,x)
return np.load(name+'.npy',mmap_mode='r+')
def collect_data(img,gt,backround_labels=[0]):
samples,labels=[],[]
if img.shape[0] == gt.shape[0] and img.shape[1] == gt.shape[1]:
for label in np.unique(gt):
if label in backround_labels: continue
else:
ind=np.nonzero(gt == label)
samples += list(img[ind])
labels += len(ind[0])*[label]
else: print ('Images have different shapes')
return np.asarray(samples),np.asarray(labels)
def conc_images(img1,gt1,img2,gt2,dtype='int16'):
top, bottom, left, right = [10]*4
img1=cv2.copyMakeBorder(img1,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
img2=cv2.copyMakeBorder(img2,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
gt1=cv2.copyMakeBorder(gt1,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
gt2=cv2.copyMakeBorder(gt2,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
dif=img1.shape[1]-img2.shape[1]
if dif <= 0:
# if dif==0:dif=-1
a=np.zeros((img1.shape[0],dif*(-1),img1.shape[2]), dtype=dtype)
im1=
|
np.concatenate((img1,a),axis=1)
|
numpy.concatenate
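conc_images pads the narrower image with a zero block so both images end up the same width; the blank in this row is the horizontal np.concatenate of that padding. A minimal sketch of the same idea on toy arrays:
import numpy as np

img1 = np.ones((4, 3, 2), dtype='int16')      # height 4, width 3, 2 channels
img2 = np.ones((5, 6, 2), dtype='int16')      # wider image
dif = img1.shape[1] - img2.shape[1]           # negative: img1 is narrower
pad = np.zeros((img1.shape[0], -dif, img1.shape[2]), dtype='int16')
im1 = np.concatenate((img1, pad), axis=1)     # pad img1 to the same width as img2
print(im1.shape)                              # (4, 6, 2)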
|
"""
This script compare results (in VTK format) from SRH-2D and HEC-RAS 2D
"""
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
import pyHMT2D
from pyHMT2D.Misc.tools import setNumpyArrayValueToNaN
plt.rc('text', usetex=False) #disable LaTeX rendering for math expressions and equations
plt.rc('font', family='serif') #specify the default font family to be "serif"
def plot_1D_profile():
vtk_handler = pyHMT2D.Misc.vtkHandler()
# Define the filename of VTK file
srh_filename = 'SRH-2D/SRH2D_backwater_curve_C_0024.vtk'
hec_filename = 'HEC-RAS-2D/RAS2D_channel_0012.vtk'
# Set the points between which the line is constructed.
p1 = [259651, 4.52109e+06, 0]
p2 = [269601, 4.52104e+06, 0]
    # Define the number of interpolation points
numPoints = 100
# for SRH-2D
reader = vtk_handler.readVTK_UnstructuredGrid(srh_filename) # read the VTKfile
line = vtk_handler.createVtkLine(p1, p2, numPoints) # Create the line
points, U, elev_srh_2d = vtk_handler.probeUnstructuredGridVTKOverLine(line, reader,
'Water_Elev_m') # interpolate the data over the line
# print(points)
# print(U)
max_x = np.max(points[:, 0])
min_x = np.min(points[:, 0])
U = setNumpyArrayValueToNaN(U, 0) # Set the zero's to NaN's
plt.plot((points[:, 0] - max_x) / 1000, U, label='SRH-2D') # plot the data
# plot the bottom
plt.plot((points[:, 0] - max_x) / 1000, elev_srh_2d, color='k')
# for HEC-RAS
reader = vtk_handler.readVTK_UnstructuredGrid(hec_filename) # read the VTKfile
line = vtk_handler.createVtkLine(p1, p2, numPoints) # Create the line
points, U, elev_ras_2d = vtk_handler.probeUnstructuredGridVTKOverLine(line, reader,
'Water_Elev_m') # interpolate the data over the line
# print(points)
# print(U)
max_x = np.max(points[:, 0])
min_x = np.min(points[:, 0])
U = setNumpyArrayValueToNaN(U, 0) # Set the zero's to NaN's
plt.scatter(((points[:, 0] - max_x) / 1000)[::5], U[::5], marker='o', facecolors='none', edgecolors='r', s=10,
label='HEC-RAS 2D')
# plot the bottom
plt.plot((points[:, 0] - max_x) / 1000, elev_ras_2d, color='k')
# Also perform a 1D backwater calculation
#load Backwater_1D_Data configuration data
my_backwater_1D_data = pyHMT2D.Hydraulic_Models_Data.Backwater_1D_Data("Backwater-1D/backwater_1d.json")
#create a Backwater_1D_Model object
my_backwater_1D_model = pyHMT2D.Hydraulic_Models_Data.Backwater_1D_Model()
#set the simulation case in the Backwater_1D_Model object
my_backwater_1D_model.set_simulation_case(my_backwater_1D_data)
#run the Backwater_1D_Model model
my_backwater_1D_model.run_model()
#sample some results as manufactured solution
xmin = np.min(my_backwater_1D_data.gridx)
xmax =
|
np.max(my_backwater_1D_data.gridx)
|
numpy.max
|
"""
CNN script for v6 of the lensing estimation testing
- like v5 but with a wider field of view but keeping the high resolution.
Incorporating Sequence approach to loading training data.
"""
import os
import pathlib
import sys
import time
# pylint: disable=wrong-import-position
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.keras as keras
import pymaster as nmt
class TrainingGenerator(keras.utils.Sequence):
"""
Generator to load training data in batches.
"""
def __init__(self, file_mask, x_arr_id, y_arr_id, n_files, batches_per_file=1, shuffle=True, augment=True):
self.file_mask = file_mask
self.x_arr_id = x_arr_id
self.y_arr_id = y_arr_id
self.batches_per_file = batches_per_file
self.n_batches = n_files * batches_per_file
self.shuffle = shuffle
self.augment = augment
self.file_xy = None
self.batch_size = None
def __len__(self):
return self.n_batches
@staticmethod
def augment_data(xy, shuffle=True):
"""
Augment data by applying 7 unique combinations of rotations and reflections.
Expecting data in shape (n_samples, 2, npix, npix, 1).
Will return data in shape (8*n_samples, 2, npix, npix, 1).
"""
n_samp = xy.shape[0]
npix = xy.shape[2]
shape = (n_samp, 2, npix, npix, 1)
assert xy.shape == shape, xy.shape
xy_out = np.concatenate((xy, np.full((7 * n_samp, 2, npix, npix, 1), np.nan)), axis=0)
xy = xy_out[:n_samp, ...]
# Rotate 90
xy_out[n_samp:(2 * n_samp), ...] = np.rot90(xy, 1, axes=(3, 2))
# Rotate 180
xy_out[(2 * n_samp):(3 * n_samp), ...] = np.rot90(xy, 2, axes=(3, 2))
# Rotate 270
xy_out[(3 * n_samp):(4 * n_samp), ...] = np.rot90(xy, 3, axes=(3, 2))
# Flip vert
xy_out[(4 * n_samp):(5 * n_samp), ...] = np.flip(xy, axis=2)
# Flip horiz
xy_out[(5 * n_samp):(6 * n_samp), ...] = np.flip(xy, axis=3)
# Rotate 90 + flip vert
xy_out[(6 * n_samp):(7 * n_samp), ...] = np.flip(xy_out[n_samp:(2 * n_samp), ...], axis=2)
# Rotate 90 + flip horiz
xy_out[(7 * n_samp):(8 * n_samp), ...] = np.flip(xy_out[n_samp:(2 * n_samp), ...], axis=3)
# Shuffle
if shuffle:
np.random.default_rng().shuffle(xy_out, axis=0)
return xy_out
def __getitem__(self, idx):
idx_within_file = idx % self.batches_per_file
# If first batch in file, load data and stack into a single array of shape (n_samp, n_pix, n_pix, n_channel)
if idx_within_file == 0:
train_path = self.file_mask.format(idx=idx)
with np.load(train_path) as data:
self.file_xy = np.stack((data[self.x_arr_id], data[self.y_arr_id]), axis=1)
self.file_xy = self.file_xy[..., np.newaxis]
self.batch_size = int(np.ceil(self.file_xy.shape[0] / self.batches_per_file))
# Select slice for this batch
batch_start_idx = idx_within_file * self.batch_size
batch_stop_idx = batch_start_idx + self.batch_size
batch_xy = self.file_xy[batch_start_idx:batch_stop_idx, ...]
# Augment and shuffle
if self.augment:
batch_xy = self.__class__.augment_data(batch_xy, shuffle=self.shuffle)
# Re-split into x and y, do some checks and return
x = batch_xy[:, 0, ...]
y = batch_xy[:, 1, ...]
assert np.all(np.isfinite(x)), x
assert np.all(np.isfinite(y)), y
assert np.amin(x) > 0, np.amin(x)
assert np.amax(x) < 1, np.amax(x)
assert np.amin(y) > 0, np.amin(y)
assert np.amax(y) < 1, np.amax(y)
return x, y
def load_validation_data(data_path, x_arr_id, y_arr_id, shuffle=True):
"""
Loads validation data from a single npz file where x and y are in data[x_arr_id] and data[y_arr_id].
Shuffles if requested, but doesn't augment.
"""
# Load data and stack into a single array of shape (n_samp, n_pix, n_pix, n_channel)
with np.load(data_path) as data:
x = data[x_arr_id]
y = data[y_arr_id]
xy = np.stack((x, y), axis=1)
xy = xy[..., np.newaxis]
print('Loaded validation data, length', len(xy))
# Shuffle
if shuffle:
np.random.default_rng().shuffle(xy, axis=0)
# Split into x and y
x = xy[:, 0, ...]
y = xy[:, 1, ...]
# Do some final checks and return
assert np.all(
|
np.isfinite(x)
|
numpy.isfinite
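augment_data applies the seven non-identity rotations and reflections of the square to each sample, so the output has eight times as many samples as the input. A pure-NumPy sketch of those eight dihedral transforms on a single toy image, just to illustrate the bookkeeping:
import numpy as np

img = np.arange(16.0).reshape(4, 4)
transforms = [
    img,
    np.rot90(img, 1), np.rot90(img, 2), np.rot90(img, 3),                  # rotations
    np.flip(img, axis=0), np.flip(img, axis=1),                            # vertical / horizontal flips
    np.flip(np.rot90(img, 1), axis=0), np.flip(np.rot90(img, 1), axis=1),  # rot90 + flips
]
stacked = np.stack(transforms)          # shape (8, 4, 4): one sample becomes eight
assert np.all(np.isfinite(stacked))
print(stacked.shape)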
|
####
#
# The MIT License (MIT)
#
# Copyright 2019, 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import unittest
import numpy as np
from rosvm.ranksvm.mkl_utils import frobenius_product, kernel_alignment, LinearMKLer
from sklearn.model_selection import ShuffleSplit
from sklearn.datasets import make_classification
from sklearn.metrics.pairwise import linear_kernel
class TestFrobeniusProduct(unittest.TestCase):
def test_correctness(self):
for _ in range(100):
A = np.random.uniform(-10, 10, (25, 60))
B = np.random.uniform(-5, 5, (25, 60))
# <A, A>_F
AA_F = frobenius_product(A)
np.testing.assert_allclose(AA_F, np.trace(A.T @ A))
# <A, B>_F
AB_F = frobenius_product(A, B)
np.testing.assert_allclose(AB_F, np.trace(A.T @ B))
class TestKernelAlignment(unittest.TestCase):
def test_corner_cases(self):
A = np.random.uniform(-10, 10, (25, 60))
KA = A @ A.T
# Kernel aligned with it self
np.testing.assert_equal(kernel_alignment(KA, KA, False), 1.0)
np.testing.assert_equal(kernel_alignment(KA, KA, True), 1.0)
def test_correctness(self):
self.skipTest("How to test?")
class TestLinearMKLer(unittest.TestCase):
def test_unimkl(self):
# Set up list of features
X = [
np.random.RandomState(1932).randn(102, 738),
np.random.RandomState(455).randn(102, 12),
np.random.RandomState(212).randn(102, 231),
np.random.RandomState(32).randn(102, 324)
]
# Set up list of kernels
Kx = [
X[0] @ X[0].T,
X[1] @ X[1].T,
X[2] @ X[2].T,
X[3] @ X[3].T
]
# Split training and test
train, test = next(ShuffleSplit(random_state=102).split(Kx[0]))
# Fit the transformer
trans = LinearMKLer(method="unimkl").fit([Kx_k[np.ix_(train, train)] for Kx_k in Kx])
assert (isinstance(trans, LinearMKLer)), "Fit function must return a 'LinearMKLer'."
# Check the kernel weights
np.testing.assert_equal(trans._kernel_weights, np.ones((4,)) / 4)
# Transform (combine) the kernels
np.testing.assert_equal(
trans.transform([Kx_k[np.ix_(train, train)] for Kx_k in Kx]),
np.mean(np.array([Kx_k[np.ix_(train, train)] for Kx_k in Kx]), axis=0))
np.testing.assert_equal(
trans.transform([Kx_k[np.ix_(test, train)] for Kx_k in Kx]),
np.mean(np.array([Kx_k[np.ix_(test, train)] for Kx_k in Kx]), axis=0))
def test_align(self):
# Create simple classification dataset
n_features = 6
n_informative = 2
n_redundant = 2
X, y = make_classification(n_redundant=n_redundant, n_features=n_features, n_informative=n_informative,
n_samples=200, random_state=192, shuffle=False, n_clusters_per_class=1)
y[y == 0] = -1
# Calculate kernels for the different types features and the output
n_noise = n_features - n_informative - n_redundant
Kx_inf = linear_kernel(X[:, :n_informative])
Kx_red = linear_kernel(X[:, n_informative:(n_informative + n_redundant)])
Kx_noise = linear_kernel(X[:, -n_noise:])
Ky = np.outer(y, y)
# --------------------------------------
# Fit the transformer
trans = LinearMKLer(method="align").fit([Kx_inf, Kx_red, Kx_noise, Kx_noise], Ky)
assert (isinstance(trans, LinearMKLer)), "Fit function must return a 'LinearMKLer'."
# Check the kernel weights
np.testing.assert_allclose(np.round(trans._kernel_weights, 6),
np.array([0.719934, 0.670636, 0.000601, 0.000601]))
# Transform (combine) the kernels
Kx_mkl = trans.transform([Kx_inf, Kx_red, Kx_noise, Kx_noise])
self.assertTrue(kernel_alignment(Kx_mkl, Ky, True) > 0.7)
# --------------------------------------
# Fit the transformer
trans = LinearMKLer(method="align", center_before_combine=True).fit([Kx_inf, Kx_red, Kx_noise, Kx_noise], Ky)
assert (isinstance(trans, LinearMKLer)), "Fit function must return a 'LinearMKLer'."
# Check the kernel weights
np.testing.assert_allclose(np.round(trans._kernel_weights, 6),
np.array([0.719934, 0.670636, 0.000601, 0.000601]))
# Transform (combine) the kernels
Kx_mkl = trans.transform([Kx_inf, Kx_red, Kx_noise, Kx_noise])
self.assertTrue(kernel_alignment(Kx_mkl, Ky, True) > 0.7)
np.testing.assert_allclose(np.sum(Kx_mkl, axis=0), np.zeros((Kx_mkl.shape[0],)), atol=1e-12)
def test_alignf(self):
# Create simple classification dataset
n_features = 6
n_informative = 2
n_redundant = 2
X, y = make_classification(n_redundant=n_redundant, n_features=n_features, n_informative=n_informative,
n_samples=200, random_state=192, shuffle=False, n_clusters_per_class=1)
y[y == 0] = -1
# Calculate kernels for the different types features and the output
n_noise = n_features - n_informative - n_redundant
Kx_inf = linear_kernel(X[:, :n_informative])
Kx_red = linear_kernel(X[:, n_informative:(n_informative + n_redundant)])
Kx_noise = linear_kernel(X[:, -n_noise:])
Ky = np.outer(y, y)
# --------------------------------------
# Fit the transformer
Kx_l = [Kx_red, Kx_noise, Kx_noise, Kx_inf]
trans = LinearMKLer(method="alignf").fit(Kx_l, Ky)
# Check the kernel weights
np.testing.assert_equal(trans._kernel_weights, np.array([0, 0, 0, 1]))
self.assertEqual(np.linalg.norm(trans._kernel_weights), 1.)
# Transform (combine) the kernels
Kx_mkl = trans.transform(Kx_l)
np.testing.assert_equal(kernel_alignment(Kx_mkl, Ky), kernel_alignment(Kx_inf, Ky))
# --------------------------------------
# Fit the transformer
Kx_l = [Kx_red, Kx_noise, Kx_noise, Kx_inf]
trans = LinearMKLer(method="alignf", center_before_combine=True).fit(Kx_l, Ky)
# Check the kernel weights
np.testing.assert_equal(trans._kernel_weights,
|
np.array([0, 0, 0, 1])
|
numpy.array
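The alignment-based kernel weights tested above come from the kernel alignment score: the Frobenius inner product of two kernel matrices normalized by their Frobenius norms (the boolean flag in the tests presumably toggles centering). A small sketch of that score with plain NumPy; this is the textbook formula, not necessarily the exact rosvm implementation:
import numpy as np

def kernel_alignment_score(Ka, Kb):
    # <Ka, Kb>_F / (||Ka||_F * ||Kb||_F)
    num = np.sum(Ka * Kb)
    return num / (np.linalg.norm(Ka) * np.linalg.norm(Kb))

X = np.random.default_rng(0).normal(size=(20, 5))
K = X @ X.T
print(kernel_alignment_score(K, K))   # a kernel is perfectly aligned with itself (~1.0)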
|
""" Unit tests for the NURBS curve computations """
# -------------------------------------------------------------------------------------------------------------------- #
# Importing packages
# -------------------------------------------------------------------------------------------------------------------- #
import sys
import os
import time
import pdb
import numpy as np
import nurbspy as nrb
import scipy.integrate
# -------------------------------------------------------------------------------------------------------------------- #
# Prepare the NURBS curve test suite
# -------------------------------------------------------------------------------------------------------------------- #
def test_nurbs_curve_float_input():
""" Test the Bezier curve computation in scalar mode for real and complex input """
# Define the array of control points
P = np.zeros((2, 2))
P[:, 0] = [0.00, 0.00]
P[:, 1] = [1.00, 1.00]
# Create the NURBS curve
bezierCurve = nrb.NurbsCurve(control_points=P)
# Check real
values_real = bezierCurve.get_value(u=0.5).flatten()
assert np.sum((values_real - np.asarray([0.5, 0.5])) ** 2) ** (1 / 2) < 1e-6
# Check complex
values_complex = bezierCurve.get_value(u=0.5 + 0.5j).flatten()
assert np.sum((values_complex - np.asarray([0.5 + 0.5j, 0.5 + 0.5j])) ** 2) ** (1 / 2) < 1e-6
def test_nurbs_curve_integer_input():
""" Test the Bezier curve computation in scalar mode for real and complex input """
# Define the array of control points
P = np.zeros((2, 2))
P[:, 0] = [0.00, 0.00]
P[:, 1] = [1.00, 1.00]
# Create the NURBS curve
bezierCurve = nrb.NurbsCurve(control_points=P)
# Check u=0
values_real = bezierCurve.get_value(u=0).flatten()
assert np.sum((values_real - np.asarray([0.0, 0.0])) ** 2) ** (1 / 2) < 1e-6
# Check u=1
values_real = bezierCurve.get_value(u=1).flatten()
assert np.sum((values_real - np.asarray([1.0, 1.0])) ** 2) ** (1 / 2) < 1e-6
def test_nurbs_curve_endpoint_interpolation():
""" Test the NURBS curve end-point interpolation property """
# Define the array of control points
P = np.zeros((3, 4))
P[:, 0] = [1.00, 2.00, 1.00]
P[:, 1] = [2.00, 1.00, 2.00]
P[:, 2] = [3.00, 2.00, 3.00]
P[:, 3] = [4.00, 1.00, 4.00]
# Define the array of control point weights
W = np.asarray([2.00, 3.00, 1.00, 2.00])
# Maximum index of the control points (counting from zero)
n = np.shape(P)[1] - 1
# Define the order of the basis polynomials
p = 2
# Define the knot vector (clamped spline)
# p+1 zeros, n-p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
# Create the NURBS curve
nurbsCurve = nrb.NurbsCurve(control_points=P, weights=W, degree=p, knots=U)
# Check the corner point values
assert np.sum((nurbsCurve.get_value(u=0.00).flatten() - P[:, 0]) ** 2) ** (1 / 2) < 1e-6
assert np.sum((nurbsCurve.get_value(u=1.00).flatten() - P[:, -1]) ** 2) ** (1 / 2) < 1e-6
def test_nurbs_curve_endpoint_curvature():
""" Test the NURBS curve end-point curvature property """
# Define the array of control points
P = np.zeros((3,11))
P[:, 0] = [0.00, 0.00, 0.00]
P[:, 1] = [0.10, 0.50, 0.00]
P[:, 2] = [0.20, 0.00, 0.50]
P[:, 3] = [0.30, 0.50, 1.00]
P[:, 4] = [0.40, 0.00, 0.50]
P[:, 5] = [0.50, 0.50, 0.00]
P[:, 6] = [0.60, 0.00, 0.50]
P[:, 7] = [0.70, 0.50, 1.00]
P[:, 8] = [0.80, 0.00, 0.50]
P[:, 9] = [0.90, 0.00, 0.00]
P[:, 10] = [1.00, 0.00, 0.00]
# Maximum index of the control points (counting from zero)
n = np.shape(P)[1] - 1
# Define the array of control point weights
W = np.asarray([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
# Define the order of the basis polynomials
p = 4
# Define the knot vector (clamped spline)
# p+1 zeros, n-p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
# Create the NURBS curve
myCurve = nrb.NurbsCurve(control_points=P, weights=W, degree=p, knots=U)
# Get NURBS curve parameters
p = myCurve.p
U = myCurve.U
P = myCurve.P
W = myCurve.W
n = np.shape(P)[1] - 1
# Get the endpoint curvature numerically
curvature_1a = myCurve.get_curvature(u=0.00)[0]
curvature_1b = myCurve.get_curvature(u=1.00)[0]
# Get the endpoint curvature analytically
curvature_2a = (p - 1) / p * (U[p+1] / U[p+2]) * (W[2] * W[0] / W[1]**2) * \
np.sum(np.cross(P[:, 1] - P[:, 0], P[:, 2] - P[:, 0])**2)**(1/2) * \
np.sum((P[:, 1] - P[:, 0])**2)**(-3/2)
curvature_2b = (p - 1) / p * (1 - U[n]) / (1 - U[n-1]) * (W[n] * W[n-2] / W[n-1]**2) * \
np.sum(np.cross(P[:, n-1] - P[:, n], P[:, n-2] - P[:, n])**2)**(1/2) * \
np.sum((P[:, n-1] - P[:, n])**2)**(-3/2)
# Check the error
error_curvature_start = np.sqrt((curvature_1a - curvature_2a) ** 2)
error_curvature_end = np.sqrt((curvature_1b - curvature_2b) ** 2)
print('Start point curvature error : ', error_curvature_start)
print('End point curvature error : ', error_curvature_end)
assert error_curvature_start < 1e-10
assert error_curvature_end < 1e-6
def test_nurbs_curve_arclength():
""" Test the NURBS curve arc-length computation """
# Define the array of control points
P = np.zeros((3,11))
P[:, 0] = [0.00, 0.00, 0.00]
P[:, 1] = [0.10, 0.50, 0.00]
P[:, 2] = [0.20, 0.00, 0.50]
P[:, 3] = [0.30, 0.50, 1.00]
P[:, 4] = [0.40, 0.00, 0.50]
P[:, 5] = [0.50, 0.50, 0.00]
P[:, 6] = [0.60, 0.00, 0.50]
P[:, 7] = [0.70, 0.50, 1.00]
P[:, 8] = [0.80, 0.00, 0.50]
P[:, 9] = [0.90, 0.00, 0.00]
P[:, 10] = [1.00, 0.00, 0.00]
# Maximum index of the control points (counting from zero)
n = np.shape(P)[1] - 1
# Define the array of control point weights
W = np.asarray([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
# Define the order of the basis polynomials
p = 4
# Define the knot vector (clamped spline)
# p+1 zeros, n-p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
# Create the NURBS curve
myCurve = nrb.NurbsCurve(control_points=P, weights=W, degree=p, knots=U)
# Compute the arc length using fixed order quadrature
length_fixed = myCurve.get_arclength()
# Compute the arc length using adaptative quadrature
def get_arclegth_differential(u):
dCdu = myCurve.get_derivative(u, order=1)
dLdu = np.sqrt(np.sum(dCdu ** 2, axis=0)) # dL/du = [(dx_0/du)^2 + ... + (dx_n/du)^2]^(1/2)
return dLdu
length_adaptative = scipy.integrate.quad(get_arclegth_differential, 0, 1)[0]
# Check the arc length error
arc_length_error = np.abs(length_fixed - length_adaptative)
print("The arc length computation error is : ", arc_length_error)
assert arc_length_error < 0.04
def test_nurbs_curve_example_1():
""" Test the B-Spline curve computation against a known example (Ex2.2 from the NURBS book) """
# Define the array of control points
P2 = np.asarray([1, 3, 5])
P3 = np.asarray([2, 1, 4])
P4 = np.asarray([3, 0, 6])
P = np.zeros((3, 8))
P[:, 2] = P2
P[:, 3] = P3
P[:, 4] = P4
# Define the order of the basis polynomials
p = 2
# Define the knot vector (clamped spline)
# p+1 zeros, n-p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
# u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10
U = np.asarray([0.00, 0.00, 0.00, 1.00, 2.00, 3.00, 4.00, 4.00, 5.00, 5.00, 5.00])
# Create the B-Spline curve
myBSpline = nrb.NurbsCurve(control_points=P, knots=U, degree=p)
# Evaluate the B-Spline curve numerically
u = 5/2 # u-parameter
values_numeric = myBSpline.get_value(u)
# Evaluate the B-Spline curve analytically (NURBS book page 82)
values_analytic = (1/8*P2 + 6/8*P3 + 1/8*P4)[:, np.newaxis]
# Check the error
error = np.sum((values_analytic - values_numeric) ** 2) ** (1 / 2)
print('The two-norm of the evaluation error is : ', error)
assert error < 1e-8
def test_nurbs_curve_example_2():
""" Test the NURBS curve value against a known example (NURBS book section 7.5) """
# Create a circle using 4 arcs of 90 degrees
# Define the array of control points
P = np.zeros((2, 9))
P[:, 0] = [1.00, 0.00]
P[:, 1] = [1.00, 1.00]
P[:, 2] = [0.00, 1.00]
P[:, 3] = [-1.00, 1.00]
P[:, 4] = [-1.00, 0.00]
P[:, 5] = [-1.00, -1.00]
P[:, 6] = [0.00, -1.00]
P[:, 7] = [1.00, -1.00]
P[:, 8] = [1.00, 0.00]
# Define the array of control point weights
W = np.asarray([1, np.sqrt(2) / 2, 1, np.sqrt(2) / 2, 1, np.sqrt(2) / 2, 1, np.sqrt(2) / 2, 1])
# Define the order of the basis polynomials
p = 2
# Define the knot vector (clamped spline)
# p+1 zeros, n minus p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.asarray([0, 0, 0, 1 / 4, 1 / 4, 1 / 2, 1 / 2, 3 / 4, 3 / 4, 1, 1, 1])
# Create the NURBS curve
myCircle = nrb.NurbsCurve(P, W, p, U)
# Define the u-parametrization
u = np.linspace(0, 1, 101)
# Check the radius error
coords = myCircle.get_value(u)
radius_error = np.sum((np.sum((coords)**2, axis=0) - 1) ** 2) ** (1/2)
print('The two-norm of the evaluation error is : ', radius_error)
assert radius_error < 1e-8
def test_nurbs_curve_example_3():
""" Test the NURBS curve value against a known example (NURBS book section 7.5) """
# Create a circle using 3 arcs of 120 degrees
# Define the array of control points
a = np.cos(np.pi / 6)
P = np.zeros((2, 7))
P[:, 0] = [a, 1 / 2]
P[:, 1] = [0, 2]
P[:, 2] = [-a, 1 / 2]
P[:, 3] = [-2 * a, -1]
P[:, 4] = [0, -1]
P[:, 5] = [2 * a, -1]
P[:, 6] = [a, 1 / 2]
# Define the array of control point weights
W = np.asarray([1, 1 / 2, 1, 1 / 2, 1, 1 / 2, 1])
# Define the order of the basis polynomials
p = 2
# Define the knot vector (clamped spline)
# p+1 zeros, n minus p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.asarray([0, 0, 0, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 1, 1, 1])
# Create the NURBS curve
myCircle = nrb.NurbsCurve(P, W, p, U)
# Define the u-parametrization
u = np.linspace(0, 1, 101)
# Check the radius error
coords = myCircle.get_value(u)
radius_error = np.sum((np.sum((coords)**2, axis=0) - 1) ** 2) ** (1/2)
print('The two-norm of the evaluation error is : ', radius_error)
assert radius_error < 1e-8
def test_nurbs_curve_example_4():
""" Test the computation of a circular NURBS curve value, curvature and arc-length in 2D """
# Set defining parameters
O = np.asarray([0.00, 1.00]) # Circle center
X = np.asarray([1.00, 0.00]) # Abscissa direction
Y = np.asarray([0.00, 1.00]) # Ordinate direction
R = 0.5 # Circle radius
theta_start = 1 / 6 * np.pi # Start angle
theta_end = 3 / 2 * np.pi - 1 / 6 * np.pi # End angle
# Create and plot the circular arc
my_circular_arc = nrb.CircularArc(O, X, Y, R, theta_start, theta_end).NurbsCurve
# Define the u-parametrization
u = np.linspace(0, 1, 101)
# Check the radius error
coords = my_circular_arc.get_value(u)
radius_error = np.sum((np.sum((coords-O[:, np.newaxis])**2, axis=0) - R**2) ** 2) ** (1/2)
print('The two-norm of the radius error is : ', radius_error)
assert radius_error < 1e-8
    # Check the curvature error
curvature = my_circular_arc.get_curvature(u)
curvature_error = np.sum((curvature - 1 / R) ** 2) ** (1 / 2)
print("The two-norm of the curvature error is : ", curvature_error)
assert curvature_error < 1e-8
# Check the arc length error
arc_length = my_circular_arc.get_arclength()
arc_length_error = arc_length - R * np.abs(theta_end - theta_start)
print("The arc length computation error is : ", arc_length_error)
assert arc_length_error < 1e-2
def test_nurbs_curve_example_5():
""" Test the computation of a circular NURBS curve value, curvature and arc-length in 3D """
# Set defining parameters
O = np.asarray([0.00, 0.00, 0.50]) # Circle center
X = np.asarray([3.00, 0.00, 0.00]) # Abscissa direction
Y = np.asarray([0.00, 1.00, 0.00]) # Ordinate direction
R = 0.5 # Circle radius
theta_start = 0.00 # Start angle
theta_end = np.pi/2 # End angle
# Create and plot the circular arc
my_circular_arc = nrb.CircularArc(O, X, Y , R, theta_start, theta_end).NurbsCurve
# Define the u-parametrization
u = np.linspace(0, 1, 101)
# Check the radius error
coords = my_circular_arc.get_value(u)
radius_error = np.sum((np.sum((coords-O[:, np.newaxis])**2, axis=0) - R**2) ** 2) ** (1/2)
print('The two-norm of the radius error is : ', radius_error)
assert radius_error < 1e-8
    # Check the curvature error
curvature = my_circular_arc.get_curvature(u)
curvature_error = np.sum((curvature - 1 / R) ** 2) ** (1 / 2)
print("The two-norm of the curvature error is : ", curvature_error)
assert curvature_error < 1e-8
# Check the arc length error
arc_length = my_circular_arc.get_arclength()
arc_length_error = arc_length - R * np.abs(theta_end - theta_start)
print("The arc length computation error is : ", arc_length_error)
assert arc_length_error < 1e-2
# Check the Frenet-Serret frame of reference computation (starting point)
assert np.sum((my_circular_arc.get_tangent(u=0.00) - np.asarray([0, 1, 0])[:, np.newaxis]) ** 2) ** (1 / 2) < 1e-6
assert np.sum((my_circular_arc.get_normal(u=0.00) -
|
np.asarray([-1, 0, 0])
|
numpy.asarray
|