| prompt (string, length 15 to 655k) | completion (string, length 3 to 32.4k) | api (string, length 8 to 52) |
|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
from fitter import *
pth = 'D:\\data\\2018-09-08\\14-49-52_omit_pump_pw_sw_mode2\\'
powers, freq, mag, phase = np.loadtxt(pth+'SMF_power_set_S21_frequency_set.dat', unpack=True)
powers = powers[::1001]
freq = freq[:1001]
mag = np.array_split(mag, 11)
phase =
|
np.array_split(phase, 11)
|
numpy.array_split
|
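A minimal, runnable sketch of the numpy.array_split call from the completion above; the array here is an illustrative stand-in for the flattened phase trace, and the 11-way split mirrors the 11 power settings in the prompt:

import numpy as np

phase_flat = np.arange(11 * 1001, dtype=float)  # stand-in for the flattened phase data
phase_rows = np.array_split(phase_flat, 11)     # list of 11 sub-arrays of (nearly) equal length
print(len(phase_rows), phase_rows[0].shape)     # -> 11 (1001,)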
from typing import List, Optional
import numpy as np
import copy
from .block import Block
from .het_block import HetBlock
from ..classes import SteadyStateDict, JacobianDict, ImpulseDict
from ..utilities.ordered_set import OrderedSet
from ..utilities.function import ExtendedFunction, CombinedExtendedFunction
from ..utilities.bijection import Bijection
from ..utilities.optimized_routines import within_tolerance
from .. import utilities as utils
from .support.law_of_motion import LawOfMotion
from .support.stages import Stage
class StageBlock(Block):
def __init__(self, stages: List[Stage], backward_init=None, hetinputs=None, name=None):
super().__init__()
inputs = OrderedSet([])
outputs = OrderedSet([])
stages = make_all_into_stages(stages)
for i, stage in enumerate(stages):
# external inputs are whatever you don't take from next stage
inputs |= (stage.inputs - stages[(i+1) % len(stages)].backward_outputs)
outputs |= stage.report
# TODO: should have internals
self.constructor_checks(stages, inputs, outputs)
self.stages = stages
self.inputs = inputs
self.outputs = OrderedSet([o.upper() for o in outputs])
self.M_outputs = Bijection({o: o.upper() for o in outputs})
self.save_original()
if name is None:
name = stages[0].name + "_to_" + stages[-1].name
self.name = name
if hetinputs is not None:
hetinputs = CombinedExtendedFunction(hetinputs)
self.process_hetinputs(hetinputs, tocopy=False)
if backward_init is not None:
backward_init = ExtendedFunction(backward_init)
self.backward_init = backward_init
@staticmethod
def constructor_checks(stages, inputs, outputs):
# inputs, outputs, and combined backward should not overlap at all
if not inputs.isdisjoint(outputs):
raise ValueError(f'inputs and outputs have overlap {inputs & outputs}')
backward_all = set().union(*(stage.backward_outputs for stage in stages))
if not inputs.isdisjoint(backward_all):
            raise ValueError(f'Some stage takes the backward output {inputs & backward_all} of a stage other than its immediate successor as an input')
if not outputs.isdisjoint(backward_all):
raise ValueError(f'Outputs and backward have overlap {outputs & backward_all}')
# 'D', 'law_of_motion' are protected names; outputs should not be upper case
for stage in stages:
if stage.name in ['D', 'law_of_motion']:
raise ValueError(f"Stage '{stage.name}' has invalid name")
for o in stage.report:
if o in ['d', 'law_of_motion']:
raise ValueError(f"Stages are not allowed to return outputs called 'd' or 'law_of_motion' but stage '{stage.name}' does")
if o.isupper():
raise ValueError(f"Stages are not allowed to report upper-case outputs. Stage '{stage.name}' has an output '{o}'")
def __repr__(self):
return f"<StageBlock '{self.name}' with stages {[k.name for k in self.stages]}>"
def _steady_state(self, calibration, backward_tol=1E-9, backward_maxit=5000,
forward_tol=1E-10, forward_maxit=100_000):
ss = self.extract_ss_dict(calibration)
hetinputs = self.return_hetinputs(ss)
ss.update(hetinputs)
self.initialize_backward(ss)
backward, report, lom = self.backward_steady_state(ss, backward_tol, backward_maxit)
# get initialized distribution
try:
Dinit = ss[self.stages[0].name]['D']
except KeyError:
# assume that beginning-of-first-stage distribution is uniform, with
# same dimensions as ANY backward input to final stage / backward output from first stage
backward_last = backward[-1]
backward_example = backward_last[list(backward_last)[0]]
Dinit = np.full(backward_example.shape, 1/backward_example.size)
D = self.forward_steady_state(Dinit, lom, forward_tol, forward_maxit)
aggregates = {}
# initialize internals with hetinputs, then add stage-level internals
internals = hetinputs
for i, stage in enumerate(self.stages):
# aggregate everything to report
for k in stage.report:
aggregates[k.upper()] = np.vdot(D[i], report[i][k])
# put individual-level report, end-of-stage backward, and beginning-of-stage dist in internals
internals[stage.name] = {**backward[i], **report[i],
'law_of_motion': lom[i], 'D': D[i]}
# put all inputs to the block into aggregates
for k in self.M.inv @ self.inputs:
aggregates[k] = ss[k]
return SteadyStateDict(aggregates, {self.name: internals})
def _impulse_nonlinear(self, ssin, inputs, outputs, ss_initial):
ss = self.extract_ss_dict(ssin)
if ss_initial is not None:
ss[self.stages[0].name]['D'] = ss_initial[self.name][self.stages[0].name]['D']
# report_path is dict(stage: {output: TxN-dim array})
# lom_path is list[t][stage] in chronological order
report_path, lom_path = self.backward_nonlinear(ss, inputs)
# D_path is dict(stage: TxN-dim array)
D_path = self.forward_nonlinear(ss, lom_path)
aggregates = {}
for stage in self.stages:
for o in stage.report:
if self.M_outputs @ o in outputs:
aggregates[self.M_outputs @ o] = utils.optimized_routines.fast_aggregate(D_path[stage.name], report_path[stage.name][o])
return ImpulseDict(aggregates, T=inputs.T) - ssin
def _impulse_linear(self, ss, inputs, outputs, Js):
return ImpulseDict(self._jacobian(ss, list(inputs.keys()), outputs, inputs.T).apply(inputs))
def _jacobian(self, ss, inputs, outputs, T):
ss = self.extract_ss_dict(ss)
outputs = self.M_outputs.inv @ outputs
differentiable_hetinput = self.preliminary_hetinput(ss, h=1E-4)
backward_data, forward_data, expectations_data = self.preliminary_all_stages(ss)
# step 1
curlyYs, curlyDs = {}, {}
for i in inputs:
curlyYs[i], curlyDs[i] = self.backward_fakenews(i, outputs, T, backward_data, forward_data, differentiable_hetinput)
# step 2
curlyEs = {}
for o in outputs:
curlyEs[o] = self.expectation_vectors(o, T-1, expectations_data)
# steps 3-4
F, J = {}, {}
for o in outputs:
for i in inputs:
if o.upper() not in F:
F[o.upper()] = {}
if o.upper() not in J:
J[o.upper()] = {}
F[o.upper()][i] = HetBlock.build_F(curlyYs[i][o], curlyDs[i], curlyEs[o])
J[o.upper()][i] = HetBlock.J_from_F(F[o.upper()][i])
return JacobianDict(J, name=self.name, T=T)
'''Steady-state backward and forward methods'''
def backward_steady_state(self, ss, tol=1E-9, maxit=5000):
# 'backward' will be dict with backward output of first stage
# (i.e. input to last stage) from the most recent time iteration
# initializer for first iteration should be in 'ss'
backward = {k: ss[k] for k in self.stages[0].backward_outputs}
# iterate until end-of-final-stage backward inputs converge
for it in range(maxit):
backward_new = self.backward_step_steady_state(backward, ss)
if it % 10 == 0 and all(within_tolerance(backward_new[k], backward[k], tol) for k in backward):
break
backward = backward_new
else:
raise ValueError(f'No convergence after {maxit} backward iterations!')
# one more iteration to get backward INPUTS, reported outputs, and law of motion for all stages
return self.backward_step_nonlinear(backward, ss)[:3]
def backward_step_steady_state(self, backward, inputs):
"""Iterate backward through all stages for a single period, ignoring reported outputs"""
for stage in reversed(self.stages):
backward, _ = stage.backward_step_separate({**inputs, **backward})
return backward
def backward_step_nonlinear(self, backward, inputs):
# append backward INPUT to final stage
backward_all = [backward]
report_all = []
lom_all = []
for stage in reversed(self.stages):
(backward, report), lom = stage.backward_step_separate({**inputs, **backward}, lawofmotion=True, hetoutputs=True)
# append backward OUTPUT, reported outputs, and law of motion for each stage, in reverse chronological order
backward_all.append(backward)
report_all.append(report)
lom_all.append(lom)
# return backward INPUT, report, and lom for each stage, with stages now in chronological order
# (to get backward inputs, skip first chronological entry of backward_all, which is backward output of first stage,
# return that entry separately as the fourth output of this function)
return backward_all[::-1][1:], report_all[::-1], lom_all[::-1], backward_all[-1]
def forward_steady_state(self, D, lom: List[LawOfMotion], tol=1E-10, maxit=100_000):
"""Find steady-state beginning-of-stage distributions for all stages"""
# iterate until beginning-of-stage distribution for first stage converges
for it in range(maxit):
D_new = self.forward_step_steady_state(D, lom)
if it % 10 == 0 and within_tolerance(D, D_new, tol):
break
D = D_new
else:
raise ValueError(f'No convergence after {maxit} forward iterations!')
# one more iteration to get beginning-of-stage in *all* stages
return self.forward_step_nonlinear(D, lom)[0]
def forward_step_steady_state(self, D, loms: List[LawOfMotion]):
"""Given beginning-of-first-stage distribution, apply laws of motion in 'loms'
for each stage to get end-of-final-stage distribution, which is returned"""
for lom in loms:
D = lom @ D
return D
def forward_step_nonlinear(self, D, loms: List[LawOfMotion]):
Ds = [D]
for i, lom in enumerate(loms):
Ds.append(lom @ Ds[i])
# return all beginning-of-stage Ds this period, then beginning-of-period next period
return Ds[:-1], Ds[-1]
'''Nonlinear backward and forward methods'''
def backward_nonlinear(self, ss, inputs):
indict = ss.copy()
T = inputs.T
# populate backward with steady-state backward inputs to final stage (stored under final stage in ss dict)
backward = {k: ss[self.stages[-1].name][k] for k in self.stages[0].backward_outputs}
# report_path is dict(stage: {output: TxN-dim array})
report_path = {stage.name: {o:
|
np.empty((T,) + ss[stage.name][o].shape)
|
numpy.empty
|
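A minimal sketch of the numpy.empty pre-allocation pattern used in the completion above; T and the steady-state shape are illustrative placeholders, and np.empty returns uninitialized memory, so every slice must be written before it is read:

import numpy as np

T = 300                              # illustrative number of periods
ss_shape = (4, 50)                   # stand-in for ss[stage.name][o].shape
path = np.empty((T,) + ss_shape)     # uninitialized (300, 4, 50) buffer
for t in range(T):
    path[t] = 0.0                    # fill each period before using the array
print(path.shape)                    # -> (300, 4, 50)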
from fastai.tabular import *
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
import os
import sys
import glob
from sklearn.utils import shuffle
dep_var = 'Label'
cat_names = ['Dst Port', 'Protocol']
cont_names = ['Timestamp', 'Flow Duration', 'Tot Fwd Pkts',
'Tot Bwd Pkts', 'TotLen Fwd Pkts', 'TotLen Bwd Pkts', 'Fwd Pkt Len Max',
'Fwd Pkt Len Min', 'Fwd Pkt Len Mean', 'Fwd Pkt Len Std',
'Bwd Pkt Len Max', 'Bwd Pkt Len Min', 'Bwd Pkt Len Mean',
'Bwd Pkt Len Std', 'Flow Byts/s', 'Flow Pkts/s', 'Flow IAT Mean',
'Flow IAT Std', 'Flow IAT Max', 'Flow IAT Min', 'Fwd IAT Tot',
'Fwd IAT Mean', 'Fwd IAT Std', 'Fwd IAT Max', 'Fwd IAT Min',
'Bwd IAT Tot', 'Bwd IAT Mean', 'Bwd IAT Std', 'Bwd IAT Max',
'Bwd IAT Min', 'Fwd PSH Flags', 'Bwd PSH Flags', 'Fwd URG Flags',
'Bwd URG Flags', 'Fwd Header Len', 'Bwd Header Len', 'Fwd Pkts/s',
'Bwd Pkts/s', 'Pkt Len Min', 'Pkt Len Max', 'Pkt Len Mean',
'Pkt Len Std', 'Pkt Len Var', 'FIN Flag Cnt', 'SYN Flag Cnt',
'RST Flag Cnt', 'PSH Flag Cnt', 'ACK Flag Cnt', 'URG Flag Cnt',
'CWE Flag Count', 'ECE Flag Cnt', 'Down/Up Ratio', 'Pkt Size Avg',
'Fwd Seg Size Avg', 'Bwd Seg Size Avg', 'Fwd Byts/b Avg',
'Fwd Pkts/b Avg', 'Fwd Blk Rate Avg', 'Bwd Byts/b Avg',
'Bwd Pkts/b Avg', 'Bwd Blk Rate Avg', 'Subflow Fwd Pkts',
'Subflow Fwd Byts', 'Subflow Bwd Pkts', 'Subflow Bwd Byts',
'Init Fwd Win Byts', 'Init Bwd Win Byts', 'Fwd Act Data Pkts',
'Fwd Seg Size Min', 'Active Mean', 'Active Std', 'Active Max',
'Active Min', 'Idle Mean', 'Idle Std', 'Idle Max', 'Idle Min']
dataPath = 'CleanedTrafficData' # use your path
resultPath = 'results/fastai'
def loadData(fileName):
dataFile = os.path.join(dataPath, fileName)
pickleDump = '{}.pickle'.format(dataFile)
if os.path.exists(pickleDump):
df = pd.read_pickle(pickleDump)
else:
df = pd.read_csv(dataFile)
df = df.dropna()
df = shuffle(df)
df.to_pickle(pickleDump)
return df
def experimentIndividual(dataFile, epochs=5, normalize=False):
# procs = [FillMissing, Categorify, Normalize]
procs = [FillMissing, Categorify]
if normalize:
procs.append(Normalize)
seed = 7
|
np.random.seed(seed)
|
numpy.random.seed
|
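A minimal sketch of numpy.random.seed as used in the completion above; seeding the legacy global generator makes the subsequent draws reproducible across runs:

import numpy as np

np.random.seed(7)
first = np.random.rand(3)
np.random.seed(7)
second = np.random.rand(3)
print(np.array_equal(first, second))  # -> True: identical draws after re-seeding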
import os
from numpy.core.numeric import True_
import torch
import glob
from torch import optim
import numpy as np
import time
import argparse
from load_data import NUM_WRITERS
from network_tro import ConTranModel
from load_data import loadData as load_data_func
from loss_tro import CER
import wandb
parser = argparse.ArgumentParser(description='seq2seq net', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('start_epoch', type=int, help='load saved weights from which epoch')
args = parser.parse_args()
gpu = torch.device('cuda')
OOV = True
NUM_THREAD = 2
EARLY_STOP_EPOCH = None
EVAL_EPOCH = 50 #50
MODEL_SAVE_EPOCH = 200
show_iter_num = 500 #500
LABEL_SMOOTH = True
Bi_GRU = True
VISUALIZE_TRAIN = True
BATCH_SIZE = 8
lr_dis = 1 * 1e-4 # 1e-4
lr_gen = 1 * 1e-4 # 1e-4
lr_rec = 1 * 1e-5 # 1e-5
lr_cla = 1 * 1e-5 # 1e-5
############################# wandb ####################################
#import wandb
#
#wandb.init(project="Handwriting-GAN-project", entity="loolootatchapong")
#wandb.config = {
# "learning_rate_dis": lr_dis,
# "learning_rate_gen": lr_gen,
# "learning_rate_rec": lr_rec,
# "learning_rate_cla": lr_cla,
# "batch_size": BATCH_SIZE,
# "LABEL_SMOOTH" :LABEL_SMOOTH,
# "Bi_GRU" : Bi_GRU,
# "OOV " :OOV
#}
###############################################################################
CurriculumModelID = args.start_epoch
def all_data_loader():
data_train, data_test = load_data_func(OOV)
#print(data_train)
train_loader = torch.utils.data.DataLoader(data_train, collate_fn=sort_batch, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_THREAD, pin_memory=True)
test_loader = torch.utils.data.DataLoader(data_test, collate_fn=sort_batch, batch_size=15, shuffle=False, num_workers=1, pin_memory=True)
print('done loader')
#print(test_loader)
return train_loader, test_loader
def sort_batch(batch):
train_domain = list()
train_wid = list()
train_idx = list()
train_img = list()
train_img_width = list()
train_label = list()
img_xts = list()
label_xts = list()
label_xts_swap = list()
for domain, wid, idx, img, img_width, label, img_xt, label_xt, label_xt_swap in batch:
if wid >= NUM_WRITERS:
print('error!')
train_domain.append(domain)
train_wid.append(wid)
train_idx.append(idx)
train_img.append(img)
#print(np.shape(label))
train_img_width.append(img_width)
train_label.append(label)
img_xts.append(img_xt)
label_xts.append(label_xt)
#print(np.shape(label))
label_xts_swap.append(label_xt_swap)
train_domain = np.array(train_domain)
train_idx = np.array(train_idx)
train_wid = np.array(train_wid, dtype='int64')
train_img = np.array(train_img, dtype='float32')
train_img_width = np.array(train_img_width, dtype='int64')
#print('train_label : ',train_label)
train_label = np.array(train_label, dtype='int64')
img_xts = np.array(img_xts, dtype='float32')
#print(np.shape(label_xts_swap))
label_xts = np.array(label_xts, dtype='int64')
label_xts_swap = np.array(label_xts_swap, dtype='int64')
train_wid = torch.from_numpy(train_wid)
train_img = torch.from_numpy(train_img)
train_img_width = torch.from_numpy(train_img_width)
train_label = torch.from_numpy(train_label)
img_xts = torch.from_numpy(img_xts)
label_xts = torch.from_numpy(label_xts)
label_xts_swap = torch.from_numpy(label_xts_swap)
return train_domain, train_wid, train_idx, train_img, train_img_width, train_label, img_xts, label_xts, label_xts_swap
def train(train_loader, model, dis_opt, gen_opt, rec_opt, cla_opt, epoch):
model.train()
loss_dis = list()
loss_dis_tr = list()
loss_cla = list()
loss_cla_tr = list()
loss_l1 = list()
loss_rec = list()
loss_rec_tr = list()
time_s = time.time()
cer_tr = CER()
cer_te = CER()
cer_te2 = CER()
genlist=[]
TAG=0
for train_data_list in train_loader:
TAG+=1
time_s_update = time.time()
print('NOW ...... rec update' )
'''rec update'''
rec_opt.zero_grad()
l_rec_tr = model(train_data_list, epoch, 'rec_update', cer_func = cer_tr,Tag = TAG)
rec_opt.step()
print('NOW ...... classifier update' )
'''classifier update'''
cla_opt.zero_grad()
l_cla_tr = model(train_data_list, epoch, 'cla_update',Tag= TAG)
cla_opt.step()
print('NOW ...... dis update' )
'''dis update'''
dis_opt.zero_grad()
l_dis_tr = model(train_data_list, epoch, 'dis_update',Tag= TAG)
dis_opt.step()
print('NOW ...... gen update' )
'''gen update'''
gen_opt.zero_grad()
l_total, l_dis, l_cla, l_l1, l_rec = model(train_data_list, epoch, 'gen_update', cer_func = [cer_te, cer_te2],Tag= TAG)
gen_opt.step()
loss_dis.append(l_dis.cpu().item())
loss_dis_tr.append(l_dis_tr.cpu().item())
loss_cla.append(l_cla.cpu().item())
loss_cla_tr.append(l_cla_tr.cpu().item())
loss_l1.append(l_l1.cpu().item())
loss_rec.append(l_rec.cpu().item())
loss_rec_tr.append(l_rec_tr.cpu().item())
genD_time = time.time()-time_s_update
print('gen update time : ',genD_time)
genlist.append(genD_time)
fl_dis = np.mean(loss_dis)
fl_dis_tr = np.mean(loss_dis_tr)
fl_cla = np.mean(loss_cla)
fl_cla_tr = np.mean(loss_cla_tr)
fl_l1 = np.mean(loss_l1)
fl_rec = np.mean(loss_rec)
fl_rec_tr = np.mean(loss_rec_tr)
res_cer_tr = cer_tr.fin()
res_cer_te = cer_te.fin()
res_cer_te2 = cer_te2.fin()
print('gen epoch time : ',sum(genlist))
#####################################
wandb.log({"Train : gen_time": sum(genlist)})
wandb.log({"Train : loss_dis": fl_dis})
wandb.log({"Train : loss_dis_tr": fl_dis_tr})
wandb.log({"Train : loss_cla": fl_cla})
wandb.log({"Train : loss_cla_tr": fl_cla_tr})
wandb.log({"Train : loss_l1": fl_l1})
wandb.log({"Train : loss_rec": fl_rec})
wandb.log({"Train : loss_rec_tr": fl_rec_tr})
wandb.log({"Train : loss_rec_tr": res_cer_tr})
wandb.log({"Train : loss_rec_tr": res_cer_te})
wandb.log({"Train : loss_rec_tr": res_cer_te2})
wandb.log({"Train : time ": time.time()-time_s})
wandb.log({"Train : epoch ": epoch})
#####################################
print('time traint function : ', time.time()-time_s)
print('epo%d <tr>-<gen>: l_dis=%.2f-%.2f, l_cla=%.2f-%.2f, l_rec=%.2f-%.2f, l1=%.2f, cer=%.2f-%.2f-%.2f, time=%.1f' % (epoch, fl_dis_tr, fl_dis, fl_cla_tr, fl_cla, fl_rec_tr, fl_rec, fl_l1, res_cer_tr, res_cer_te, res_cer_te2, time.time()-time_s))
return res_cer_te + res_cer_te2
def test(test_loader, epoch, modelFile_o_model):
print('Now Testing')
TAG=0
if type(modelFile_o_model) == str:
model = ConTranModel(NUM_WRITERS, show_iter_num, OOV).to(gpu)
print('Loading ' + modelFile_o_model)
model.load_state_dict(torch.load(modelFile_o_model)) #load
else:
model = modelFile_o_model
model.eval()
loss_dis = list()
loss_cla = list()
loss_rec = list()
time_s = time.time()
cer_te = CER()
cer_te2 = CER()
for test_data_list in test_loader:
TAG +=1
l_dis, l_cla, l_rec = model(test_data_list, epoch, 'eval',Tag =TAG , cer_func =[cer_te, cer_te2])
loss_dis.append(l_dis.cpu().item())
loss_cla.append(l_cla.cpu().item())
loss_rec.append(l_rec.cpu().item())
fl_dis = np.mean(loss_dis)
fl_cla = np.mean(loss_cla)
fl_rec =
|
np.mean(loss_rec)
|
numpy.mean
|
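A minimal sketch of the numpy.mean call from the completion above, collapsing the per-batch reconstruction losses collected during the epoch into a single scalar:

import numpy as np

loss_rec = [0.91, 0.84, 0.79, 0.75]  # illustrative per-batch loss values
fl_rec = np.mean(loss_rec)           # np.mean accepts a plain Python list
print(fl_rec)                        # -> 0.8225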
import numpy as np
import pytest
from edutorch.nn import Linear
from edutorch.optim import RMSProp
def test_rmsprop(monkeypatch: pytest.MonkeyPatch) -> None:
N, D = 4, 5
w =
|
np.linspace(-0.4, 0.6, num=N * D)
|
numpy.linspace
|
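A minimal sketch of the numpy.linspace call from the completion above, building N*D evenly spaced weights that can then be reshaped into an (N, D) matrix for the Linear layer:

import numpy as np

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N * D).reshape(N, D)  # 20 evenly spaced values
print(w.shape, w[0, 0], w[-1, -1])                   # -> (4, 5) with endpoints -0.4 and 0.6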
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
import datetime
# Third-party
import astropy.units as u
from astropy.time import Time
import pytest
import numpy as np
from numpy.testing import assert_allclose
import pytz
from astropy.coordinates import (EarthLocation, Latitude, Longitude, SkyCoord,
AltAz, Angle)
from astropy.tests.helper import assert_quantity_allclose
# Package
from ..observer import Observer
from ..target import FixedTarget
from ..exceptions import TargetAlwaysUpWarning, TargetNeverUpWarning
def test_Observer_constructor_location():
"""
Show that location defined by latitude/longitude/elevation is parsed
identically to passing in an `~astropy.coordinates.EarthLocation` directly.
"""
lat = '+19:00:00'
lon = '-155:00:00'
elevation = 0.0 * u.m
location = EarthLocation.from_geodetic(lon, lat, elevation)
environment_kwargs = dict(pressure=1*u.bar, relative_humidity=0.1,
temperature=10*u.deg_C)
obs1 = Observer(name='Observatory',
latitude=lat,
longitude=lon,
elevation=elevation,
**environment_kwargs)
obs2 = Observer(name='Observatory',
location=location,
**environment_kwargs)
assert obs1.location == obs2.location, ('using latitude/longitude/'
'elevation keywords gave a '
'different answer from passing in '
'an EarthLocation directly')
def test_Observer_altaz():
"""
Check that the altitude/azimuth computed by `Observer.altaz` is similar
to the result from PyEphem when pressure = 0 (no atmosphere) for Vega at
2000-01-01 12:00:00 UTC.
"""
# Define the test case
latitude = '00:00:00'
longitude = '00:00:00'
elevation = 0*u.m
pressure = 0*u.bar # no atmosphere
time = Time('2000-01-01 12:00:00')
vega_coords = SkyCoord('18h36m56.33635s', '+38d47m01.2802s')
# Calculate altitude/azimuth with astroplan
location = EarthLocation.from_geodetic(longitude, latitude, elevation)
astroplan_obs = Observer(name='Observatory', location=location,
pressure=pressure*u.bar)
astroplan_vega = FixedTarget(vega_coords)
altaz = astroplan_obs.altaz(time, astroplan_vega)
astroplan_altitude = altaz.alt
astroplan_azimuth = altaz.az
    # Reference altitude/azimuth computed with PyEphem (see print_pyephem_altaz):
pyephem_altitude = Latitude('51.198848716510874 deg')
pyephem_azimuth = Longitude('358.4676707379987 deg')
# Assert that altitudes/azimuths are within 30 arcsec - this is a wide
# tolerance because the IERS tables used by astroplan may offset astroplan's
# positions due to leap seconds.
tolerance = (30*u.arcsec).to('deg').value
assert_allclose(pyephem_altitude.value, astroplan_altitude.value,
atol=tolerance)
assert_allclose(pyephem_azimuth.value, astroplan_azimuth.value,
atol=tolerance)
# Check that alt/az without target returns AltAz frame
from astropy.coordinates import AltAz
assert isinstance(astroplan_obs.altaz(time), AltAz)
def test_altaz_multiple_targets():
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg)
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
targets = [vega, capella, sirius]
location = EarthLocation(10*u.deg, 45*u.deg, 0*u.m)
times = Time('1995-06-21 00:00:00') + np.linspace(0, 1, 5)*u.day
obs = Observer(location=location)
transformed_coords = obs.altaz(times, targets, grid_times_targets=True)
altitudes = transformed_coords.alt
# Double check by doing one star the normal way with astropy
vega_altaz = vega.transform_to(AltAz(location=location, obstime=times))
vega_alt = vega_altaz.alt
sirius_altaz = sirius.transform_to(AltAz(location=location, obstime=times))
sirius_alt = sirius_altaz.alt
assert all(vega_alt == altitudes[0, :])
assert all(sirius_alt == altitudes[2, :])
# check that a single element target list works:
single_target_list = [vega]
vega_list_alt = obs.altaz(times, single_target_list).alt
assert np.all(vega_list_alt == vega_alt)
# check that output elements are the proper lengths and types
assert isinstance(vega_list_alt, Latitude)
assert len(vega_list_alt) == len(times)
# Check for single time
single_time = times[0]
vega_single_time = obs.altaz(single_time, single_target_list).alt
assert vega_single_time[0] == vega_alt[0]
# Check single target input without list
vega_no_list = obs.altaz(times, vega).alt
assert all(vega_no_list == vega_alt)
# Check FixedTarget for single target
vega_FixedTarget = FixedTarget(coord=vega, name='Vega')
vega_FixedTarget_alt = obs.altaz(times, vega_FixedTarget).alt
assert all(vega_FixedTarget_alt == vega_alt)
# Check for vector FixedTarget
vega_FixedTarget = FixedTarget(coord=vega, name='Vega')
capella_FixedTarget = FixedTarget(coord=capella, name='Capella')
sirius_FixedTarget = FixedTarget(coord=sirius, name='Sirius')
ft_list = [vega_FixedTarget, capella_FixedTarget, sirius_FixedTarget]
ft_vector_alt = obs.altaz(times[:, np.newaxis], ft_list).T.alt
assert all(ft_vector_alt[0, :] == vega_alt)
assert all(ft_vector_alt[2, :] == sirius_alt)
def test_rise_set_transit_nearest_vector():
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
mira = SkyCoord(34.83663376*u.deg, -2.97763767*u.deg)
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
polaris = SkyCoord(37.95456067*u.degree, 89.26410897*u.degree)
sc_list = [vega, mira, sirius, polaris]
location = EarthLocation(10*u.deg, 45*u.deg, 0*u.m)
time = Time('1995-06-21 00:00:00')
obs = Observer(location=location)
rise_vector = obs.target_rise_time(time, sc_list)
vega_rise = obs.target_rise_time(time, vega)
mira_rise = obs.target_rise_time(time, mira)
sirius_rise = obs.target_rise_time(time, sirius)
polaris_rise = obs.target_rise_time(time, polaris)
assert rise_vector[0] == vega_rise
assert rise_vector[1] == mira_rise
assert rise_vector[2] == sirius_rise
assert rise_vector[3].value.mask and polaris_rise.value.mask
set_vector = obs.target_set_time(time, sc_list)
vega_set = obs.target_set_time(time, vega)
mira_set = obs.target_set_time(time, mira)
sirius_set = obs.target_set_time(time, sirius)
polaris_set = obs.target_set_time(time, polaris)
assert set_vector[0] == vega_set
assert set_vector[1] == mira_set
assert set_vector[2] == sirius_set
assert set_vector[3].value.mask and polaris_set.value.mask
transit_vector = obs.target_meridian_transit_time(time, sc_list)
vega_trans = obs.target_meridian_transit_time(time, vega)
mira_trans = obs.target_meridian_transit_time(time, mira)
sirius_trans = obs.target_meridian_transit_time(time, sirius)
polaris_trans = obs.target_meridian_transit_time(time, polaris)
assert transit_vector[0] == vega_trans
assert transit_vector[1] == mira_trans
assert transit_vector[2] == sirius_trans
assert transit_vector[3] == polaris_trans
def print_pyephem_altaz(latitude, longitude, elevation, time, pressure,
target_coords):
"""
Run PyEphem to compute the altitude/azimuth of a target at specified time
    and observatory, for comparison with the astroplan calculation tested in
`test_Observer_altaz`.
"""
import ephem
pyephem_obs = ephem.Observer()
pyephem_obs.lat = latitude
pyephem_obs.lon = longitude
pyephem_obs.elevation = elevation
pyephem_obs.date = time.datetime
pyephem_obs.pressure = pressure
pyephem_target = ephem.FixedBody()
pyephem_target._ra = ephem.degrees(target_coords.ra.radian)
pyephem_target._dec = ephem.degrees(target_coords.dec.radian)
pyephem_target.compute(pyephem_obs)
pyephem_altitude = Latitude(np.degrees(pyephem_target.alt)*u.degree)
pyephem_azimuth = Longitude(np.degrees(pyephem_target.az)*u.degree)
print(pyephem_altitude, pyephem_azimuth)
def test_Observer_timezone_parser():
lat = '+19:00:00'
lon = '-155:00:00'
elevation = 0.0 * u.m
location = EarthLocation.from_geodetic(lon, lat, elevation)
obs1 = Observer(name='Observatory', location=location,
timezone=pytz.timezone('UTC'))
obs2 = Observer(name='Observatory', location=location, timezone='UTC')
obs3 = Observer(name='Observatory', location=location)
assert obs1.timezone == obs2.timezone, ('Accept both strings to pass to '
'the pytz.timezone() constructor '
'and instances of pytz.timezone')
assert obs2.timezone == obs3.timezone, ('Default timezone should be UTC')
def test_parallactic_angle():
"""
    Compute parallactic angle for targets at hour angle = {3, 19} for
    an observer at the IRTF, using the online SpeX calculator and PyEphem
"""
# Set up position for IRTF
lat = 19.826218*u.deg
lon = -155.471999*u.deg
elevation = 4160.0 * u.m
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2015-01-01 00:00:00')
LST = time.sidereal_time('mean', longitude=lon)
desired_HA_1 = 3*u.hourangle
desired_HA_2 = 19*u.hourangle # = -5*u.hourangle
target1 = SkyCoord(LST - desired_HA_1, -30*u.degree)
target2 = SkyCoord(LST - desired_HA_2, -30*u.degree)
obs = Observer(location=location)
q1 = obs.parallactic_angle(time, target1)
q2 = obs.parallactic_angle(time, target2)
q12 = obs.parallactic_angle(time, [target1, target2])
# Get values from PyEphem for comparison from print_pyephem_parallactic_angle()
pyephem_q1 = 46.54610060782033*u.deg
pyephem_q2 = -65.51818282032019*u.deg
assert_quantity_allclose(q1, pyephem_q1, atol=1*u.deg)
assert_quantity_allclose(q2, pyephem_q2, atol=1*u.deg)
    # Get values from the SpeX parallactic angle calculator for comparison:
    # http://irtfweb.ifa.hawaii.edu/cgi-bin/spex/parangle.cgi
SpeX_q1 = 46.7237968*u.deg # deg
SpeX_q2 = -65.428924*u.deg # deg
assert_quantity_allclose(q1, SpeX_q1, atol=0.1*u.deg)
assert_quantity_allclose(q2, SpeX_q2, atol=0.1*u.deg)
assert q1 == q12[0]
assert q2 == q12[1]
def print_pyephem_parallactic_angle():
# lat = 19.826218*u.deg
lon = -155.471999*u.deg
time = Time('2015-01-01 00:00:00')
LST = time.sidereal_time('mean', longitude=lon)
desired_HA_1 = 3*u.hourangle
desired_HA_2 = 19*u.hourangle # = -5*u.hourangle
import ephem
obs = ephem.Observer()
obs.lat = '19:49:34.3848'
obs.lon = '-155:28:19.1964'
obs.elevation = 0
obs.date = time.datetime
pyephem_target1 = ephem.FixedBody()
pyephem_target1._ra = ephem.degrees((LST - desired_HA_1).to(u.rad).value)
pyephem_target1._dec = ephem.degrees((-30*u.deg).to(u.rad).value)
pyephem_target1.compute(obs)
pyephem_q1 = (float(pyephem_target1.parallactic_angle())*u.rad).to(u.deg)
pyephem_target2 = ephem.FixedBody()
pyephem_target2._ra = ephem.degrees((LST - desired_HA_2).to(u.rad).value)
pyephem_target2._dec = ephem.degrees((-30*u.deg).to(u.rad).value)
pyephem_target2.compute(obs)
pyephem_q2 = (float(pyephem_target2.parallactic_angle())*u.rad).to(u.deg)
print(pyephem_q1, pyephem_q2)
def test_sunrise_sunset_equator():
"""
Check that time of sunrise/set for an observer on the equator is
consistent with PyEphem results (for no atmosphere/pressure=0)
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
obs = Observer(location=location, pressure=pressure)
astroplan_next_sunrise = obs.sun_rise_time(time, which='next').datetime
astroplan_next_sunset = obs.sun_set_time(time, which='next').datetime
astroplan_prev_sunrise = obs.sun_rise_time(time, which='previous').datetime
astroplan_prev_sunset = obs.sun_set_time(time, which='previous').datetime
# Run print_pyephem_sunrise_sunset() to compute analogous
# result from PyEphem:
pyephem_next_sunrise = datetime.datetime(2000, 1, 2, 6, 3, 39, 150790)
pyephem_next_sunset = datetime.datetime(2000, 1, 1, 18, 3, 23, 676686)
pyephem_prev_sunrise = datetime.datetime(2000, 1, 1, 6, 3, 10, 720052)
pyephem_prev_sunset = datetime.datetime(1999, 12, 31, 18, 2, 55, 100786)
# Typical difference in this example between PyEphem and astroplan
# with an atmosphere is <2 min
threshold_minutes = 2
assert (abs(pyephem_next_sunrise - astroplan_next_sunrise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_next_sunset - astroplan_next_sunset) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_sunrise - astroplan_prev_sunrise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_sunset - astroplan_prev_sunset) <
datetime.timedelta(minutes=threshold_minutes))
def print_pyephem_sunrise_sunset():
"""
To run:
python -c 'from astroplan.tests.test_observer import print_pyephem_sunrise_sunset as f; f()'
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0
time = Time('2000-01-01 12:00:00')
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
next_sunrise = obs.next_rising(ephem.Sun(), use_center=True)
next_sunset = obs.next_setting(ephem.Sun(), use_center=True)
prev_sunrise = obs.previous_rising(ephem.Sun(), use_center=True)
prev_sunset = obs.previous_setting(ephem.Sun(), use_center=True)
print(list(map(repr, [next_sunrise.datetime(), next_sunset.datetime(),
prev_sunrise.datetime(), prev_sunset.datetime()])))
def test_vega_rise_set_equator():
"""
Check that time of rise/set of Vega for an observer on the equator is
consistent with PyEphem results (for no atmosphere/pressure=0)
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
vega_ra, vega_dec = (279.23473479*u.degree, 38.78368896*u.degree)
vega = SkyCoord(vega_ra, vega_dec)
obs = Observer(location=location, pressure=pressure)
astroplan_next_rise = obs.target_rise_time(time, vega, which='next').datetime
astroplan_next_set = obs.target_set_time(time, vega, which='next').datetime
astroplan_prev_rise = obs.target_rise_time(time, vega, which='previous').datetime
astroplan_prev_set = obs.target_set_time(time, vega, which='previous').datetime
astroplan_nearest_rise = obs.target_rise_time(time, vega, which='nearest').datetime
astroplan_nearest_set = obs.target_set_time(time, vega, which='nearest').datetime
# Run print_pyephem_vega_rise_set() to compute analogous
# result from PyEphem:
pyephem_next_rise = datetime.datetime(2000, 1, 2, 5, 52, 8, 257401)
pyephem_next_set = datetime.datetime(2000, 1, 1, 17, 54, 6, 211705)
pyephem_prev_rise = datetime.datetime(2000, 1, 1, 5, 56, 4, 165852)
pyephem_prev_set = datetime.datetime(1999, 12, 31, 17, 58, 2, 120088)
# Typical difference in this example between PyEphem and astroplan
# with an atmosphere is <2 min
threshold_minutes = 2
assert (abs(pyephem_next_rise - astroplan_next_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_next_set - astroplan_next_set) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_rise - astroplan_prev_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_set - astroplan_prev_set) <
datetime.timedelta(minutes=threshold_minutes))
# Check that the 'nearest' option selects the nearest rise/set
assert astroplan_nearest_rise == astroplan_prev_rise
assert astroplan_nearest_set == astroplan_next_set
def print_pyephem_vega_rise_set():
"""
To run:
python -c 'from astroplan.tests.test_observer import print_pyephem_vega_rise_set as f; f()'
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0
time = Time('2000-01-01 12:00:00')
vega_ra, vega_dec = (279.23473479*u.degree, 38.78368896*u.degree)
vega = SkyCoord(vega_ra, vega_dec)
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
target = ephem.FixedBody()
target._ra = ephem.degrees(vega.ra.radian)
target._dec = ephem.degrees(vega.dec.radian)
target.compute(obs)
next_rising = obs.next_rising(target).datetime()
next_setting = obs.next_setting(target).datetime()
prev_rising = obs.previous_rising(target).datetime()
prev_setting = obs.previous_setting(target).datetime()
print(list(map(repr, [next_rising, next_setting,
prev_rising, prev_setting])))
def test_vega_sirius_rise_set_seattle():
"""
    Check that the rise/set times of Vega and Sirius for an observer in Seattle
    are consistent with PyEphem results (for no atmosphere/pressure=0)
"""
lat = '47d36m34.92s'
lon = '122d19m59.16s'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('1990-01-01 12:00:00')
vega = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
sirius = SkyCoord(101.28715533*u.degree, -16.71611586*u.degree)
obs = Observer(location=location, pressure=pressure)
astroplan_vega_rise = obs.target_rise_time(time, vega,
which='next').datetime
astroplan_sirius_rise = obs.target_rise_time(time, sirius,
which='next').datetime
astroplan_vector_rise = obs.target_rise_time(time, [vega, sirius],
which='next').datetime
astroplan_vega_set = obs.target_set_time(time, vega, which='next').datetime
astroplan_sirius_set = obs.target_set_time(time, sirius,
which='next').datetime
astroplan_vector_set = obs.target_set_time(time, [vega, sirius],
which='next').datetime
# Run print_pyephem_vega_sirius_rise_set() to compute analogous
# result from PyEphem:
pyephem_vega_rise = datetime.datetime(1990, 1, 1, 17, 36, 15, 615484)
pyephem_sirius_rise = datetime.datetime(1990, 1, 2, 11, 4, 52, 35375)
pyephem_vega_set = datetime.datetime(1990, 1, 1, 13, 49, 58, 788327)
pyephem_sirius_set = datetime.datetime(1990, 1, 1, 20, 33, 42, 342885)
# Typical difference in this example between PyEphem and astroplan
# with an atmosphere is <2 min
threshold_minutes = 2
assert (abs(pyephem_vega_rise - astroplan_vega_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_sirius_rise - astroplan_sirius_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_vega_set - astroplan_vega_set) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_sirius_set - astroplan_sirius_set) <
datetime.timedelta(minutes=threshold_minutes))
# Now check vectorized solutions against scalar:
assert (astroplan_vector_rise[0] == astroplan_vega_rise)
assert (astroplan_vector_rise[1] == astroplan_sirius_rise)
assert (astroplan_vector_set[0] == astroplan_vega_set)
assert (astroplan_vector_set[1] == astroplan_sirius_set)
def print_pyephem_vega_sirius_rise_set():
"""
To run:
python -c 'from astroplan.tests.test_observer import \
print_pyephem_vega_sirius_rise_set as f; f()'
"""
lat = '47:36:34.92'
lon = '122:19:59.16'
elevation = 0.0 * u.m
pressure = 0
time = Time('1990-01-01 12:00:00')
vega_coords = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
sirius_coords = SkyCoord(101.28715533*u.degree, -16.71611586*u.degree)
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
vega = ephem.FixedBody()
vega._ra = ephem.degrees(vega_coords.ra.radian)
vega._dec = ephem.degrees(vega_coords.dec.radian)
vega.compute(obs)
sirius = ephem.FixedBody()
sirius._ra = ephem.degrees(sirius_coords.ra.radian)
sirius._dec = ephem.degrees(sirius_coords.dec.radian)
sirius.compute(obs)
vega_next_rising = obs.next_rising(vega).datetime()
vega_next_setting = obs.next_setting(vega).datetime()
sirius_next_rising = obs.next_rising(sirius).datetime()
sirius_next_setting = obs.next_setting(sirius).datetime()
print(list(map(repr, [vega_next_rising, sirius_next_rising,
vega_next_setting, sirius_next_setting])))
def test_sunrise_sunset_equator_civil_twilight():
"""
    Check that civil-twilight sunrise/sunset times (horizon = -6 deg) for an
    observer on the equator are consistent with PyEphem results
    (for no atmosphere/pressure=0)
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
obs = Observer(location=location, pressure=pressure)
# Manually impose horizon equivalent to civil twilight
horizon = -6 * u.degree
astroplan_next_sunrise = obs.sun_rise_time(time, which='next',
horizon=horizon).datetime
astroplan_next_sunset = obs.sun_set_time(time, which='next',
horizon=horizon).datetime
astroplan_prev_sunrise = obs.sun_rise_time(time, which='previous',
horizon=horizon).datetime
astroplan_prev_sunset = obs.sun_set_time(time, which='previous',
horizon=horizon).datetime
# Run print_pyephem_sunrise_sunset_equator_civil_twilight() to compute
# analogous result from PyEphem:
pyephem_next_rise = datetime.datetime(2000, 1, 2, 5, 37, 34, 83328)
pyephem_next_set = datetime.datetime(2000, 1, 1, 18, 29, 29, 195908)
pyephem_prev_rise = datetime.datetime(2000, 1, 1, 5, 37, 4, 701708)
pyephem_prev_set = datetime.datetime(1999, 12, 31, 18, 29, 1, 530987)
threshold_minutes = 2
assert (abs(pyephem_next_rise - astroplan_next_sunrise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_next_set - astroplan_next_sunset) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_rise - astroplan_prev_sunrise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_set - astroplan_prev_sunset) <
datetime.timedelta(minutes=threshold_minutes))
def print_pyephem_sunrise_sunset_equator_civil_twilight():
"""
Calculate next sunrise and sunset with PyEphem for an observer
on the equator.
To run:
python -c 'from astroplan.tests.test_observer import \
print_pyephem_sunrise_sunset_equator_civil_twilight as f; f()'
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0
time = Time('2000-01-01 12:00:00')
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
obs.horizon = '-06:00:00'
next_sunrise = obs.next_rising(ephem.Sun(), use_center=True)
next_sunset = obs.next_setting(ephem.Sun(), use_center=True)
prev_sunrise = obs.previous_rising(ephem.Sun(), use_center=True)
prev_sunset = obs.previous_setting(ephem.Sun(), use_center=True)
def pyephem_time_to_datetime_str(t): return repr(t.datetime())
print(list(map(pyephem_time_to_datetime_str,
[next_sunrise, next_sunset, prev_sunrise, prev_sunset])))
def test_twilight_convenience_funcs():
"""
Check that the convenience functions for evening
astronomical/nautical/civil twilight correspond to their
PyEphem equivalents
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
obs = Observer(location=location, pressure=pressure)
# Compute morning twilights with astroplan
astroplan_morning_civil = obs.twilight_morning_civil(
time, which='previous').datetime
astroplan_morning_nautical = obs.twilight_morning_nautical(
time, which='previous').datetime
astroplan_morning_astro = obs.twilight_morning_astronomical(
time, which='previous').datetime
# Compute evening twilights with astroplan
astroplan_evening_civil = obs.twilight_evening_civil(
time, which='next').datetime
astroplan_evening_nautical = obs.twilight_evening_nautical(
time, which='next').datetime
astroplan_evening_astro = obs.twilight_evening_astronomical(
time, which='next').datetime
# Compute morning and evening twilights with PyEphem from
# the function print_pyephem_twilight_convenience_funcs()
pyephem_morning_civil, pyephem_morning_nautical, pyephem_morning_astronomical, = (
datetime.datetime(2000, 1, 1, 5, 37, 4, 701708),
datetime.datetime(2000, 1, 1, 5, 10, 55, 450939),
datetime.datetime(2000, 1, 1, 4, 44, 39, 415865))
pyephem_evening_civil, pyephem_evening_nautical, pyephem_evening_astronomical = (
datetime.datetime(2000, 1, 1, 18, 29, 29, 195908),
datetime.datetime(2000, 1, 1, 18, 55, 37, 864882),
datetime.datetime(2000, 1, 1, 19, 21, 53, 213768))
threshold_minutes = 2
# Compare morning twilights
assert (abs(astroplan_morning_civil - pyephem_morning_civil) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_morning_nautical - pyephem_morning_nautical) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_morning_astro - pyephem_morning_astronomical) <
datetime.timedelta(minutes=threshold_minutes))
# Compare evening twilights
assert (abs(astroplan_evening_civil - pyephem_evening_civil) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_evening_nautical - pyephem_evening_nautical) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_evening_astro - pyephem_evening_astronomical) <
datetime.timedelta(minutes=threshold_minutes))
def print_pyephem_twilight_convenience_funcs():
"""
To run:
python -c 'from astroplan.tests.test_observer import \
print_pyephem_twilight_convenience_funcs as f; f()'
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0
time = Time('2000-01-01 12:00:00')
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
# Morning twilights
obs.horizon = '-06:00:00'
morning_civil = obs.previous_rising(ephem.Sun(), use_center=True)
obs.horizon = '-12:00:00'
morning_nautical = obs.previous_rising(ephem.Sun(), use_center=True)
obs.horizon = '-18:00:00'
morning_astronomical = obs.previous_rising(ephem.Sun(), use_center=True)
# Evening twilights
obs.horizon = '-06:00:00'
evening_civil = obs.next_setting(ephem.Sun(), use_center=True)
obs.horizon = '-12:00:00'
evening_nautical = obs.next_setting(ephem.Sun(), use_center=True)
obs.horizon = '-18:00:00'
evening_astronomical = obs.next_setting(ephem.Sun(), use_center=True)
def pyephem_time_to_datetime_str(t): return repr(t.datetime())
print(list(map(pyephem_time_to_datetime_str,
[morning_civil, morning_nautical, morning_astronomical,
evening_civil, evening_nautical, evening_astronomical])))
def test_solar_transit():
"""
Test that astroplan's solar transit/antitransit (which are noon and
midnight) agree with PyEphem's
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
from astropy.coordinates import get_sun
obs = Observer(location=location, pressure=pressure)
# Compute next/previous noon/midnight using generic calc_transit methods
astroplan_next_transit = obs.target_meridian_transit_time(
time, get_sun(time), which='next').datetime
astroplan_next_antitransit = obs.target_meridian_antitransit_time(
time, get_sun(time), which='next').datetime
astroplan_prev_transit = obs.target_meridian_transit_time(
time, get_sun(time), which='previous').datetime
astroplan_prev_antitransit = obs.target_meridian_antitransit_time(
time, get_sun(time), which='previous').datetime
astroplan_nearest_transit = obs.target_meridian_transit_time(
time, get_sun(time), which='nearest').datetime
astroplan_nearest_antitransit = obs.target_meridian_antitransit_time(
time, get_sun(time), which='nearest').datetime
# Computed in print_pyephem_solar_transit_noon()
pyephem_next_transit = datetime.datetime(2000, 1, 1, 12, 3, 17, 207300)
pyephem_next_antitransit = datetime.datetime(2000, 1, 2, 0, 3, 31, 423333)
pyephem_prev_transit = datetime.datetime(1999, 12, 31, 12, 2, 48, 562755)
pyephem_prev_antitransit = datetime.datetime(2000, 1, 1, 0, 3, 2, 918943)
threshold_minutes = 5
assert (abs(astroplan_next_transit - pyephem_next_transit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_next_antitransit - pyephem_next_antitransit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_prev_transit - pyephem_prev_transit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_prev_antitransit - pyephem_prev_antitransit) <
datetime.timedelta(minutes=threshold_minutes))
# Check nearest
assert astroplan_next_transit == astroplan_nearest_transit
assert astroplan_nearest_antitransit == astroplan_prev_antitransit
def test_solar_transit_convenience_methods():
"""
Test that astroplan's noon and midnight convenience methods agree with
PyEphem's solar transit/antitransit time.
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
obs = Observer(location=location, pressure=pressure)
# Compute next/previous noon/midnight using generic calc_transit methods
astroplan_next_noon = obs.noon(time, which='next').datetime
astroplan_next_midnight = obs.midnight(time, which='next').datetime
astroplan_prev_noon = obs.noon(time, which='previous').datetime
astroplan_prev_midnight = obs.midnight(time, which='previous').datetime
# Computed in print_pyephem_solar_transit_noon()
pyephem_next_transit = datetime.datetime(2000, 1, 1, 12, 3, 17, 207300)
pyephem_next_antitransit = datetime.datetime(2000, 1, 2, 0, 3, 31, 423333)
pyephem_prev_transit = datetime.datetime(1999, 12, 31, 12, 2, 48, 562755)
pyephem_prev_antitransit = datetime.datetime(2000, 1, 1, 0, 3, 2, 918943)
threshold_minutes = 5
assert (abs(astroplan_next_noon - pyephem_next_transit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_next_midnight - pyephem_next_antitransit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_prev_noon - pyephem_prev_transit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(astroplan_prev_midnight - pyephem_prev_antitransit) <
datetime.timedelta(minutes=threshold_minutes))
def print_pyephem_solar_transit_noon():
"""
    Calculate next/previous solar transit (noon) and antitransit (midnight)
    with PyEphem for an observer on the equator.
    To run:
    python -c 'from astroplan.tests.test_observer import print_pyephem_solar_transit_noon as f; f()'
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0
time = Time('2000-01-01 12:00:00')
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
next_transit = obs.next_transit(ephem.Sun())
next_antitransit = obs.next_antitransit(ephem.Sun())
prev_transit = obs.previous_transit(ephem.Sun())
prev_antitransit = obs.previous_antitransit(ephem.Sun())
def pyephem_time_to_datetime_str(t): return repr(t.datetime())
print(list(map(pyephem_time_to_datetime_str,
[next_transit, next_antitransit,
prev_transit, prev_antitransit])))
def test_vega_sirius_transit_seattle():
"""
    Check that the meridian transit times of Vega and Sirius for an observer in
    Seattle are consistent with PyEphem results (for no atmosphere/pressure=0)
"""
lat = '47d36m34.92s'
lon = '122d19m59.16s'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('1990-01-01 12:00:00')
vega = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
sirius = SkyCoord(101.28715533*u.degree, -16.71611586*u.degree)
obs = Observer(location=location, pressure=pressure)
astroplan_vega_transit = obs.target_meridian_transit_time(
time, vega, which='next').datetime
astroplan_sirius_transit = obs.target_meridian_transit_time(
time, sirius, which='next').datetime
astroplan_vector_transit = obs.target_meridian_transit_time(
time, [vega, sirius], which='next').datetime
# Run print_pyephem_vega_sirius_transit() to compute analogous
# result from PyEphem:
pyephem_vega_transit = datetime.datetime(1990, 1, 2, 3, 41, 9, 244067)
pyephem_sirius_transit = datetime.datetime(1990, 1, 1, 15, 51, 15, 135167)
# Typical difference in this example between PyEphem and astroplan
# with an atmosphere is <2 min
threshold_minutes = 2
assert (abs(pyephem_vega_transit - astroplan_vega_transit) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_sirius_transit - astroplan_sirius_transit) <
datetime.timedelta(minutes=threshold_minutes))
# Now check vectorized solutions against scalar:
assert (astroplan_vector_transit[0] == astroplan_vega_transit)
assert (astroplan_vector_transit[1] == astroplan_sirius_transit)
def print_pyephem_vega_sirius_transit():
"""
To run:
python -c 'from astroplan.tests.test_observer import \
print_pyephem_vega_sirius_transit as f; f()'
"""
lat = '47:36:34.92'
lon = '122:19:59.16'
elevation = 0.0 * u.m
pressure = 0
time = Time('1990-01-01 12:00:00')
vega_coords = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
sirius_coords = SkyCoord(101.28715533*u.degree, -16.71611586*u.degree)
import ephem
obs = ephem.Observer()
obs.lat = lat
obs.lon = lon
obs.elevation = elevation
obs.date = time.datetime
obs.pressure = pressure
vega = ephem.FixedBody()
vega._ra = ephem.degrees(vega_coords.ra.radian)
vega._dec = ephem.degrees(vega_coords.dec.radian)
vega.compute(obs)
sirius = ephem.FixedBody()
sirius._ra = ephem.degrees(sirius_coords.ra.radian)
sirius._dec = ephem.degrees(sirius_coords.dec.radian)
sirius.compute(obs)
vega_next_transit = obs.next_transit(vega).datetime()
sirius_next_transit = obs.next_transit(sirius).datetime()
print(list(map(repr, [vega_next_transit, sirius_next_transit])))
def test_target_is_up():
"""
Test that Polaris is/isn't observable from north/south pole
"""
elevation = 0.0 * u.m
pressure = 0 * u.bar
north = EarthLocation.from_geodetic('00:00:00',
'90:00:00', elevation)
south = EarthLocation.from_geodetic('00:00:00',
'-90:00:00', elevation)
time = Time('2000-01-01 12:00:00')
polaris = SkyCoord(37.95456067*u.degree, 89.26410897*u.degree)
polaris_B = SkyCoord(37.639725*u.degree, 89.26080556*u.degree)
polaris_binary = [polaris, polaris_B]
north_pole = Observer(location=north, pressure=pressure)
south_pole = Observer(location=south, pressure=pressure)
assert north_pole.target_is_up(time, polaris)
assert not south_pole.target_is_up(time, polaris)
assert all(north_pole.target_is_up(time, polaris_binary))
assert not any(south_pole.target_is_up(time, polaris_binary))
def test_string_times():
"""
Test that strings passed to time argument get successfully
passed to Time constructor. Analogous test to test_vega_rise_set_equator(),
just with a string for a time.
"""
lat = '00:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
pressure = 0 * u.bar
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = '2000-01-01 12:00:00'
vega_ra, vega_dec = (279.23473479*u.degree, 38.78368896*u.degree)
vega = SkyCoord(vega_ra, vega_dec)
obs = Observer(location=location, pressure=pressure)
astroplan_next_rise = obs.target_rise_time(time, vega, which='next').datetime
astroplan_next_set = obs.target_set_time(time, vega, which='next').datetime
astroplan_prev_rise = obs.target_rise_time(time, vega, which='previous').datetime
astroplan_prev_set = obs.target_set_time(time, vega, which='previous').datetime
astroplan_nearest_rise = obs.target_rise_time(time, vega, which='nearest').datetime
astroplan_nearest_set = obs.target_set_time(time, vega, which='nearest').datetime
# Run print_pyephem_vega_rise_set() to compute analogous
# result from PyEphem:
pyephem_next_rise = datetime.datetime(2000, 1, 2, 5, 52, 8, 257401)
pyephem_next_set = datetime.datetime(2000, 1, 1, 17, 54, 6, 211705)
pyephem_prev_rise = datetime.datetime(2000, 1, 1, 5, 56, 4, 165852)
pyephem_prev_set = datetime.datetime(1999, 12, 31, 17, 58, 2, 120088)
# Typical difference in this example between PyEphem and astroplan
# with an atmosphere is <2 min
threshold_minutes = 2
assert (abs(pyephem_next_rise - astroplan_next_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_next_set - astroplan_next_set) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_rise - astroplan_prev_rise) <
datetime.timedelta(minutes=threshold_minutes))
assert (abs(pyephem_prev_set - astroplan_prev_set) <
datetime.timedelta(minutes=threshold_minutes))
# Check that the 'nearest' option selects the nearest rise/set
assert astroplan_nearest_rise == astroplan_prev_rise
assert astroplan_nearest_set == astroplan_next_set
def test_TargetAlwaysUpWarning(recwarn):
lat = '90:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
polaris = SkyCoord(37.95456067*u.degree, 89.26410897*u.degree)
obs = Observer(location=location)
no_time = obs.target_rise_time(time, polaris, which='next')
w = recwarn.pop(TargetAlwaysUpWarning)
assert issubclass(w.category, TargetAlwaysUpWarning)
assert no_time.mask
# Regression test: make sure 'nearest' also works
no_time = obs.target_rise_time(time, polaris, which='nearest')
# Cycle back through warnings until a TargetAlwaysUpWarning is hit
# (other warnings can also be raised here)
while not issubclass(w.category, TargetAlwaysUpWarning):
w = recwarn.pop(TargetAlwaysUpWarning)
assert issubclass(w.category, TargetAlwaysUpWarning)
assert no_time.mask
def test_TargetNeverUpWarning(recwarn):
lat = '-90:00:00'
lon = '00:00:00'
elevation = 0.0 * u.m
location = EarthLocation.from_geodetic(lon, lat, elevation)
time = Time('2000-01-01 12:00:00')
polaris = SkyCoord(37.95456067*u.degree, 89.26410897*u.degree)
obs = Observer(location=location)
no_time = obs.target_rise_time(time, polaris, which='next')
w = recwarn.pop(TargetNeverUpWarning)
assert issubclass(w.category, TargetNeverUpWarning)
assert no_time.mask
def test_mixed_rise_and_dont_rise():
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
polaris = SkyCoord(37.95456067*u.deg, 89.26410897*u.deg)
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
targets = [vega, polaris, sirius]
location = EarthLocation(10*u.deg, 45*u.deg, 0*u.m)
time = Time('1995-06-21 00:00:00')
obs = Observer(location=location)
with pytest.warns(TargetAlwaysUpWarning) as recwarn:
rise_times = obs.target_rise_time(time, targets, which='next')
assert rise_times.mask[1]
targets_that_rise = np.array(targets)[~rise_times.mask]
assert np.all([vega, sirius] == targets_that_rise)
w = recwarn.pop(TargetAlwaysUpWarning)
assert issubclass(w.category, TargetAlwaysUpWarning)
def test_timezone_convenience_methods():
location = EarthLocation(-74.0*u.deg, 40.7*u.deg, 0*u.m)
obs = Observer(location=location, timezone=pytz.timezone('US/Eastern'))
t = Time(57100.3, format='mjd')
assert (obs.astropy_time_to_datetime(t).hour == 3)
dt = datetime.datetime(2015, 3, 19, 3, 12)
assert (obs.datetime_to_astropy_time(dt).datetime ==
datetime.datetime(2015, 3, 19, 7, 12))
assert (obs.astropy_time_to_datetime(obs.datetime_to_astropy_time(dt)).replace(
tzinfo=None) == dt)
# Test ndarray of times:
times = t + np.linspace(0, 24, 10)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_utils.py
"""
Testing for utils.
Copyright (c) 2021, <NAME>
"""
import unittest
from itertools import product
import numpy as np
import pytest
# import the package to test
from dphtools.utils import (
anscombe,
anscombe_inv,
bin_ndarray,
fft_gaussian_filter,
fft_pad,
_padding_slices,
radial_profile,
slice_maker,
scale,
win_nd,
)
from numpy.fft import fftshift, ifftshift
from numpy.testing import assert_allclose, assert_almost_equal
from scipy.fft import next_fast_len
from scipy.ndimage.filters import gaussian_filter
rng = np.random.default_rng(12345)
class TestBinNdarray(unittest.TestCase):
"""Test bin_ndarray."""
def setUp(self):
"""Set up."""
self.data = np.arange(16).reshape(4, 4)
def test_shapes(self):
"""Test exception raising."""
with pytest.raises(ValueError):
bin_ndarray(self.data)
def test_new_shape(self):
"""Test exception raising."""
with pytest.raises(ValueError):
bin_ndarray(self.data, new_shape=(2, 2, 2))
def test_operation(self):
"""Test exception raising."""
with pytest.raises(ValueError):
bin_ndarray(self.data, bin_size=2, operation="add")
def test_scale_error():
"""Test exception raising."""
with pytest.raises(TypeError):
scale(rng.standard_normal(10) + rng.standard_normal(10) * 1j)
class TestFFTPad(unittest.TestCase):
"""Test fft_pad."""
def test_wrong_newshape(self):
"""Test newshape input."""
with pytest.raises(ValueError):
data = np.empty((12, 15))
fft_pad(data, object)
def test_new_shape_no_size(self):
"""Test the make a new shape with even and odd numbers when no size is specified, i.e. test auto padding."""
oldshape = (2 * 17, 17)
data = np.zeros(oldshape)
newshape = tuple(next_fast_len(s) for s in oldshape)
newdata = fft_pad(data)
assert newshape == newdata.shape
def test_new_shape_one_size(self):
"""Make sure the new shape has the same dimensions when one is given."""
oldshape = (10, 20, 30)
data = rng.standard_normal(oldshape)
newsize = 50
newdata = fft_pad(data, newsize)
assert (newsize,) * newdata.ndim == newdata.shape
def test_new_shape_multiple(self):
"""Make sure the new shape has the same dimensions when one is given."""
oldshape = (10, 20, 30, 40)
data = rng.standard_normal(oldshape)
newsize = (50, 40, 30, 100)
newdata = fft_pad(data, newsize)
assert newsize == newdata.shape
def test_smaller_shape(self):
"""Test that cropping works as expected."""
oldshape = rng.integers(10, 200)
newshape = rng.integers(5, oldshape)
data = np.ones(oldshape)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 19:06:40 2020
@author: <NAME>
Manual Gate finder
"""
import pandas as pd
import numpy as np
from sklearn.mixture import GaussianMixture
from scipy import stats
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
def gmm_dist_plot (adata, marker_of_interest):
"""
Parameters
----------
adata : Ann Data Object
marker_of_interest : string
Marker of interest.
Returns
-------
Distribution plot of the marker of interest along with GMM overlaid.
Example
-------
gmm_dist_plot (adata, marker_of_interest='CD45')
"""
# If no raw data is available make a copy
if adata.raw is None:
adata.raw = adata
# Copy of the raw data if it exists
if adata.raw is not None:
adata.X = adata.raw.X
# Make a copy of the data with the marker of interest
data = pd.DataFrame(np.log1p(adata.X), columns = adata.var.index, index= adata.obs.index)[[marker_of_interest]]
# Clip off the top and bottom outliers before applying the model
data = data.clip(lower = np.percentile(data,1))
import numpy as np
import constants as consts
def load_attitude (filePath):
return np.loadtxt(filePath,
comments="#",
delimiter=",",
skiprows=consts.ATT_HEADER_ROWS,
converters = {consts.ATT_RA_COL: lambda s: 360.0 * (float(s.strip() or 0) / 24.0)}, #Convert hours to degrees
usecols = (consts.ATT_TIME_COL, consts.ATT_RA_COL, consts.ATT_DEC_COL))
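# Worked example of the converter above: an RA string of "6.0" (hours) is turned into
# 360.0 * (6.0 / 24.0) = 90.0 degrees.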
# Returns the ra dec interpolated for a given time
def get_ra_dec(time, att):
#print("time: " + str(time))
att_times = att[:,0]
idx = (np.abs(att_times - time))
#This code successively trains SVMs with different parameters C and gamma to find the one with the best recognition scores for 5 gestures.
#The datasets are retrieved from the local drive where they were stored at a previous step (see the Microprocessor Software part).
#The parameters and the kernel with the best recall score should be noted so that they can be given to another Python code which trains an SVM and sends commands to the Lego NXT Robot.
import numpy as np
import sys
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
np.set_printoptions(threshold=sys.maxsize)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn import preprocessing
# session 1 loading data to variables lists
s1g0="C:/Users/ece73/Desktop/recordings/session1/gesture0.txt"
s1g1="C:/Users/ece73/Desktop/recordings/session1/gesture1.txt"
s1g2="C:/Users/ece73/Desktop/recordings/session1/gesture2.txt"
s1g3="C:/Users/ece73/Desktop/recordings/session1/gesture3.txt"
s1g4="C:/Users/ece73/Desktop/recordings/session1/gesture4.txt"
s1g5="C:/Users/ece73/Desktop/recordings/session1/gesture5.txt"
s1g6="C:/Users/ece73/Desktop/recordings/session1/gesture6.txt"
s1g7="C:/Users/ece73/Desktop/recordings/session1/gesture7.txt"
s1g8="C:/Users/ece73/Desktop/recordings/session1/gesture8.txt"
ds1g0 = np.loadtxt(s1g0, delimiter=",")
ds1g1 = np.loadtxt(s1g1, delimiter=",")
ds1g2 = np.loadtxt(s1g2, delimiter=",")
ds1g3 = np.loadtxt(s1g3, delimiter=",")
ds1g4 = np.loadtxt(s1g4, delimiter=",")
ds1g5 = np.loadtxt(s1g5, delimiter=",")
ds1g6 = np.loadtxt(s1g6, delimiter=",")
ds1g7 = np.loadtxt(s1g7, delimiter=",")
ds1g8 = np.loadtxt(s1g8, delimiter=",")
###################################################
# session 2 loading data to variables lists
s2g0="C:/Users/ece73/Desktop/recordings/session2/gesture0.txt"
s2g1="C:/Users/ece73/Desktop/recordings/session2/gesture1.txt"
s2g2="C:/Users/ece73/Desktop/recordings/session2/gesture2.txt"
s2g3="C:/Users/ece73/Desktop/recordings/session2/gesture3.txt"
s2g4="C:/Users/ece73/Desktop/recordings/session2/gesture4.txt"
s2g5="C:/Users/ece73/Desktop/recordings/session2/gesture5.txt"
s2g6="C:/Users/ece73/Desktop/recordings/session2/gesture6.txt"
s2g7="C:/Users/ece73/Desktop/recordings/session2/gesture7.txt"
s2g8="C:/Users/ece73/Desktop/recordings/session2/gesture8.txt"
ds2g0 = np.loadtxt(s2g0, delimiter=",")
ds2g1 = np.loadtxt(s2g1, delimiter=",")
ds2g2 = np.loadtxt(s2g2, delimiter=",")
ds2g3 = np.loadtxt(s2g3, delimiter=",")
ds2g4 = np.loadtxt(s2g4, delimiter=",")
ds2g5 = np.loadtxt(s2g5, delimiter=",")
ds2g6 = np.loadtxt(s2g6, delimiter=",")
ds2g7 = np.loadtxt(s2g7, delimiter=",")
ds2g8 = np.loadtxt(s2g8, delimiter=",")
##################################################################
# session 3 loading data to variables lists
s3g0="C:/Users/ece73/Desktop/recordings/session3/gesture0.txt"
s3g1="C:/Users/ece73/Desktop/recordings/session3/gesture1.txt"
s3g2="C:/Users/ece73/Desktop/recordings/session3/gesture2.txt"
s3g3="C:/Users/ece73/Desktop/recordings/session3/gesture3.txt"
s3g4="C:/Users/ece73/Desktop/recordings/session3/gesture4.txt"
s3g5="C:/Users/ece73/Desktop/recordings/session3/gesture5.txt"
s3g6="C:/Users/ece73/Desktop/recordings/session3/gesture6.txt"
s3g7="C:/Users/ece73/Desktop/recordings/session3/gesture7.txt"
s3g8="C:/Users/ece73/Desktop/recordings/session3/gesture8.txt"
ds3g0 = np.loadtxt(s3g0, delimiter=",")
ds3g1 = np.loadtxt(s3g1, delimiter=",")
ds3g2 = np.loadtxt(s3g2, delimiter=",")
ds3g3 = np.loadtxt(s3g3, delimiter=",")
ds3g4 = np.loadtxt(s3g4, delimiter=",")
ds3g5 = np.loadtxt(s3g5, delimiter=",")
ds3g6 = np.loadtxt(s3g6, delimiter=",")
ds3g7 = np.loadtxt(s3g7, delimiter=",")
ds3g8 = np.loadtxt(s3g8, delimiter=",")
##################################################################
# session 4 loading data to variables lists
s4g0="C:/Users/ece73/Desktop/recordings/session4/gesture0.txt"
s4g1="C:/Users/ece73/Desktop/recordings/session4/gesture1.txt"
s4g2="C:/Users/ece73/Desktop/recordings/session4/gesture2.txt"
s4g3="C:/Users/ece73/Desktop/recordings/session4/gesture3.txt"
s4g4="C:/Users/ece73/Desktop/recordings/session4/gesture4.txt"
s4g5="C:/Users/ece73/Desktop/recordings/session4/gesture5.txt"
s4g6="C:/Users/ece73/Desktop/recordings/session4/gesture6.txt"
s4g7="C:/Users/ece73/Desktop/recordings/session4/gesture7.txt"
s4g8="C:/Users/ece73/Desktop/recordings/session4/gesture8.txt"
ds4g0 = np.loadtxt(s4g0, delimiter=",")
ds4g1 = np.loadtxt(s4g1, delimiter=",")
ds4g2 = np.loadtxt(s4g2, delimiter=",")
ds4g3 = np.loadtxt(s4g3, delimiter=",")
ds4g4 = np.loadtxt(s4g4, delimiter=",")
ds4g5 = np.loadtxt(s4g5, delimiter=",")
ds4g6 = np.loadtxt(s4g6, delimiter=",")
ds4g7 = np.loadtxt(s4g7, delimiter=",")
ds4g8 = np.loadtxt(s4g8, delimiter=",")
##################################################################
# session 5 loading data to variables lists
s5g0="C:/Users/ece73/Desktop/recordings/session5/gesture0.txt"
s5g1="C:/Users/ece73/Desktop/recordings/session5/gesture1.txt"
s5g2="C:/Users/ece73/Desktop/recordings/session5/gesture2.txt"
s5g3="C:/Users/ece73/Desktop/recordings/session5/gesture3.txt"
s5g4="C:/Users/ece73/Desktop/recordings/session5/gesture4.txt"
s5g5="C:/Users/ece73/Desktop/recordings/session5/gesture5.txt"
s5g6="C:/Users/ece73/Desktop/recordings/session5/gesture6.txt"
s5g7="C:/Users/ece73/Desktop/recordings/session5/gesture7.txt"
s5g8="C:/Users/ece73/Desktop/recordings/session5/gesture8.txt"
ds5g0 = np.loadtxt(s5g0, delimiter=",")
ds5g1 = np.loadtxt(s5g1, delimiter=",")
ds5g2 = np.loadtxt(s5g2, delimiter=",")
ds5g3 = np.loadtxt(s5g3, delimiter=",")
ds5g4 = np.loadtxt(s5g4, delimiter=",")
ds5g5 = np.loadtxt(s5g5, delimiter=",")
ds5g6 = np.loadtxt(s5g6, delimiter=",")
ds5g7 = np.loadtxt(s5g7, delimiter=",")
ds5g8 = np.loadtxt(s5g8, delimiter=",")
##################################################################
# Creating y lists for the classification for every session and every gesture
#session 1 gestures y matrices
ys1g0=[0 for i in range(len(ds1g0))]
ys1g1=[1 for i in range(len(ds1g1))]
ys1g2=[2 for i in range(len(ds1g2))]
ys1g3=[3 for i in range(len(ds1g3))]
ys1g4=[4 for i in range(len(ds1g4))]
ys1g5=[5 for i in range(len(ds1g5))]
ys1g6=[6 for i in range(len(ds1g6))]
ys1g7=[7 for i in range(len(ds1g7))]
ys1g8=[8 for i in range(len(ds1g8))]
#session 2 gestures y matrices
ys2g0=[0 for i in range(len(ds2g0))]
ys2g1=[1 for i in range(len(ds2g1))]
ys2g2=[2 for i in range(len(ds2g2))]
ys2g3=[3 for i in range(len(ds2g3))]
ys2g4=[4 for i in range(len(ds2g4))]
ys2g5=[5 for i in range(len(ds2g5))]
ys2g6=[6 for i in range(len(ds2g6))]
ys2g7=[7 for i in range(len(ds2g7))]
ys2g8=[8 for i in range(len(ds2g8))]
#session 3 gestures y matrices
ys3g0=[0 for i in range(len(ds3g0))]
ys3g1=[1 for i in range(len(ds3g1))]
ys3g2=[2 for i in range(len(ds3g2))]
ys3g3=[3 for i in range(len(ds3g3))]
ys3g4=[4 for i in range(len(ds3g4))]
ys3g5=[5 for i in range(len(ds3g5))]
ys3g6=[6 for i in range(len(ds3g6))]
ys3g7=[7 for i in range(len(ds3g7))]
ys3g8=[8 for i in range(len(ds3g8))]
#session 4 gestures y matrices
ys4g0=[0 for i in range(len(ds4g0))]
ys4g1=[1 for i in range(len(ds4g1))]
ys4g2=[2 for i in range(len(ds4g2))]
ys4g3=[3 for i in range(len(ds4g3))]
ys4g4=[4 for i in range(len(ds4g4))]
ys4g5=[5 for i in range(len(ds4g5))]
ys4g6=[6 for i in range(len(ds4g6))]
ys4g7=[7 for i in range(len(ds4g7))]
ys4g8=[8 for i in range(len(ds4g8))]
#session 5 gestures y matrices
ys5g0=[0 for i in range(len(ds5g0))]
ys5g1=[1 for i in range(len(ds5g1))]
ys5g2=[2 for i in range(len(ds5g2))]
ys5g3=[3 for i in range(len(ds5g3))]
ys5g4=[4 for i in range(len(ds5g4))]
ys5g5=[5 for i in range(len(ds5g5))]
ys5g6=[6 for i in range(len(ds5g6))]
ys5g7=[7 for i in range(len(ds5g7))]
ys5g8=[8 for i in range(len(ds5g8))]
#creating a complete dataset with y matrices for every session.
#session 1
ds1=np.concatenate((ds1g0,ds1g3,ds1g4,ds1g6,ds1g8))
ys1=np.concatenate((ys1g0,ys1g3,ys1g4,ys1g6,ys1g8))
#session 2
ds2=np.concatenate((ds2g0,ds2g3,ds2g4,ds2g6,ds2g8))
ys2=np.concatenate((ys2g0,ys2g3,ys2g4,ys2g6,ys2g8))
#session 3
ds3=np.concatenate((ds3g0,ds3g3,ds3g4,ds3g6,ds3g8))
ys3=np.concatenate((ys3g0,ys3g3,ys3g4,ys3g6,ys3g8))
#session 4
ds4=np.concatenate((ds4g0,ds4g3,ds4g4,ds4g6,ds4g8))
ys4=np.concatenate((ys4g0,ys4g3,ys4g4,ys4g6,ys4g8))
#session 5
ds5=np.concatenate((ds5g0,ds5g3,ds5g4,ds5g6,ds5g8))
ys5=np.concatenate((ys5g0,ys5g3,ys5g4,ys5g6,ys5g8))
#Creating the input for SVM. Complete data (x matrix) and y matrices.
datall=np.concatenate((ds1,ds2,ds3,ds4,ds5))
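#Illustrative sketch of the grid search described in the header comments; the split,
#kernels and parameter grids below are example values, not the ones from the original run.
yall=np.concatenate((ys1,ys2,ys3,ys4,ys5))
X_train, X_test, y_train, y_test = train_test_split(preprocessing.scale(datall), yall, test_size=0.3, random_state=0)
param_grid = {'kernel': ['rbf', 'linear'], 'C': [1, 10, 100], 'gamma': [0.001, 0.01, 0.1]}
grid = GridSearchCV(SVC(), param_grid, scoring='recall_macro', cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_)
print(classification_report(y_test, grid.predict(X_test)))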
#!/usr/bin/env python
# coding: utf-8
# In[3]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import argparse
import os
import os.path as osp
import numpy as np
import torch # put this before scipy import
from scipy.misc import imread, imresize
import sys
sys.path.insert(0, '../tools')
from mattnet import MattNet
# In[4]:
# box functions
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
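# Quick check of the conversion with illustrative values: a 10x20 box at (5, 5) in
# [x y w h] format maps to corners [5, 5, 14, 24] in [x1 y1 x2 y2] format.
assert np.array_equal(xywh_to_xyxy(np.array([[5, 5, 10, 20]])), np.array([[5, 5, 14, 24]]))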
def show_attn(img_path, box, attn):
"""
box : [xywh]
attn: 49
"""
img = imread(img_path)
attn = np.array(attn).reshape(7,7)
x,y,w,h = int(box[0]), int(box[1]), int(box[2]), int(box[3])
roi = img[y:y+h-1, x:x+w-1]
attn = imresize(attn, [h,w])
plt.imshow(roi)
plt.imshow(attn, alpha=0.7)
def show_boxes(img_path, boxes, colors, texts=None, masks=None):
# boxes [[xyxy]]
img = imread(img_path)
plt.imshow(img)
ax = plt.gca()
for k in range(boxes.shape[0]):
box = boxes[k]
xmin, ymin, xmax, ymax = list(box)
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
color = colors[k]
ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
if texts is not None:
ax.text(xmin, ymin, texts[k], bbox={'facecolor':color, 'alpha':0.5})
# show mask
if masks is not None:
for k in range(len(masks)):
mask = masks[k]
m = np.zeros( (mask.shape[0], mask.shape[1], 3))
m[:,:,0] = 0; m[:,:,1] = 0; m[:,:,2] = 1.
ax.imshow(np.dstack([m*255, mask*255*0.4]).astype(np.uint8))
# In[5]:
# arguments
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='refcoco', help='dataset name: refclef, refcoco, refcoco+, refcocog')
parser.add_argument('--splitBy', type=str, default='unc', help='splitBy: unc, google, berkeley')
parser.add_argument('--model_id', type=str, default='mrcn_cmr_with_st', help='model id name')
args = parser.parse_args('')
# In[7]:
# MattNet
mattnet = MattNet(args)
# In[8]:
# image path
IMAGE_DIR = '../data/images/mscoco/images/train2014'
img_path = osp.join(IMAGE_DIR, 'COCO_train2014_'+str(229598).zfill(12)+'.jpg')
# In[11]:
# forward image
img_data = mattnet.forward_image(img_path, nms_thresh=0.3, conf_thresh=0.50)
# show masks
plt.rcParams['figure.figsize'] = (10., 8.)
dets = img_data['dets']
show_boxes(img_path, xywh_to_xyxy(np.array([det['box'] for det in dets])),
['blue']*len(dets), ['%s(%.2f)' % (det['category_name'], det['score']) for det in dets])
# In[18]:
# comprehend expression
expr = 'man in black'
entry = mattnet.comprehend(img_data, expr)
# In[19]:
# visualize
tokens = expr.split()
print('sub(%.2f):' % entry['weights'][0], ''.join(['(%s,%.2f)'% (tokens[i], s) for i, s in enumerate(entry['sub_attn'])]))
print('loc(%.2f):' % entry['weights'][1], ''.join(['(%s,%.2f)'% (tokens[i], s) for i, s in enumerate(entry['loc_attn'])]))
print('rel(%.2f):' % entry['weights'][2], ''.join(['(%s,%.2f)'% (tokens[i], s) for i, s in enumerate(entry['rel_attn'])]))
# predict attribute on the predicted object
print(entry['pred_atts'])
# show prediction
plt.rcParams['figure.figsize'] = (12., 8.)
fig = plt.figure()
plt.subplot(121)
show_boxes(img_path, xywh_to_xyxy(np.vstack([entry['pred_box']])))
import itertools as it
import numpy as np
import scipy
import scipy.interpolate
from .graph_utils import vertex_neighbours, clockwise_about
from .voronization import Lattice
from .graph_utils import get_edge_vectors
def normalised(a): return a / np.linalg.norm(a)
def vertices_to_triangles(g, edge_labels, triangle_size = 0.05):
"""
Map every vertex to a triangle.
The original vertex labeled i gets mapped to vertices 3*i, 3*i + 1, 3*i + 2,
where the mapping among those three is determined by the edge coloring, which allows us to connect the right
vertices of neighbouring triangles together.
"""
new_vertices = np.zeros(shape = (g.vertices.positions.shape[0]*3, 2), dtype = float)
new_adjacency = np.zeros(shape = (g.edges.indices.shape[0] + g.vertices.positions.shape[0]*3, 2), dtype = int)
new_adjacency_crossing = np.zeros(shape = (g.edges.indices.shape[0] + g.vertices.positions.shape[0]*3, 2), dtype = int)
# loop over each vertex, look at its three neighbours
# make 3 new vertices in its place shifted towards the neighbours
for vertex_i in range(g.vertices.positions.shape[0]):
# get vertex and edge neighbours of the vertex
this_vertex = g.vertices.positions[vertex_i]
vertex_indices, edge_indices = vertex_neighbours(g, vertex_i)
vertex_indices, edge_indices = clockwise_about(vertex_i, g)
# this function takes into account the fact that edges can cross boundaries
edge_vectors = get_edge_vectors(vertex_i, edge_indices, g)
# loop over the neighbours; the new vertices will have index 3*vertex_i + edge_label
for k, vertex_j, edge_j in zip(it.count(), vertex_indices, edge_indices):
# use the color of the edge in question to determine where to put it
# inside new_vertices
edge_label = edge_labels[edge_j]
index = 3*vertex_i + edge_label
# push the new vertex out from the center along the edge
this_triangle_size = min(0.2 * np.linalg.norm(edge_vectors[k]), triangle_size)
new_vertex_position = (this_vertex + this_triangle_size * normalised(edge_vectors[k]))
other_vertex_position = (this_vertex + edge_vectors[k] - this_triangle_size * normalised(edge_vectors[k]))
new_vertices[index] = new_vertex_position
# make external edge
other_index = 3*vertex_j + edge_label #index of the vertex on another site
new_adjacency[edge_j] = (index, other_index)
new_adjacency_crossing[edge_j] = np.floor(other_vertex_position) - np.floor(new_vertex_position)
# make internal edges
next_edge_j = edge_indices[(k+1)%3]
next_edge_label = edge_labels[next_edge_j]
other_index = 3*vertex_i + next_edge_label #index of the next vertex inside the site
new_adjacency[g.edges.indices.shape[0] + index] = (index, other_index)
#new_adjacency_crossing[g.edges.indices.shape[0] + index] = np.floor(new_vertices[other_index]) - np.floor(new_vertices[index])
# now that all the vertices and edges have been assigned
# go back and set adjacency_crossing for the internal vertices
# I'm not 100% sure why you need to do the external vertices up there and the
# internal ones down here, but it seems to work.
for edge_j in np.arange(g.edges.indices.shape[0], new_adjacency.shape[0]):
start, end = new_vertices[new_adjacency[edge_j]]
new_adjacency_crossing[edge_j] = (np.floor(end) - np.floor(start))
# The script is used to perform analysis of XRF spectra measured by
# Olympus Delta XRF (https://www.olympus-ims.com/en/xrf-xrd/delta-handheld/delta-prof/).
# The measurement is done for powder samples which are fixed on the XRF
# device using a custom 3D printed plastic holder(s). Several holders can be used in one
# series of measurements, which should be specified in the command line arguments.
# The analysis is based on calculating calibration for a certain element,
# and calculating the amount of element in the samples with unknown amount.
import argparse
import chardet
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import sys
from datetime import datetime
from glob import glob
from scipy import stats
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
from element_data import get_elements
##########
### Section with common variables related to spectra file and measurements
##########
# CSV contains first column which has titles of rows
TITLE_COL = 1
# number of beams in the XRF measurement. 3 for Olympus Delta XRF
NUM_BEAMS = 3
# number of measurement repeats. Usually 3 is done
NUM_REPEATS = 3
# row for number of beams = ExposureNum
ROW_NUM_BEAMS = 0
# row for number of data points in spectrum
ROW_NUM_DATA = 4
# row for time of measurement, to calculate cps instead of cumulative counts
ROW_NUM_TIME = 7 # seconds
def get_spectrum(spectra: pd.DataFrame, # dataframe with all spectra (loaded CSV file)
spectrum_num: int, # zero based index of sample spectrum to take
repeat_num: int, # zero based measurement repeat number for the sample spectrum
beam_num: int, # zero based measurement beam number for the sample spectrum
num_repeats=NUM_REPEATS, # total number of repeats for each sample
num_beams=NUM_BEAMS, # total number of beams for each sample
title_col=TITLE_COL, # indicates whether the title column (first one) is present in the CSV
skip_XRF_calibration=True) -> np.ndarray: # skip the first spectrum in the CSV, which is usually the device's mandatory calibration
# calculate column index which is for spectrum to get
spectrum_num = title_col + int(skip_XRF_calibration) + num_repeats * spectrum_num * num_beams + repeat_num * num_repeats + beam_num
# print('Selected spectrum number:', spectrum_num)
# get number of data points in spectrum measured
num_points = int(spectra.iloc[ROW_NUM_DATA, spectrum_num])
# get measurement time to calculate cps
meas_time = float(spectra.iloc[ROW_NUM_TIME, spectrum_num])
y_spectrum = spectra.iloc[-num_points:, spectrum_num].to_numpy() / meas_time
return y_spectrum
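# Worked example of the column arithmetic above (illustrative numbers): with title_col=1,
# skip_XRF_calibration=True, 3 repeats and 3 beams, the spectrum for sample 2, repeat 1,
# beam 0 sits in CSV column 1 + 1 + 3*2*3 + 1*3 + 0 = 23.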
def fit_gauss(peak_spectrum: np.array) -> np.array:
'''Fit XRF peak with gaussian.'''
def gauss(x: np.array, *params) -> np.array:
# Gaussian function with params = [baseline, A, mu, sigma] parameters
baseline, A, mu, sigma = params
return baseline + A * np.exp(-(x - mu)**2 / (2. * sigma**2))
# initial params guess
p0 = [0., 1, 0., 1]
x = peak_spectrum[0] / np.max(peak_spectrum[0])
y = peak_spectrum[1] / np.max(peak_spectrum[1])
params, cov = curve_fit(gauss, x, y, p0)
peak_fit = gauss(x, *params) * np.max(peak_spectrum[1])
return np.array([peak_spectrum[0], peak_fit])
def calc_peak_ints(args: argparse.Namespace,
element: str,
spectrum_num: int) -> np.ndarray:
'''Calculate peak integrals for an element for a certain spectrum number.'''
# select beam number from the element data
element = args.elements_data[element]
repeat_ints = []
for rep_num in range(args.repeats):
spectrum = get_spectrum(args.spectra, spectrum_num, rep_num, element.beam,
num_repeats=args.repeats, num_beams=args.num_beams,
title_col=TITLE_COL, skip_XRF_calibration=args.skip_XRF_calibration)
spectrum = savgol_filter(spectrum, element.filter_window, 2)
# integrals for each peak
peak_ints = []
for peak_coords in element.int_limits:
# get indices from x coordinate
peak_mask = np.logical_and(args.x_keV >= peak_coords[0], args.x_keV <= peak_coords[1])
peak = np.array([args.x_keV[peak_mask], spectrum[peak_mask]])
# print(peak)
try:
fit = fit_gauss(peak)
peak_ints.append(np.sum(fit[1]))
'''if spectrum_num == 6 and rep_num == 1:
plt.plot(args.x_keV, spectrum)
plt.plot(peak[0], peak[1])
plt.plot(fit[0], fit[1])
plt.show()'''
except RuntimeError:
print('Gauss fit failed for spectrum', spectrum_num)
peak_ints.append(np.sum(peak[1]))
# print(peak_ints)
repeat_ints.append(peak_ints)
# calculate average and std for each peak for all repeats
repeat_ints = np.array(repeat_ints)
avgs = np.mean(repeat_ints, axis=0) # / weight, not used, see python element_content.py --help
stds = np.std(repeat_ints, axis=0) # / weight
# print('averages for', element.name, 'for spectrum', spectrum_num, avgs)
return avgs, stds
def calc_background(args: argparse.Namespace,
element: str) -> np.ndarray:
'''Calculate the background for the holders, which are at the beginning of
the spectra CSV file.'''
if args.skip_background:
return np.array([]), np.array([])
else:
bg_avs = []
bg_stds = []
for i in range(args.num_holders):
av, std = calc_peak_ints(args, element, i)
bg_avs.append(av)
bg_stds.append(std)
# print('bg averages', np.array(bg_avs), 'bg stds', np.array(bg_stds))
return np.array(bg_avs), np.array(bg_stds)
"""Console script for chess_tuning_tools."""
import json
import logging
import sys
from datetime import datetime
import click
import dill
import numpy as np
from atomicwrites import AtomicWriter
from skopt.utils import create_result
import tune
from tune.db_workers import TuningClient, TuningServer
from tune.io import load_tuning_config, prepare_engines_json, write_engines_json
from tune.local import (
check_log_for_errors,
initialize_data,
initialize_optimizer,
is_debug_log,
load_points_to_evaluate,
parse_experiment_result,
plot_results,
print_results,
run_match,
setup_logger,
update_model,
)
from tune.priors import create_priors
@click.group()
def cli():
pass
@cli.command()
@click.option(
"--verbose", "-v", is_flag=True, default=False, help="Turn on debug output."
)
@click.option("--logfile", default=None, help="Path to where the log is saved to.")
@click.option(
"--terminate-after", default=0, help="Terminate the client after x minutes."
)
@click.option(
"--run-only-once",
default=False,
is_flag=True,
help="Terminate the client after one job has been completed or no job can be "
"found.",
)
@click.option(
"--skip-benchmark",
default=False,
is_flag=True,
help="Skip calibrating the time control by running a benchmark.",
)
@click.option(
"--clientconfig", default=None, help="Path to the client configuration file."
)
@click.argument("dbconfig")
def run_client(
verbose,
logfile,
terminate_after,
run_only_once,
skip_benchmark,
clientconfig,
dbconfig,
):
"""Run the client to generate games for distributed tuning.
In order to connect to the database you need to provide a valid DBCONFIG
json file. It contains the necessary parameters to connect to the database
where it can fetch jobs and store results.
"""
log_level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(
level=log_level,
filename=logfile,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
tc = TuningClient(
dbconfig_path=dbconfig,
terminate_after=terminate_after,
clientconfig=clientconfig,
only_run_once=run_only_once,
skip_benchmark=skip_benchmark,
)
tc.run()
@cli.command()
@click.option(
"--verbose", "-v", is_flag=True, default=False, help="Turn on debug output."
)
@click.option("--logfile", default=None, help="Path to where the log is saved to.")
@click.argument("command")
@click.argument("experiment_file")
@click.argument("dbconfig")
def run_server(verbose, logfile, command, experiment_file, dbconfig):
"""Run the tuning server for a given EXPERIMENT_FILE (json).
To connect to the database you also need to provide a DBCONFIG json file.
\b
You can choose from these COMMANDs:
* run: Starts the server.
* deactivate: Deactivates all active jobs of the given experiment.
* reactivate: Reactivates all recent jobs for which sample size is not reached yet.
"""
log_level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(
level=log_level,
filename=logfile,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
tc = TuningServer(experiment_path=experiment_file, dbconfig_path=dbconfig)
if command == "run":
tc.run()
elif command == "deactivate":
tc.deactivate()
elif command == "reactivate":
tc.reactivate()
else:
raise ValueError(f"Command {command} is not recognized. Terminating...")
@cli.command()
@click.option(
"-c",
"--tuning-config",
help="json file containing the tuning configuration.",
required=True,
type=click.File("r"),
)
@click.option(
"-a",
"--acq-function",
default="mes",
help="Acquisition function to use for selecting points to try. "
"Can be one of: {mes, pvrs, ei, ts, vr, lcb, mean, ttei} "
"Consult the parameter reference and the FAQ for more information.",
show_default=True,
)
@click.option(
"--acq-function-samples",
default=1,
help="How many GP samples to average the acquisition function over. "
"More samples will slow down the computation time, but might give more "
"stable tuning results. Less samples on the other hand cause more exploration "
"which could help avoid the tuning to get stuck.",
show_default=True,
)
@click.option(
"--confidence",
default=0.90,
help="Confidence to use for the highest density intervals of the optimum.",
show_default=True,
)
@click.option(
"-d",
"--data-path",
default="data.npz",
help="Save the evaluated points to this file.",
type=click.Path(exists=False),
show_default=True,
)
@click.option(
"--gp-burnin",
default=5,
type=int,
help="Number of samples to discard before sampling model parameters. "
"This is used during tuning and few samples suffice.",
show_default=True,
)
@click.option(
"--gp-samples",
default=300,
type=int,
help="Number of model parameters to sample for the model. "
"This is used during tuning and it should be a multiple of 100.",
show_default=True,
)
@click.option(
"--gp-initial-burnin",
default=100,
type=int,
help="Number of samples to discard before starting to sample the initial model "
"parameters. This is only used when resuming or for the first model.",
show_default=True,
)
@click.option(
"--gp-initial-samples",
default=300,
type=int,
help="Number of model parameters to sample for the initial model. "
"This is only used when resuming or for the first model. "
"Should be a multiple of 100.",
show_default=True,
)
@click.option(
"--gp-signal-prior-scale",
default=4.0,
type=click.FloatRange(min=0.0),
help="Prior scale of the signal (standard deviation) magnitude which is used to"
"parametrize a half-normal distribution."
"Needs to be a number strictly greater than 0.0.",
show_default=True,
)
@click.option(
"--gp-noise-prior-scale",
default=0.0006,
type=click.FloatRange(min=0.0),
help="Prior scale of the noise (standard deviation) which is used to parametrize a "
"half-normal distribution."
"Needs to be a number strictly greater than 0.0.",
show_default=True,
)
@click.option(
"--gp-lengthscale-prior-lb",
default=0.1,
type=click.FloatRange(min=0.0),
help="Lower bound for the inverse-gamma lengthscale prior. "
"It marks the point where the prior reaches 1% of the cumulative density."
"Needs to be a number strictly greater than 0.0.",
show_default=True,
)
@click.option(
"--gp-lengthscale-prior-ub",
default=0.5,
type=click.FloatRange(min=0.0),
help="Upper bound for the inverse-gamma lengthscale prior. "
"It marks the point where the prior reaches 99% of the cumulative density."
"Needs to be a number strictly greater than 0.0 and the lower bound.",
show_default=True,
)
@click.option(
"-l",
"--logfile",
default="log.txt",
help="Path to log file.",
type=click.Path(exists=False),
show_default=True,
)
@click.option(
"--n-initial-points",
default=16,
help="Size of initial dense set of points to try.",
show_default=True,
)
@click.option(
"--n-points",
default=500,
help="The number of random points to consider as possible next point. "
"Less points reduce the computation time of the tuner, but reduce "
"the coverage of the space.",
show_default=True,
)
@click.option(
"-p",
"--evaluate-points",
default=None,
type=click.Path(exists=False),
help="Path to a .csv file (without header row) with points to evaluate. An optional"
" last column can be used to request a different number of rounds for each "
"point.",
show_default=False,
)
@click.option(
"--plot-every",
default=1,
help="Plot the current optimization landscape every n-th iteration. "
"Set to 0 to turn it off.",
show_default=True,
)
@click.option(
"--plot-path",
default="plots",
help="Path to the directory to which the tuner will output plots.",
show_default=True,
)
@click.option(
"--random-seed",
default=0,
help="Number to seed all internally used random generators.",
show_default=True,
)
@click.option(
"--result-every",
default=1,
help="Output the actual current optimum every n-th iteration."
"The further you are in the tuning process, the longer this will take to "
"compute. Consider increasing this number, if you do not need the output "
"that often. Set to 0 to turn it off.",
show_default=True,
)
@click.option(
"--resume/--no-resume",
default=True,
help="Let the optimizer resume, if it finds points it can use.",
show_default=True,
)
@click.option(
"--fast-resume/--no-fast-resume",
default=True,
help="If set, resume the tuning process with the model in the file specified by"
" the --model-path. "
"Note, that a full reinitialization will be performed, if the parameter"
"ranges have been changed.",
show_default=True,
)
@click.option(
"--model-path",
default="model.pkl",
help="The current optimizer will be saved for fast resuming to this file.",
type=click.Path(exists=False),
show_default=True,
)
@click.option("--verbose", "-v", count=True, default=0, help="Turn on debug output.")
@click.option(
"--warp-inputs/--no-warp-inputs",
default=True,
show_default=True,
help="If True, let the tuner warp the input space to find a better fit to the "
"optimization landscape.",
)
def local( # noqa: C901
tuning_config,
acq_function="mes",
acq_function_samples=1,
confidence=0.9,
data_path=None,
evaluate_points=None,
gp_burnin=5,
gp_samples=300,
gp_initial_burnin=100,
gp_initial_samples=300,
gp_signal_prior_scale=4.0,
gp_noise_prior_scale=0.0006,
gp_lengthscale_prior_lb=0.1,
gp_lengthscale_prior_ub=0.5,
logfile="log.txt",
n_initial_points=16,
n_points=500,
plot_every=1,
plot_path="plots",
random_seed=0,
result_every=1,
resume=True,
fast_resume=True,
model_path="model.pkl",
verbose=0,
warp_inputs=True,
):
"""Run a local tune.
Parameters defined in the `tuning_config` file always take precedence.
"""
json_dict = json.load(tuning_config)
settings, commands, fixed_params, param_ranges = load_tuning_config(json_dict)
root_logger = setup_logger(
verbose=verbose, logfile=settings.get("logfile", logfile)
)
# First log the version of chess-tuning-tools:
root_logger.info(f"chess-tuning-tools version: {tune.__version__}")
root_logger.debug(f"Got the following tuning settings:\n{json_dict}")
# Initialize/import data structures:
if data_path is None:
data_path = "data.npz"
try:
X, y, noise, iteration, optima, performance = initialize_data(
parameter_ranges=list(param_ranges.values()),
resume=resume,
data_path=data_path,
)
except ValueError:
root_logger.error(
"The number of parameters are not matching the number of "
"dimensions. Rename the existing data file or ensure that the "
"parameter ranges are correct."
)
sys.exit(1)
# Initialize Optimizer object and if applicable, resume from existing
# data/optimizer:
gp_priors = create_priors(
n_parameters=len(param_ranges),
signal_scale=settings.get("gp_signal_prior_scale", gp_signal_prior_scale),
lengthscale_lower_bound=settings.get(
"gp_lengthscale_prior_lb", gp_lengthscale_prior_lb
),
lengthscale_upper_bound=settings.get(
"gp_lengthscale_prior_ub", gp_lengthscale_prior_ub
),
noise_scale=settings.get("gp_noise_prior_scale", gp_noise_prior_scale),
)
opt = initialize_optimizer(
X=X,
y=y,
noise=noise,
parameter_ranges=list(param_ranges.values()),
random_seed=settings.get("random_seed", random_seed),
warp_inputs=settings.get("warp_inputs", warp_inputs),
n_points=settings.get("n_points", n_points),
n_initial_points=settings.get("n_initial_points", n_initial_points),
acq_function=settings.get("acq_function", acq_function),
acq_function_samples=settings.get("acq_function_samples", acq_function_samples),
resume=resume,
fast_resume=fast_resume,
model_path=model_path,
gp_initial_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
gp_initial_samples=settings.get("gp_initial_samples", gp_initial_samples),
gp_priors=gp_priors,
)
extra_points = load_points_to_evaluate(
space=opt.space,
csv_file=evaluate_points,
rounds=settings.get("rounds", 10),
)
root_logger.debug(
f"Loaded {len(extra_points)} extra points to evaluate: {extra_points}"
)
# Main optimization loop:
while True:
root_logger.info("Starting iteration {}".format(iteration))
# If a model has been fit, print/plot results so far:
if len(y) > 0 and opt.gp.chain_ is not None:
result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])
result_every_n = settings.get("result_every", result_every)
if result_every_n > 0 and iteration % result_every_n == 0:
try:
current_optimum, estimated_elo, estimated_std = print_results(
optimizer=opt,
result_object=result_object,
parameter_names=list(param_ranges.keys()),
confidence=settings.get("confidence", confidence),
)
optima.append(current_optimum)
performance.append((int(iteration), estimated_elo, estimated_std))
except ValueError:
pass
plot_every_n = settings.get("plot_every", plot_every)
if plot_every_n > 0 and iteration % plot_every_n == 0:
plot_results(
optimizer=opt,
result_object=result_object,
iterations=np.array(performance)[:, 0],
elos=np.array(performance)[:, 1:],
optima=np.array(optima),
plot_path=settings.get("plot_path", plot_path),
parameter_names=list(param_ranges.keys()),
confidence=settings.get("confidence", confidence),
current_iteration=iteration,
)
used_extra_point = False
# If there are extra points to evaluate, evaluate them first in FIFO order:
if len(extra_points) > 0:
point, n_rounds = extra_points.pop(0)
# Log that we are evaluating the extra point:
root_logger.info(
f"Evaluating extra point {dict(zip(param_ranges.keys(), point))} for "
f"{n_rounds} rounds."
)
used_extra_point = True
else:
# Ask optimizer for next point:
point = opt.ask()
n_rounds = settings.get("rounds", 10)
match_settings = settings.copy()
match_settings["rounds"] = n_rounds
point_dict = dict(zip(param_ranges.keys(), point))
root_logger.info("Testing {}".format(point_dict))
# Prepare engines.json file for cutechess-cli:
engine_json = prepare_engines_json(commands=commands, fixed_params=fixed_params)
root_logger.debug(f"engines.json is prepared:\n{engine_json}")
write_engines_json(engine_json, point_dict)
# Run experiment:
root_logger.info("Start experiment")
now = datetime.now()
out_exp = []
out_all = []
for output_line in run_match(**match_settings):
line = output_line.rstrip()
is_debug = is_debug_log(line)
if is_debug and verbose > 2:
root_logger.debug(line)
if not is_debug:
out_exp.append(line)
out_all.append(line)
check_log_for_errors(cutechess_output=out_all)
out_exp = "\n".join(out_exp)
later = datetime.now()
difference = (later - now).total_seconds()
root_logger.info(f"Experiment finished ({difference}s elapsed).")
# Parse cutechess-cli output and report results (Elo and standard deviation):
score, error_variance, draw_rate = parse_experiment_result(out_exp, **settings)
root_logger.info(
"Got Elo: {} +- {}".format(-score * 100, np.sqrt(error_variance) * 100)
)
root_logger.info("Estimated draw rate: {:.2%}".format(draw_rate))
# Update model with the new data:
root_logger.info("Updating model")
update_model(
optimizer=opt,
point=point,
score=score,
variance=error_variance,
acq_function_samples=settings.get(
"acq_function_samples", acq_function_samples
),
gp_burnin=settings.get("gp_burnin", gp_burnin),
gp_samples=settings.get("gp_samples", gp_samples),
gp_initial_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
gp_initial_samples=settings.get("gp_initial_samples", gp_initial_samples),
)
# If we used an extra point, we need to reset n_initial_points of the optimizer:
if used_extra_point:
opt._n_initial_points += 1
# Update data structures and persist to disk:
X.append(point)
y.append(score)
noise.append(error_variance)
iteration += 1
with AtomicWriter(data_path, mode="wb", overwrite=True).open() as f:
np.savez_compressed(
f,
np.array(X),
np.array(y),
np.array(noise),
np.array(optima),
)
#!/usr/bin/env python
#
# bdalg_test.py - test suit for block diagram algebra
# RMM, 30 Mar 2011 (based on TestBDAlg from v0.4a)
import unittest
import numpy as np
from numpy import sort
import control as ctrl
from control.xferfcn import TransferFunction
from control.statesp import StateSpace
from control.bdalg import feedback
from control.lti import zero, pole
class TestFeedback(unittest.TestCase):
"""These are tests for the feedback function in bdalg.py. Currently, some
of the tests are not implemented, or are not working properly. TODO: these
need to be fixed."""
def setUp(self):
"""This contains some random LTI systems and scalars for testing."""
# Two random SISO systems.
self.sys1 = TransferFunction([1, 2], [1, 2, 3])
self.sys2 = StateSpace([[1., 4.], [3., 2.]], [[1.], [-4.]],
[[1., 0.]], [[0.]])
# Two random scalars.
self.x1 = 2.5
self.x2 = -3.
def testScalarScalar(self):
"""Scalar system with scalar feedback block."""
ans1 = feedback(self.x1, self.x2)
ans2 = feedback(self.x1, self.x2, 1.)
self.assertAlmostEqual(ans1.num[0][0][0] / ans1.den[0][0][0],
-2.5 / 6.5)
self.assertAlmostEqual(ans2.num[0][0][0] / ans2.den[0][0][0], 2.5 / 8.5)
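# These values follow from the scalar feedback formula: with negative feedback the
# closed loop is x1 / (1 + x1*x2) = 2.5 / (1 - 7.5) = -2.5/6.5, and with positive
# feedback (sign=1) it is x1 / (1 - x1*x2) = 2.5 / (1 + 7.5) = 2.5/8.5.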
def testScalarSS(self):
"""Scalar system with state space feedback block."""
ans1 = feedback(self.x1, self.sys2)
ans2 = feedback(self.x1, self.sys2, 1.)
np.testing.assert_array_almost_equal(ans1.A, [[-1.5, 4.], [13., 2.]])
np.testing.assert_array_almost_equal(ans1.B, [[2.5], [-10.]])
np.testing.assert_array_almost_equal(ans1.C, [[-2.5, 0.]])
np.testing.assert_array_almost_equal(ans1.D, [[2.5]])
np.testing.assert_array_almost_equal(ans2.A, [[3.5, 4.], [-7., 2.]])
np.testing.assert_array_almost_equal(ans2.B, [[2.5], [-10.]])
np.testing.assert_array_almost_equal(ans2.C, [[2.5, 0.]])
np.testing.assert_array_almost_equal(ans2.D, [[2.5]])
# Make sure default arguments work as well
ans3 = feedback(self.sys2, 1)
ans4 = feedback(self.sys2)
np.testing.assert_array_almost_equal(ans3.A, ans4.A)
np.testing.assert_array_almost_equal(ans3.B, ans4.B)
np.testing.assert_array_almost_equal(ans3.C, ans4.C)
np.testing.assert_array_almost_equal(ans3.D, ans4.D)
def testScalarTF(self):
"""Scalar system with transfer function feedback block."""
ans1 = feedback(self.x1, self.sys1)
ans2 = feedback(self.x1, self.sys1, 1.)
np.testing.assert_array_almost_equal(ans1.num, [[[2.5, 5., 7.5]]])
np.testing.assert_array_almost_equal(ans1.den, [[[1., 4.5, 8.]]])
np.testing.assert_array_almost_equal(ans2.num, [[[2.5, 5., 7.5]]])
np.testing.assert_array_almost_equal(ans2.den, [[[1., -0.5, -2.]]])
# Make sure default arguments work as well
ans3 = feedback(self.sys1, 1)
ans4 = feedback(self.sys1)
np.testing.assert_array_almost_equal(ans3.num, ans4.num)
np.testing.assert_array_almost_equal(ans3.den, ans4.den)
def testSSScalar(self):
"""State space system with scalar feedback block."""
ans1 = feedback(self.sys2, self.x1)
ans2 = feedback(self.sys2, self.x1, 1.)
np.testing.assert_array_almost_equal(ans1.A, [[-1.5, 4.], [13., 2.]])
np.testing.assert_array_almost_equal(ans1.B, [[1.], [-4.]])
np.testing.assert_array_almost_equal(ans1.C, [[1., 0.]])
np.testing.assert_array_almost_equal(ans1.D, [[0.]])
np.testing.assert_array_almost_equal(ans2.A, [[3.5, 4.], [-7., 2.]])
np.testing.assert_array_almost_equal(ans2.B, [[1.], [-4.]])
np.testing.assert_array_almost_equal(ans2.C, [[1., 0.]])
np.testing.assert_array_almost_equal(ans2.D, [[0.]])
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from datetime import date
import numpy as np
import os
def average(data):
'''7d moving average (3 back, 3 forward in time)'''
result = np.zeros(data.shape[0]-3)
data = np.append(np.zeros(3), data, axis=0)
for i in range(7):
result += data[i:data.shape[0]-6+i]
result /= 7
return result
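# Minimal sanity check of average() on synthetic data: for a constant series the moving
# average equals that constant once the 3 zero-padded days at the start have left the window.
_demo = average(np.full(30, 5.0))
assert np.allclose(_demo[3:], 5.0)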
def plot_graph(days, name):
print(f'\r{name}: plotting', end='')
fig, ax1 = plt.subplots(1, figsize=(6,4))
fig.suptitle('Covid-cases and -deaths')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m.%y'))
# ax1.xaxis.set_major_locator(mdates.DayLocator(interval=n//10))
plt.gcf().autofmt_xdate()
ax1.set_xlabel('date')
ax1.set_ylim(0,80000)
ax2 = ax1.twinx()
ax2.set_ylim(0,4000)
for x, ax, color in [('cases', ax1, 'blue'), ('deaths', ax2, 'red')]:
today, = [np.genfromtxt(f'{x}/{day}', delimiter=',', dtype=int)]
import argparse
import numpy as np
import gzip
import helpers as h
from scipy.stats import hypergeom
def downsample_sfs(n_obs, allele_count, n_downsample):
"""
Calculate expected site frequency spectrum for a smaller sample size.
Arguments:
n_obs -- the original number of haploid genotypes
allele_count -- the original allele count
n_downsample -- the new (smaller) sample size
Returns:
sfs -- the expected downsampled SFS as a length--n_downsample + 1 numpy array
"""
# If fewer than n_downsample observations, return zeros.
if n_downsample > n_obs:
sfs = np.zeros(n_downsample+1)
# Otherwise, use hypergeometric probability as expected sfs
else:
x = np.arange(0, n_downsample+1)
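# A minimal sketch (not the original implementation) of the hypergeometric expectation
# described in the docstring: each downsampled allele count j gets probability
# hypergeom.pmf(j, n_obs, allele_count, n_downsample).
def downsample_sfs_sketch(n_obs, allele_count, n_downsample):
    x = np.arange(0, n_downsample + 1)
    return hypergeom.pmf(x, n_obs, allele_count, n_downsample)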
# This code belongs to the paper
#
# <NAME> and <NAME>.
# WPPNets and WPPFlows: The Power of Wasserstein Patch Priors for Superresolution.
# ArXiv Preprint#2201.08157
#
# Please cite the paper, if you use the code.
#
# The script estimates the forward operator based on a registered pair of
# high- and low-resolution image.
import torch
import torch.nn as nn
import skimage.io,skimage.transform
import numpy as np
import math
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def init_weights(kernel_size, sigma,shift=torch.zeros(2,dtype=torch.float,device=DEVICE)):
x_grid = torch.arange(kernel_size).view(kernel_size,1).repeat(1,kernel_size).to(DEVICE)
y_grid = x_grid.permute(1,0)
xy_grid = torch.stack([x_grid,y_grid],dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.*torch.tensor([1.,1.],dtype=torch.float,device=DEVICE)
gaussian_kernel = (1./(torch.prod(2.*math.pi*variance)**.5))*torch.exp(-torch.sum((xy_grid - mean - shift)**2./(2*variance), dim=-1))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
return gaussian_kernel.view(1, 1, kernel_size, kernel_size)
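# For example, init_weights(15, 2) returns a 1x1x15x15 Gaussian blur kernel with a
# standard deviation of 2 pixels, centred on the window and normalised to sum to 1.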
class SR_operator(nn.Module):
def __init__(self,scale,kernel_size=15):
super(SR_operator,self).__init__()
self.kernel=nn.Parameter(init_weights(kernel_size,2),requires_grad=False)
self.bias=nn.Parameter(torch.tensor([0.],device=DEVICE,dtype=torch.float),requires_grad=False)
self.scale=scale
self.kernel_size=kernel_size
def forward(self,x):
kernel=torch.zeros_like(x)
diffs_kernel=np.array([kernel.shape[2]-self.kernel_size,kernel.shape[3]-self.kernel_size])
diffs_kernel_right=diffs_kernel//2
diffs_kernel_left=diffs_kernel-diffs_kernel_right
kernel[:,:,diffs_kernel_left[0]:-diffs_kernel_right[0],diffs_kernel_left[1]:-diffs_kernel_right[1]]=self.kernel.data
kernel=torch.fft.ifftshift(kernel)
x=torch.fft.fftn(x)/torch.prod(torch.tensor(x.shape,dtype=torch.float,device=DEVICE))
kernel_four=torch.fft.fftn(kernel)
x=kernel_four*x
x[:,0,0,0]+=self.bias.data
x=torch.fft.fftshift(x)
hr_shape=x.shape[2:]
if type(self.scale)==list:
lr_shape=[int(np.round(s*self.scale[t])) for t,s in enumerate(hr_shape)]
else:
lr_shape=[int(np.round(s*self.scale)) for s in hr_shape]
diffs=np.array([hr_shape[0]-lr_shape[0],hr_shape[1]-lr_shape[1]])
diffs_left=diffs//2
diffs_right=diffs-diffs_left
diffs_right[diffs_right==0]=-np.max(list(x.shape))
x=x[:,:,diffs_left[0]:-diffs_right[0],diffs_left[1]:-diffs_right[1]]
x=x*torch.prod(torch.tensor(x.shape,dtype=torch.float,device=DEVICE))
x=torch.real(torch.fft.ifftn(torch.fft.ifftshift(x)))
return x
if __name__=='__main__':
print(DEVICE)
kernel_size=15
hr_learn=skimage.io.imread('training_img/FS_registered_operator/FS_HR_estimate_operator.png').astype(np.float64)/255
lr_learn=skimage.io.imread('training_img/FS_registered_operator/FS_LR_estimate_operator.png').astype(np.float64)/255
lr_learn=skimage.transform.rescale(lr_learn,.5)
diffs=np.array([hr_learn.shape[0]-lr_learn.shape[0],hr_learn.shape[1]-lr_learn.shape[1]])
diffs_right=diffs//2
diffs_left=diffs-diffs_right
hr_four=np.fft.fftshift(np.fft.fftn(hr_learn))/np.prod(hr_learn.shape)
hr_four_lr=hr_four[diffs_left[0]:-diffs_right[0],diffs_left[1]:-diffs_right[1]]
lr_four=np.fft.fftshift(np.fft.fftn(lr_learn))/np.prod(lr_learn.shape)
kernel_four_middle_phase=(lr_four/hr_four_lr)/np.abs(lr_four/hr_four_lr)
kernel_four_middle_abs=np.abs(lr_four)
from __future__ import print_function, division
from keras.utils import Sequence
import numpy as np
import os
import gc
import sys
import datetime
import h5py
class ModelDataInterface(object):
def __init__(self, data_file_path, dim_x = 32, dim_y = 32, dim_z = 32, n_classes = 10):
'Initialization'
self.dim_x = dim_x
self.dim_y = dim_y
self.dim_z = dim_z
self.n_classes = n_classes
self.data_file_path = data_file_path
self.data_file = h5py.File(data_file_path, "a")
@staticmethod
def get_exploration_order(dataset_size, shuffle = False):
'Generates order of exploration'
# Find exploration order
indexes = np.arange(dataset_size)
if shuffle:
np.random.shuffle(indexes)
return indexes
def _crop_pad_image(self, image_segment):
# Center crop followed by balanced padding
s = image_segment.shape
if s[0] > self.dim_x or s[1] > self.dim_y:
startx = s[1] // 2 - (self.dim_x // 2)
starty = s[0] // 2 - (self.dim_y // 2)
image_segment = image_segment[max(starty, 0): starty + self.dim_y, max(startx, 0) : startx + self.dim_x]
s = image_segment.shape
full_pad = ((self.dim_x - s[0]), (self.dim_y - s[1]))
result = np.lib.pad(image_segment, (( full_pad[0] // 2, full_pad[0] - full_pad[0] // 2), ( full_pad[1] // 2, full_pad[1] - full_pad[1] // 2), (0, 0)), 'constant', constant_values = 127 )
return result
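# Worked example with dim_x = dim_y = 4: a 2x6 segment is first centre-cropped to 2x4
# (it exceeds the target width) and then padded with one row of value 127 above and
# one below to reach the 4x4 target size.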
@staticmethod
def modify_image(image, image_modification_seed):
if image_modification_seed < 0.25:
pass
elif image_modification_seed < 0.5:
image = np.flip(image, axis = 0)
elif image_modification_seed < 0.75:
image = np.flip(image, axis = 1)
else:
image = np.flip(image, axis = 0)
image = np.flip(image, axis = 1)
return image
def _sparsify(self, y):
'Returns labels in binary NumPy array'
n_classes = self.n_classes
return np.array([[1 if y[i] == j else 0 for j in range(n_classes)] for i in range(y.shape[0])])
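# Example: with n_classes = 3, labels y = [2, 0] are returned as the one-hot rows
# [[0, 0, 1], [1, 0, 0]].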
def get_dataset_size(self, dataset_partition):
label_set = self.data_file[dataset_partition + "_label_set"]
return label_set.shape[0]
def get_file_metadata(self, dataset_partition):
file_info_set = self.data_file[dataset_partition + "_file_info_set"]
file_metadata = file_info_set[:]
file_metadata = [(image_name.decode('UTF-8'), region_count) for image_name, region_count in file_metadata]
return file_metadata
def get_label_descriptions(self):
label_description_set = self.data_file["label_description_set"]
label_descriptions = label_description_set[:]
label_descriptions = [(label_id, label_description.decode('UTF-8')) for label_id, label_description in label_descriptions]
return label_descriptions
class DataSequence(Sequence):
def initialize_new_epoch(self):
self.indexes = ModelDataInterface.get_exploration_order(self.dataset_size, self.shuffle)
if not self.label_only:
if self.data_augmentation_enabled:
self.image_modification = np.random.random((self.dataset_size))
else:
self.image_modification = np.zeros((self.dataset_size))
def __init__(self, data_interface_object, dataset_partition, dim_x, dim_y, dim_z, data_only = False, label_only = False, data_augmentation_enabled = False, shuffle = False, batch_size = 32, cached = False):
print("Preparing sequence generator for '" + dataset_partition + "'")
self.parent = data_interface_object
self.dim_x = dim_x
self.dim_y = dim_y
self.dim_z = dim_z
self.data_only = data_only
self.label_only = label_only
self.data_augmentation_enabled = data_augmentation_enabled
self.shuffle = shuffle
self.batch_size = batch_size
self.cached = cached
if not data_only:
self.label_set = self.parent.data_file[dataset_partition + "_label_set"]
dataset_size = self.label_set.shape[0]
self.dataset_size = dataset_size
if cached:
self.label_cache = np.empty((dataset_size), dtype = 'int')
for i in range(dataset_size):
self.label_cache[i] = self.label_set[i]
if not label_only:
self.image_set = self.parent.data_file[dataset_partition + "_image_set"]
self.mask_set = self.parent.data_file[dataset_partition + "_mask_set"]
self.metadata_set = self.parent.data_file[dataset_partition + "_metadata_set"]
dataset_size = self.image_set.shape[0]
self.dataset_size = dataset_size
if cached:
self.data_cache = np.empty((dataset_size, self.dim_x, self.dim_y, self.dim_z), dtype = 'uint8')
for i in range(dataset_size):
shape = tuple(self.metadata_set[i][0])
image_segment = np.resize(self.image_set[i], shape)
standard_image = self.parent._crop_pad_image(image_segment)
self.data_cache[i, :, : ,:] = standard_image
self.total_batch_num = int(np.ceil(self.dataset_size / float(self.batch_size)))
self.initialize_new_epoch()
def __len__(self):
return self.total_batch_num
def on_epoch_end(self):
gc.collect()
self.initialize_new_epoch()
def get_batch(self, batch_i):
'Generates data of batch_size samples'
indexes = self.indexes
label_only = self.label_only
data_only = self.data_only
cached = self.cached
batch_size = self.batch_size
batch_data_ids = indexes[batch_i * batch_size : (batch_i + 1) * batch_size]
# X : (n_samples, v_size, v_size, v_size)
# Initialization
n_samples = len(batch_data_ids)
if not label_only:
X = np.empty((n_samples, self.dim_x, self.dim_y, self.dim_z))
if cached:
for i, data_id in enumerate(batch_data_ids):
standard_image = np.copy(self.data_cache[data_id])
processed_image = ModelDataInterface.modify_image(standard_image, self.image_modification[data_id]) / 255.0
X[i, :, :, :] = processed_image
else:
for i, data_id in enumerate(batch_data_ids):
shape = tuple(self.metadata_set[data_id][0])
image_segment = np.resize(self.image_set[data_id], shape)
standard_image = self.parent._crop_pad_image(image_segment)
processed_image = ModelDataInterface.modify_image(standard_image, self.image_modification[data_id]) / 255.0
X[i, :, :, :] = processed_image
if not data_only:
y = np.empty((n_samples), dtype = int)
if cached:
for i, data_id in enumerate(batch_data_ids):
y[i] = self.label_cache[data_id]
else:
for i, data_id in enumerate(batch_data_ids):
y[i] = self.label_set[data_id]
if data_only:
return X
if label_only:
return y
return X, self.parent._sparsify(y)
def __getitem__(self, idx):
return self.get_batch(idx)
def get_sequence_generator(self, dataset_partition, data_only = False, label_only = False, data_augmentation_enabled = False, shuffle = False, batch_size = 32, cached = False):
seq_gen = ModelDataInterface.DataSequence(self, dataset_partition, self.dim_x, self.dim_y, self.dim_z, data_only, label_only, data_augmentation_enabled, shuffle, batch_size, cached)
return seq_gen
def get_generator(self, dataset_partition, data_only = False, label_only = False, data_augmentation_enabled = False, shuffle = False, batch_size = 32, cached = False):
# dataset_partition is a string specifying the partition to be used. i.e. 'training', 'dev' or 'testing'
print("Preparing python generator for '" + dataset_partition + "'")
if not data_only:
label_set = self.data_file[dataset_partition + "_label_set"]
dataset_size = label_set.shape[0]
if cached:
label_cache = np.empty((dataset_size), dtype = 'int')
for i in range(dataset_size):
label_cache[i] = label_set[i]
if not label_only:
image_set = self.data_file[dataset_partition + "_image_set"]
mask_set = self.data_file[dataset_partition + "_mask_set"]
metadata_set = self.data_file[dataset_partition + "_metadata_set"]
dataset_size = image_set.shape[0]
if cached:
data_cache = np.empty((dataset_size, self.dim_x, self.dim_y, self.dim_z), dtype = 'uint8')
for i in range(dataset_size):
shape = tuple(metadata_set[i][0])
image_segment = np.resize(image_set[i], shape)
standard_image = self._crop_pad_image(image_segment)
data_cache[i, :, : ,:] = standard_image
total_batch_num = int(np.ceil(dataset_size / float(batch_size)))
def generate():
'Generates batches of samples'
# Infinite loop
while 1:
# Generate order of exploration of dataset
indexes = ModelDataInterface.get_exploration_order(dataset_size, shuffle)
if not label_only:
if data_augmentation_enabled:
image_modification = np.random.random((dataset_size))
else:
image_modification = np.zeros((dataset_size))
# Generate batches
for batch_i in range(total_batch_num):
# Find list of IDs
batch_data_ids = indexes[batch_i * batch_size : (batch_i + 1) * batch_size]
'Generates data of batch_size samples'
# X : (n_samples, v_size, v_size, v_size)
# Initialization
n_samples = len(batch_data_ids)
if not label_only:
X = np.empty((n_samples, self.dim_x, self.dim_y, self.dim_z))
if cached:
for i, data_id in enumerate(batch_data_ids):
standard_image = np.copy(data_cache[data_id])
processed_image = ModelDataInterface.modify_image(standard_image, image_modification[data_id]) / 255.0
X[i, :, :, :] = processed_image
else:
for i, data_id in enumerate(batch_data_ids):
shape = tuple(metadata_set[data_id][0])
image_segment = np.resize(image_set[data_id], shape)
standard_image = self._crop_pad_image(image_segment)
processed_image = ModelDataInterface.modify_image(standard_image, image_modification[data_id]) / 255.0
X[i, :, :, :] = processed_image
if not data_only:
y = np.empty((n_samples), dtype = int)
if cached:
for i, data_id in enumerate(batch_data_ids):
y[i] = label_cache[data_id]
else:
for i, data_id in enumerate(batch_data_ids):
y[i] = label_set[data_id]
if data_only:
yield X
if label_only:
yield y
if not (data_only or label_only):
yield X, self._sparsify(y)
if not label_only:
del X
if not data_only:
del y
gc.collect()
return generate()
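# Usage sketch (illustrative, not part of the original class): assuming an
# initialized ModelDataInterface instance `mdi` with its HDF5 file already open,
# the generator could be consumed roughly as follows; `mdi` is a placeholder name.
#   gen = mdi.get_generator('training', shuffle=True, batch_size=32, cached=True)
#   X_batch, y_batch = next(gen)
#   # steps per epoch would be int(np.ceil(dataset_size / float(batch_size))),
#   # matching total_batch_num above.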
class EvaluationDataInterface(object):
dataset_exists_error_message_template = "Error encountered when creating {dataset_name}: dataset already exists."
def __init__(self):
pass
def open_file(self, data_file_path):
self.data_file_path = data_file_path
self.data_file = h5py.File(data_file_path, "a")
def write_evaluation_results_to_file(self, dataset_partition, table_text, count_cross_table, prob_label_cross_table, prob_pred_cross_table):
if not dataset_partition + "table_text" in self.data_file:
# print(table_text)
self.data_file.create_dataset(dataset_partition + "table_text", data = np.array(table_text, dtype = "S256"), dtype = "S256")
else:
print(dataset_exists_error_message_template.format(dataset_name = dataset_partition + "count_cross_table"))
if not dataset_partition + "count_cross_table" in self.data_file:
self.data_file.create_dataset(dataset_partition + "count_cross_table", data = count_cross_table)
else:
print(dataset_exists_error_message_template.format(dataset_name = dataset_partition + "count_cross_table"))
if not dataset_partition + "prob_label_cross_table" in self.data_file:
self.data_file.create_dataset(dataset_partition + "prob_label_cross_table", data = prob_label_cross_table)
else:
print(dataset_exists_error_message_template.format(dataset_name = dataset_partition + "prob_label_cross_table"))
if not dataset_partition + "prob_pred_cross_table" in self.data_file:
self.data_file.create_dataset(dataset_partition + "prob_pred_cross_table", data = prob_pred_cross_table)
else:
print(dataset_exists_error_message_template.format(dataset_name = dataset_partition + "prob_pred_cross_table"))
def read_evaluation_results(self, dataset_partition):
table_text = self.data_file[dataset_partition + "table_text"]
table_text = [x.decode('UTF-8') for x in table_text]
count_cross_table = self.data_file[dataset_partition + "count_cross_table"]
count_cross_table = np.array(count_cross_table)
prob_label_cross_table = self.data_file[dataset_partition + "prob_label_cross_table"]
prob_label_cross_table =
|
np.array(prob_label_cross_table)
|
numpy.array
|
import numpy as np
import numexpr as ne
import healpy as hp
from astropy.table import Table
from astropy.time import Time
from astropy import constants
from astropy import units as u
from astropy.coordinates import EarthLocation, AltAz, SkyCoord, TETE
import copy
from pyuvdata import UVData, UVBeam
import multiprocessing
from scipy.interpolate import SmoothSphereBivariateSpline as SSBS
from scipy.interpolate import RectSphereBivariateSpline as RSBS
from scipy.interpolate import RectBivariateSpline as RBS
import pixel_selection
class OptMapping:
'''Optimal Mapping Object
'''
def __init__(self, uv, nside, epoch='J2000', feed=None):
'''Init function for basic setup
Input
------
uv: pyuvdata object
UVData data in the pyuvdata format, data_array only has the blt dimension
nside: int
nside of the healpix map
epoch: str
epoch of the map, can be either 'J2000' or 'Current'
feed: str
feed type 'dipole' or 'vivaldi'. Default is None, and feed type is determined by
the observation date
Return
------
None
'''
self.hera_site = EarthLocation(lat=uv.telescope_location_lat_lon_alt_degrees[0]*u.deg,
lon=uv.telescope_location_lat_lon_alt_degrees[1]*u.deg,
height=uv.telescope_location_lat_lon_alt_degrees[2]*u.m)
self.uv = uv
self.nside = nside
self.npix = hp.nside2npix(nside)
self.hera_dec = self.uv.telescope_location_lat_lon_alt[0]
self.lsts = np.unique(self.uv.lst_array)
self.times = np.unique(uv.time_array)
self.equinox = epoch
if feed is None:
if np.mean(self.times) < 2458362: #2018-09-01
self.feed_type = 'dipole'
else:
self.feed_type = 'vivaldi'
else:
self.feed_type = feed
print('RA/DEC in the epoch of %s, with %s beam used.'%(self.equinox, self.feed_type))
theta, phi = hp.pix2ang(nside, range(self.npix))
self.ra = phi
self.dec = np.pi/2. - theta
az, alt = self._radec2azalt(self.ra, self.dec,
np.mean(self.times))
self.az = az
self.alt = alt
self.frequency = np.squeeze(self.uv.freq_array)
self.wavelength = constants.c.value/self.frequency
data = np.squeeze(self.uv.data_array)
flag = np.squeeze(self.uv.flag_array)
self.data = np.expand_dims(data, axis=1)
self.flag = np.expand_dims(flag, axis=1)
self.nvis = len(data)
return
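# Construction sketch (hypothetical, not part of the original module): `uv_single`
# stands for a UVData object already reduced to a single frequency and polarization,
# as the squeezes above assume.
#   om = OptMapping(uv_single, nside=64, epoch='J2000')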
def _radec2azalt(self, ra, dec, time):
'''Convert ra/dec to az/alt at the given obs_time and assuming the site
as HERA
Input:
------
ra: 1d array (float)
array of the ra coordinates (in radians)
dec: 1d array (float)
array of the dec coordinates (in radians)
time: float
observation time (in the format of JD)
Output:
------
az, alt: 1d array (float)
arrays containing the converted az, alt values (in radians)
'''
obs_time = Time(time, format='jd')
aa = AltAz(location=self.hera_site, obstime=obs_time)
if self.equinox == 'J2000':
c = SkyCoord(ra=ra, dec=dec, unit='radian', frame=TETE(obstime=self.equinox))
#c = SkyCoord(ra=ra, dec=dec, unit='radian', frame='icrs')
#print('ICRS')
elif self.equinox == 'Current':
c = SkyCoord(ra=ra, dec=dec, unit='radian', frame=TETE(obstime=obs_time))
else:
print('Please provide a proper epoch: either J2000 or Current')
az = np.radians(c.transform_to(aa).az.value)
alt = np.radians(c.transform_to(aa).alt.value)
return az, alt
def set_k_psf(self, radius_deg, calc_k=False):
'''Function to set up the K_psf matrix. K_psf selects
healpix from the entire sky to the regions within a
certain radius away from the phase center
Input:
------
radius_deg: float (in degrees)
radius to be included in the K_psf matrix
calc_k: boolean
whether calculating K_psf
Output:
------
k_psf: 2d array (boolean) (if calc_k=True)
Npsf X Npix array
Attributes:
------
.idx_psf_in: 1d array (int)
healpix map indices within the PSF
.idx_psf_out: 1d array (int)
healpix map indices outside of the PSF
.k_psf: 2d array (bool), if calc_k=True
matrix turning the full map into psf-included map
'''
psf_radius = np.radians(radius_deg)
self.idx_psf_out = np.where((np.pi/2 - self.alt) > psf_radius)[0]
self.idx_psf_in = np.where((np.pi/2 - self.alt) < psf_radius)[0]
if calc_k:
k_full = np.diag(np.ones(self.npix, dtype=bool))
k_psf = np.delete(k_full, self.idx_psf_out, axis=0).T
del k_full
self.k_psf = k_psf
return k_psf
else:
return
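# Usage sketch (hypothetical): given an OptMapping instance `om`, restrict the
# map to pixels within 10 degrees of the phase center; calc_k=True additionally
# returns the boolean selection matrix.
#   om.set_k_psf(radius_deg=10)                 # sets .idx_psf_in / .idx_psf_out
#   k_psf = om.set_k_psf(radius_deg=10, calc_k=True)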
def set_psf_by_idx(self, idx_psf_in=None, calc_k=False):
'''Set up the K_psf matrix by passing indices.
Input:
------
idx_psf_in: array-like int
Healpix indices of the psf region for the map. Default is the union of horizon during
the entire observation according to uv
calc_k: boolean
whether calculating K_psf
Output:
------
k_psf: 2d array (boolean) (if calc_k=True)
Npsf X Npix array
Attributes:
------
.idx_psf_in: 1d array (int)
healpix map indices within the PSF
.idx_psf_out: 1d array (int)
healpix map indices outside of the PSF
.k_psf: 2d array (bool), if calc_k=True
matrix turning the full map into psf-included map
'''
if idx_psf_in is None:
self.idx_psf_in = pixel_selection.set_psf_idx(self.nside, self.lsts.min(), self.lsts.max(), radius=90)
else:
assert idx_psf_in.max() <= self.npix, "PSF indices out of range."
self.idx_psf_in = idx_psf_in
self.idx_psf_out = np.arange(self.npix)[~np.in1d(np.arange(self.npix), self.idx_psf_in)]
if calc_k:
k_full = np.diag(np.ones(self.npix, dtype=bool))
k_psf = np.delete(k_full, self.idx_psf_out, axis=0).T
del k_full
self.k_psf = k_psf
return k_psf
else:
return
def set_pyuvbeam(self, beam_model):
'''Set up the pyuvbeam from simulation for interpolation
Args
------
beam_model: str ('vivaldi' or 'dipole')
beam model used for interpolation
Output:
------
None
Attribute:
.pyuvbeam: UVBeam Object
UVBeam Object for beam interpolation
'''
# loading the beamfits file
if beam_model == 'vivaldi':
beamfits_file = '/nfs/esc/hera/HERA_beams/high_precision_runs/outputs/'+\
'cst_vivaldi_time_solver_simplified_master_Apr2021/uvbeam/'+\
'efield_farfield_Vivaldi_pos_0.0_0.0_0.0_0.0_0.0_160_180MHz_high_precision_0.125MHz_simplified_model.beamfits'
#print('Vivaldi beam simulation file is not set up yet.')
elif beam_model == 'dipole':
beamfits_file = '/nfs/ger/home/zhileixu/data/git_beam/HERA-Beams/NicolasFagnoniBeams/NF_HERA_Dipole_efield_beam_high-precision.fits'
#beamfits_file = '/nfs/ger/home/zhileixu/data/git_beam/cst_beam_files/fagnoni_high_precision_dipole/H19/'+\
# 'E-farfield-100ohm-50-250MHz-high-acc-ind-H19-port21/efield_dipole_H19-port21_high-precision_peak-norm.fits'
else:
print('Please provide correct beam model (either vivaldi or dipole)')
print('Beam file:', beamfits_file)
pyuvbeam = UVBeam()
pyuvbeam.read_beamfits(beamfits_file)
pyuvbeam.efield_to_power()
#pyuvbeam.select(polarizations=self.uv.polarization_array)
pyuvbeam.select(polarizations=[-6,])
#print(pyuvbeam.polarization_array)
pyuvbeam.peak_normalize()
pyuvbeam.interpolation_function = 'az_za_simple'
pyuvbeam.freq_interp_kind = 'cubic'
# attribute assignment
self.pyuvbeam = pyuvbeam
return
# def pyuvbeam_efield_to_power(self, efield_data, basis_vector_array,
# calc_cross_pols=True):
#
# Nfeeds = efield_data.shape[0]
# Nfreqs = efield_data.shape[3]
# Nsources = efield_data.shape[4]
#
# feed_pol_order = [(0, 0)]
# if Nfeeds > 1:
# feed_pol_order.append((1, 1))
#
# if calc_cross_pols:
# Npols = Nfeeds ** 2
# if Nfeeds > 1:
# feed_pol_order.extend([(0, 1), (1, 0)])
# else:
# Npols = Nfeeds
#
#
# power_data = np.zeros((1, 1, Npols, Nfreqs, Nsources), dtype=np.complex128)
#
#
# for pol_i, pair in enumerate(feed_pol_order):
# for comp_i in range(2):
# power_data[0, :, pol_i] += (
# (
# efield_data[0, :, pair[0]]
# * np.conj(efield_data[0, :, pair[1]])
# )
# * basis_vector_array[0, comp_i] ** 2
# + (
# efield_data[1, :, pair[0]]
# * np.conj(efield_data[1, :, pair[1]])
# )
# * basis_vector_array[1, comp_i] ** 2
# + (
# efield_data[0, :, pair[0]]
# * np.conj(efield_data[1, :, pair[1]])
# + efield_data[1, :, pair[0]]
# * np.conj(efield_data[0, :, pair[1]])
# )
# * (
# basis_vector_array[0, comp_i]
# * basis_vector_array[1, comp_i]
# )
# )
#
# power_data = np.real_if_close(power_data, tol=10)
#
# return power_data
# def set_beam_model(self, beam_model, interp_method='grid'):
# '''Beam interpolation model set up with RectSphereBivariantSpline
# beam power is used as sqrt(col4**2 + col6**2)
#
# Input:
# ------
# beam_model: str ('vivaldi' or 'dipole')
# beam model used for interpolation
# interp_method: str ('grid' or 'sphere')
# Method used for interpolating the beam
# 'grid' -> RectBivariateSpline
# 'sphere' -> RectSphereBivariateSpline
#
# Output:
# ------
# None
#
# Attribute:
# .beam_model: function
# interpolation function for the beam
# '''
# # loading the beam file
# if beam_model == 'vivaldi':
# beam_file_folder = '/nfs/eor-14/d1/hera/beams/Vivaldi_1.8m-detailed_mecha_design-E-field-100ohm_load-Pol_X'
# elif beam_model == 'dipole':
# beam_file_folder = '/nfs/ger/proj/hera/beams/dipole_beams_Efield/HERA 4.9m - E-field'
# else:
# print('Please provide correct beam model (either vivaldi or dipole)')
# ifreq = int(np.round(self.frequency/1e6))
# beam_file = beam_file_folder+'/farfield (f=%d) [1].txt'%ifreq
# beam_table = Table.read(beam_file, format='ascii', data_start=2)
# #print(beam_model, 'is selected with', interp_method, 'interpolation method.')
# beam_theta = np.radians(np.unique(beam_table['col1']))
# beam_phi = np.radians(np.unique(beam_table['col2']))
# power = beam_table['col4']**2 + beam_table['col6']**2
# beam_data = power.reshape(len(beam_phi), len(beam_theta)).T
# beam_data = beam_data/beam_data.max()
# if interp_method == 'sphere':
# epsilon = 1e-5
# beam_theta[0] += epsilon
# beam_theta[-1] -= epsilon
# beam_model = RSBS(beam_theta, beam_phi, beam_data)
# elif interp_method == 'grid':
# beam_model = RBS(beam_theta, beam_phi, beam_data)
# else:
# print('Please provide a proper interpolation method, either sphere or grid.')
# # Attribute assignment
# self.beam_model = beam_model
#
# return
def set_a_mat(self, uvw_sign=1, apply_beam=True):
'''Calculating A matrix, covering the range defined by K_psf
Input:
------
uvw_sign: 1 or -1
uvw sign for the baseline calculation
apply_beam: boolean
Whether apply beam to the a matrix elements, default:true
Output:
------
a_mat: 2d matrix (complex64)
a_matrix (Nvis X Npsf) from the given observation
Attribute:
------
.a_mat: 2d matrix (complex64)
a_matrix added in the attribute
'''
a_mat = np.zeros((len(self.data), len(self.idx_psf_in)), dtype='float64')
beam_mat = np.zeros(a_mat.shape, dtype='float64')
#self.set_beam_model(beam_model=self.feed_type)
self.set_pyuvbeam(beam_model=self.feed_type)
#print('Pyuvdata readin.')
freq_array = np.array([self.frequency,])
#self.set_beam_interp('hp')
for time_t in np.unique(self.uv.time_array):
az_t, alt_t = self._radec2azalt(self.ra[self.idx_psf_in],
self.dec[self.idx_psf_in],
time_t)
lmn_t = np.array([np.cos(alt_t)*np.sin(az_t),
np.cos(alt_t)*np.cos(az_t),
np.sin(alt_t)])
#beam_map_t = self.beam_model(np.pi/2. - alt_t, az_t, grid=False)
pyuvbeam_interp,_ = self.pyuvbeam.interp(az_array=az_t, za_array=np.pi/2. - alt_t,
az_za_grid=False, freq_array= freq_array,
reuse_spline=True)
#print('efield interpolation...')
#pyuvbeam_interp_e, vectors = self.pyuvbeam.interp(az_array=az_t, za_array=np.pi/2. - alt_t,
# az_za_grid=False, freq_array= freq_array,
# reuse_spline=True)
#pyuvbeam_interp = self.pyuvbeam_efield_to_power(pyuvbeam_interp_e, vectors)
#ipol = 0
#print(ipol)
beam_map_t = pyuvbeam_interp[0, 0, 0, 0].real
#beam_map_t = self.beam_dic[time_t]
idx_time = np.where(self.uv.time_array == time_t)[0]
for i in range(len(idx_time)):
irow = idx_time[i]
a_mat[irow] = uvw_sign*2*np.pi/self.wavelength*np.matmul(np.matrix(self.uv.uvw_array[irow].astype(np.float64)),
np.matrix(lmn_t.astype(np.float64)))
if self.flag[irow] == False:
beam_mat[irow] = beam_map_t.astype(np.float64)
elif self.flag[irow] == True:
beam_mat[irow] = np.zeros(beam_mat.shape[1])
print('%dth visibility is flagged.'%irow)
else:
print('Flag on the %dth visibility is not recognized.'%irow)
a_mat = ne.evaluate('exp(a_mat * 1j)')
a_mat = a_mat.astype('complex128')
a_mat = np.matrix(a_mat)
if apply_beam:
a_mat = np.matrix(np.multiply(a_mat, beam_mat))
self.a_mat = a_mat
return a_mat
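# Usage sketch (hypothetical): with the PSF region defined, form the measurement
# matrix and apply it to a trial map `m_psf` defined on the PSF pixels.
#   a_mat = om.set_a_mat()       # Nvis x Npsf, complex
#   vis_model = a_mat @ m_psf    # model visibilities for that trial map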
def beam_interp_onecore(self, time, pix):
'''Calculating the phase for the pixels within PSF at a given time
'''
if pix == 'hp':
ra_arr = self.ra[self.idx_psf_in]
dec_arr = self.dec[self.idx_psf_in]
elif pix == 'hp+ps':
ra_arr = np.concatenate((self.ra[self.idx_psf_in], self.ra_ps))
dec_arr = np.concatenate((self.dec[self.idx_psf_in], self.dec_ps))
else:
print('Please provide a correct pix kind: hp or hp+ps.')
az_t, alt_t = self._radec2azalt(ra_arr, dec_arr, time)
lmn_t = np.array([np.cos(alt_t)*np.sin(az_t),
np.cos(alt_t)*np.cos(az_t),
np.sin(alt_t)])
#beam_map_t = self.beam_model(np.pi/2. - alt_t, az_t, grid=False)
#pyuvbeam_interp,_ = self.pyuvbeam.interp(az_array=az_t, za_array=np.pi/2. - alt_t,
# az_za_grid=False, freq_array= freq_array,
# reuse_spline=True)
print(time, 'efield interpolation')
#pyuvbeam = self.set_pyuvbeam(beam_model=self.feed_type)
pyuvbeam_interp_e, vectors = self.pyuvbeam.interp(az_array=az_t, za_array=np.pi/2. - alt_t,
az_za_grid=False, freq_array= np.array([self.frequency,]),
reuse_spline=True)
pyuvbeam_interp = self.pyuvbeam_efield_to_power(pyuvbeam_interp_e, vectors)
ipol = 1
beam_map_t = pyuvbeam_interp[0, 0, ipol, 0].real
return {time: beam_map_t}
def set_beam_interp(self, pix, ncores=10):
'''Run the beam interpolation in parallel and store the result in a dictionary
pix: str
'hp', or 'hp+ps'
'''
print(pix)
self.set_pyuvbeam(beam_model=self.feed_type)
pool = multiprocessing.Pool(processes=ncores)
args = []
for time_t in
|
np.unique(self.uv.time_array)
|
numpy.unique
|
"""
Wrappers around NumPy arrays.
Notes
-----
Arr3d -> Arr2d etc. can only be achieved by their slice() methods
Numpy's slice-index notation A[:,0,:] etc. works (i.e. reshapes)
but doesn't convert the type
(c) 2019- <NAME>.
Copyright: Ask for permission by writing to <EMAIL>.
"""
import numpy as np
import matplotlib.pyplot as plt
from autologging import logged, traced
from matplotlib.gridspec import GridSpec
from fullwavepy.generic.parse import kw, del_kw
from fullwavepy.generic.decor import widgets, timer
from fullwavepy.plot.generic import figure, aspeqt, Plotter
from fullwavepy.plot.plt2d import plot_image
# -------------------------------------------------------------------------------
# Arrays - basic classes
# -------------------------------------------------------------------------------
@logged
class Arr(np.ndarray):
"""
Wrapper around numpy's array.
"""
def __new__(cls, source, ndims=None, **kwargs):
"""
Init by reading from source.
Notes
-----
From https://docs.scipy.org/doc/numpy/user/basics.subclassing.html:
Input array is an already formed ndarray instance
"""
if hasattr(source, 'extent'): # NOTE ADDED 12.01.2021
kwargs['extent'] = source.extent
source = cls._read(source, **kwargs)
obj = np.asarray(source).view(cls) # CAST THE TYPE
# FIXME: REPLACE IT WITH STH TAKING source
# AS ARG AND RETURNING EXTENT WHICH WE'LL ASSIGN TO obj JUST BEFORE RETURNING IT
# FROM THIS __new__ FUNCTION
obj = cls._set_extent(obj, **kwargs)
#obj = cls._set_coords(obj, **kwargs)
obj = cls._set_dx(obj, **kwargs)
if ndims is not None:
assert len(obj.shape) == ndims
return obj # NECESSARY!
# -----------------------------------------------------------------------------
def _read(source, **kwargs):
"""
"""
#from fullwavepy.seismic.data import Data
#from fullwavepy.ndat.manifs import Surf, SurfZ, Plane
#from fullwavepy.ndat.points import Points
#
#if (type(source) == type(np.array([])) or
# type(source) == Arr or
# type(source) == Arr1d or
# type(source) == Arr2d or
# type(source) == Arr3d or
# type(source) == Data or
# type(source) == Surf or
# type(source) == SurfZ or
# type(source) == Plane or
# type(source) == Points or
# type(source) == np.memmap):
# A = source
if isinstance(source, str):
from fullwavepy.ioapi.generic import read_any
if hasattr(source, 'shape'): # FOR EFFICIENCY (SEE read_any)
kwargs['shape'] = self.shape
A = read_any(source, **kwargs)
else:
A = source
#else:
# raise TypeError('Arguments need to be either ' +
# 'file-names or arrays or np.memmap, NOT: %s' %
# type(source))
return A
# -----------------------------------------------------------------------------
def _set_extent(obj, func=None, **kwargs):
if 'extent' in kwargs:
obj.__log.debug('Using extent from kwargs, even if it means overwriting')
obj.extent = kwargs['extent']
elif hasattr(obj, 'extent'):
obj.__log.debug('obj.extent already set and not provided in kwargs')
pass
else:
obj.__log.debug('Setting extent to default.')
obj.extent = obj._default_extent(func, **kwargs)
return obj
# -----------------------------------------------------------------------------
def _default_extent(obj, func=None, **kwargs):
"""
Redefined in child classes to account for vertical axis flipping
when plotting with imshow.
"""
if func is None:
func = lambda dim : [0, dim-1] # outdated: # NOT dim-1; SEE GridProjFile ETC.
extent = []
for dim in obj.shape:
extent.append(func(dim))
# if len(obj.shape) == 1:
# extent = extent[0]
return extent
# -----------------------------------------------------------------------------
def _set_dx(obj, **kwargs):
"""
It is fully determined by extent and shape.
In general, it is axis-dependent (dx != dy != dz != dx)
"""
dx = []
obj.__log.debug('obj.shape %s' % str(obj.shape))
obj.__log.debug('obj.extent %s' % str(obj.extent))
assert len(obj.shape) == len(obj.extent)
for nx, (x1, x2) in zip(obj.shape, obj.extent):
obj.__log.debug('nx=%s, x1=%s, x2=%s' % (nx, x1, x2))
dx_1D = (x2 - x1) / (nx-1) if nx > 1 else None
obj.__log.debug('dx_1D=%s' % dx_1D)
dx.append(dx_1D)
obj.dx = np.array(dx)
return obj
# -----------------------------------------------------------------------------
def _set_coords(obj, **kwargs):
obj.__log.debug('obj.extent' + str(obj.extent))
obj.__log.debug('Setting coords to None. Fill it with actual code')
obj.coords = None
return obj
# -----------------------------------------------------------------------------
def __array_finalize__(self, obj):
if obj is None: return
# -----------------------------------------------------------------------------
def _metre2index(self, m, axis, **kwargs):
origin = self.extent[axis][0]
i = (m - origin) / self.dx[axis]
if not i.is_integer():
raise ValueError('Index must be integer not %s' % i)
return int(i)
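# Worked example (illustrative): with extent[axis] = [0, 950] and dx[axis] = 50,
# a coordinate of 150 m maps to index (150 - 0) / 50 = 3; a value such as 175 m
# raises ValueError here, whereas _metre_2_nearest_index below floors it instead.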
# -----------------------------------------------------------------------------
def _metre_2_nearest_index(self, m, axis, **kwargs):
"""
Better version of _metre2index used
by fwilight.ndat.A3d and A2d.
Parameters
----------
m : float
Value in metres.
axis : int
Axis of the array.
Returns
-------
int
Nearest index.
"""
origin = self.extent[axis][0]
i = (m - origin) / self.dx[axis]
if not i.is_integer():
print('Warning. Non-integer index. Taking its floor')
i = np.floor(i)
return int(i)
# -----------------------------------------------------------------------------
def _index2metre(self, i, axis, **kwargs):
origin = self.extent[axis][0]
m = i * self.dx[axis] + origin
return m
# -----------------------------------------------------------------------------
def _metre2gridnode(self, *args, **kwargs):
return self._metre2index(*args, **kwargs) + 1
# -----------------------------------------------------------------------------
def _box2inds(self, box, **kwargs):
"""
Convert box into slicing-indices using extent.
"""
box = np.array(box)
extent = np.array(self.extent)
assert len(box.shape) == 1
assert len(box) == len(extent.flatten())
box = box.reshape(extent.shape)
inds = np.zeros(box.shape)
for axis, _ in enumerate(box):
b0, b1 = box[axis]
if b0 == b1: # FOR 2D (DOUBLE-CHECK)
self.__log.warn('Skipping b0=b1=%s' % b0)
continue
inds[axis][0] = self._metre2index(b0, axis)
inds[axis][1] = self._metre2index(b1, axis) + 1 # NOTE: FOR np.arange(b1, b2) etc.
self.__log.debug('axis %s: i1=%s, i2=%s' % (axis, inds[axis][0], inds[axis][1]))
return inds.astype(int)
# -----------------------------------------------------------------------------
def carve(self, box, **kwargs):
"""
Carve a box out of an array.
Parameters
----------
box : list
Returns
-------
self
"""
inds = self._box2inds(box, **kwargs)
for axis in range(len(self.shape)):
self = np.take(self, np.arange(*inds[axis]), axis=axis)
self.extent = np.array(box).reshape(inds.shape)
return self
# -----------------------------------------------------------------------------
def save(self, fname, **kwargs):
from fullwavepy.ioapi.fw3d import save_vtr
save_vtr(self, fname)
# -----------------------------------------------------------------------------
def info(self, **kwargs):
self.__log.info('grid shape: {} [nodes]'.format(self.shape))
self.__log.info('grid cell-sizes in (x,y,z): {} [m]'.format(self.dx))
self.__log.info('grid extent: {} [m]'.format(self.extent))
self.__log.info('value min: {}, max: {}'.format(np.min(self), np.max(self)))
# -----------------------------------------------------------------------------
def compare(self, othe, mode='interleave', **kwargs): #fig, gs=None, widgets=False,
if mode == 'interleave' or mode == 'ileave':
A = self.interleave(othe, **kwargs)
A.plot(**kwargs)
# elif mode == 'diff' or mode == 'dif':
# c = A3d(self-othe, extent=self.extent)
# c.plot(**kwargs)
# return c
else:
raise ValueError(mode)
# -----------------------------------------------------------------------------
def compare_subplots(self, othe, fig=None, gs=None, widgets=False, **kwargs):
assert type(self) == type(othe)
assert self.shape == othe.shape
xlim = kw('xlim', None, kwargs)
ylim = kw('ylim', None, kwargs)
if fig is None:
figsize = (kw('figsize_x', 8, kwargs), kw('figsize_y', 8, kwargs))
fig = plt.figure(figsize=figsize)
kwargs['widgets'] = False
if gs is None:
gs = fig.add_gridspec(1,2)
ax1 = fig.add_subplot(gs[0,0])
self.plot(**kwargs)
ax2 = fig.add_subplot(gs[0,1])
othe.plot(**kwargs)
for ax in [ax1, ax2]:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
@logged
class Arr1d(Arr):
"""
"""
def __new__(cls, source, **kwargs):
return super().__new__(cls, source, ndims=1, **kwargs)
# -----------------------------------------------------------------------------
def plot(self, **kwargs):
"""
format of extent: [[x1,x2]] is for compatibility with 2d and 3d
"""
from fullwavepy.plot.plt1d import plot_line
c = kw('c', None, kwargs)
assert np.array(self.extent).shape == (1,2)
self.__log.debug('self.extent' + str(self.extent))
kwargs['extent'] = self.extent[:2]
plot_line(self, **kwargs)
# x = np.linspace(x1, x2, len(self))
# plt.plot(x, self, c=c)
return plt.gca()
# -----------------------------------------------------------------------------
@logged
class Arr2d(Plotter, Arr):
"""
"""
def __new__(cls, source, **kwargs):
return super().__new__(cls, source, ndims=2, **kwargs)
# -----------------------------------------------------------------------------
###@widgets('slice_at', 'node')
def slice(self, slice_at='y', node=0, widgets=False, **kwargs):
"""
"""
di = {'x': 0, 'y': 1} # TRANSLATE slice_at INTO AXIS NO.
axis = di[slice_at]
A = Arr1d(np.take(self, indices=node, axis=axis))
assert len(self.extent) == 2
extent1d = np.array([el for i, el in enumerate(self.extent) if i != di[slice_at]])
self.__log.debug('extent1d %s' % str(extent1d))
A.extent = extent1d
return A
# -----------------------------------------------------------------------------
def interleave(self, othe, **kwargs):
self.interleaved = interleave_arrays(self, othe, **kwargs)
return self.interleaved
# -----------------------------------------------------------------------------
###@widgets('cmap', 'slice_at', 'node')
def plot_slice(self, slice_at='y', node=0, widgets=False, **kwargs):
"""
"""
arr1d = self.slice(slice_at, node, widgets=False, **kwargs)
ax = arr1d.plot(**kwargs)
return ax
# -----------------------------------------------------------------------------
def plot_full(self, wiggle=False, **kwargs):
"""
"""
kwargs['extent'] = np.ravel(self.extent) # ravel JUST IN CASE
# IT SHOULDN'T BE APPLIED TWICE!
self = modify_array(self, **kwargs) # FIXME: MOVE IT SOMEWHERE ELSE?!
if wiggle:
ax = plot_wiggl(self, **kwargs)
else:
ax = plot_image(self, **kwargs)
return ax
# -----------------------------------------------------------------------------
def plot(self, *args, **kwargs):
"""
"""
if 'slice_at' in kwargs:
ax = self.plot_slice(*args, **kwargs)
else:
ax = self.plot_full(*args, **kwargs)
return ax
# -----------------------------------------------------------------------------
#def compare(self, othe, **kwargs):
#A = self.interleave(othe, **kwargs)
#A.plot(**kwargs)
@logged
class Arr3d(Plotter, Arr):
"""
3D array.
"""
def __new__(cls, source, **kwargs):
return super().__new__(cls, source, ndims=3, **kwargs)
# -----------------------------------------------------------------------------
def slice(self, slice_at='y', node=0, widgets=False, **kwargs):
"""
"""
di = {'x': 0, 'y': 1, 'z': 2} # TRANSLATE slice_at INTO AXIS NO.
axis = di[slice_at]
A = Arr2d(np.take(self, indices=node, axis=axis))
assert len(self.extent) == 3
# extent2d = np.ravel([el for i, el in enumerate(self.extent) if i != di[slice_at]])
extent2d = np.array([el for i, el in enumerate(self.extent) if i != di[slice_at]])
# if axis != 2:
self.__log.debug('Setting extent2d so that no vertical-axis flipping is needed.')
self.__log.debug('NOW ALSO FOR zslice (NOT TESTED BUT SEEMS TO HAVE FIXED THE BUG)')
# extent2d[-2: ] = [extent2d[-1], extent2d[-2]]
extent2d[-1] = extent2d[-1][::-1]
self.__log.debug('extent2d: ' + str(extent2d))
A.extent = extent2d
return A
# -----------------------------------------------------------------------------
def interleave(self, othe, *args, **kwargs):
A1 = self.slice(*args, **kwargs)
A2 = othe.slice(*args, **kwargs)
A = Arr2d(interleave_arrays(A1, A2, **kwargs))
return A
# -----------------------------------------------------------------------------
def plot_slice(self, slice_at='y', node=None, widgets=False, **kwargs):
"""
"""
nx, ny, nz = self.shape
if node is None:
if slice_at == 'x':
node = kw('node', nx//2, kwargs)
# metre = self._index2metre(node, 0)
elif slice_at == 'y':
node = kw('node', ny//2, kwargs)
# metre = self._index2metre(node, 1)
elif slice_at == 'z':
node = kw('node', nz//2, kwargs)
# metre = self._index2metre(node, 2)
else:
raise ValueError('Wrong slice_at: %s' % str(slice_at))
arr2d = self.slice(slice_at, node, widgets=False, **kwargs)
suffix = kwargs.get('title', '')
if suffix is None:
kwargs['title'] = ''
else:
suffix = ', ' + suffix if suffix != '' else suffix
kwargs['title'] = 'Array slice at %s-index %s%s' % (slice_at, node, suffix)
del_kw('slice_at', kwargs) # JUST IN CASE
ax = arr2d.plot(**kwargs)
if slice_at == 'z': # DISABLE?
ax.invert_yaxis()
return ax
# -----------------------------------------------------------------------------
def plot_3slices_new2(self, x, y, z, fig=None, gs=None, **kwargs):
"""
"""
from fullwavepy.plot.plt2d import plot_image
layout = kw('layout', 'square', kwargs)
if fig is None:
fig = figure(16,8)
kwargs['x'] = x
kwargs['y'] = y
kwargs['z'] = z
# LABELS FOR EACH AXIS
s2 = kw('slice', 'y', kwargs) # MAIN SLICE PLOTTED AT THE BOTTOM IN FULL WIDTH
s0, s1 = [i for i in ['x', 'y', 'z'] if i != s2]
s = [s0, s1, s2]
# CONVERT THE LABELS INTO ARRAY DIMENSIONS (AXES)
convert_s2a = {'x': 0, 'y': 1, 'z': 2} # TRANSLATE slice TO axis
if layout == 'square':
if gs is None:
gs = GridSpec(2,2, height_ratios=[1,1], width_ratios=[2,1])
axes = list(np.zeros(3))
axes[0] = fig.add_subplot(gs[0,0])
axes[1] = fig.add_subplot(gs[1,0])
axes[2] = fig.add_subplot(gs[:,1])
elif layout == 'thin':
if gs is None:
gs = GridSpec(3,1)
axes = list(np.zeros(3))
axes[0] = fig.add_subplot(gs[0,0])
axes[1] = fig.add_subplot(gs[1,0])
axes[2] = fig.add_subplot(gs[2,0])
else:
raise ValueError('Unknown layout: %s' % layout)
kwargs['vmin'] = kw('vmin', np.min(self), kwargs)
kwargs['vmax'] = kw('vmax', np.max(self), kwargs)
self.__log.debug('Setting vmin, vmax to: {}, {}'.format(kwargs['vmin'],
kwargs['vmax']))
for i, ax in enumerate(axes):
plt.sca(ax)
aaxx = plot_image(
|
np.take(self, kwargs[s[i]], convert_s2a[s[i]])
|
numpy.take
|
import os
from time import time
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from box import Box
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
recall_score)
from torchvision import datasets, transforms
from tqdm import tqdm
from common.benchmark import OptimizersBenchmark, run_benchmark
from common.models import Net
from common.optimizers import optimizers
from common.schedulers import get_schedulers_dict
from common.setup import setup
def train_iter_mnist(bench):
"""Train iteration for experiment on MNIST.
Args:
bench (OptimizersBenchmark): Class for benchmarking.
Returns:
[List, float]: List of losses, iteration time
"""
bench.model.train()
losses = []
t1 = time()
for batch_idx, (data, target) in tqdm(enumerate(bench.train_loader)):
data, target = data.to(bench.device), target.to(bench.device)
bench.optimizer.zero_grad()
output = bench.model(data)
loss = F.nll_loss(output, target)
loss.backward(create_graph=True)
bench.optimizer.step()
if bench.sched_step_note in ['cyclic', 'cosine']:
bench.scheduler.step()
if batch_idx % bench.log_interval == 0:
losses.append(loss.item())
epoch_time = time() - t1
return losses, epoch_time
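# Hypothetical driver loop (`bench` and `num_epochs` are placeholder names; the
# actual entry point is assumed to live elsewhere in this benchmark code):
#   for epoch in range(num_epochs):
#       train_losses, epoch_time = train_iter_mnist(bench)
#       test_losses, metrics = test_iter_mnist(bench)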
def test_iter_mnist(bench):
"""Test iteration for experiment on MNIST.
Args:
bench (OptimizersBenchmark): Class with setup for benchmarking.
Returns:
[List, Dict]: List of losses, dictionary with metric values
"""
bench.model.eval()
losses = []
targets = []
preds = []
with torch.no_grad():
for data, target in bench.test_loader:
targets += list(np.array(target))
data, target = data.to(bench.device), target.to(bench.device)
output = bench.model(data)
loss_val = F.nll_loss(output, target, reduction='sum').item()
losses.append(loss_val)
pred = output.argmax(dim=1, keepdim=True)
preds += list(pred.cpu().numpy().ravel())
precision = precision_score(
y_true=targets, y_pred=preds, average="macro", zero_division=0)
recall = recall_score(y_true=targets, y_pred=preds, average="macro")
accuracy = accuracy_score(y_true=targets, y_pred=preds)
f1 = f1_score(y_true=targets, y_pred=preds, average="macro")
metrics = {"precision": precision,
"recall": recall, "accuracy": accuracy, "f1": f1}
print(f'Average loss: {
|
np.mean(losses)
|
numpy.mean
|
# Simulate an inhomogeneous Poisson point process on a rectangle
# This is done by simulating a homogeneous Poisson process, which is then
# thinned according to a (spatially *dependent*) p-thinning.
# The intensity function is lambda(x,y)=exp(-(x^2+y^2)/s^2), where s>0.
# Author: <NAME>, 2019.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
# For more details, see the post:
# hpaulkeeler.com/simulating-an-inhomogeneous-poisson-point-process/
import numpy as np; # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # For plotting
from scipy.optimize import minimize # For optimizing
from scipy import integrate # For integrating
plt.close('all'); # close all figures
# Simulation window parameters
xMin = -1;
xMax = 1;
yMin = -1;
yMax = 1;
xDelta = xMax - xMin;
yDelta = yMax - yMin; # rectangle dimensions
areaTotal = xDelta * yDelta;
numbSim = 10 ** 3; # number of simulations
s = 0.5; # scale parameter
# Point process parameters
def fun_lambda(x, y):
return 100 * np.exp(-(x ** 2 + y ** 2) / s ** 2); # intensity function
#fun_lambda = lambda x,y: 100 * np.exp(-(x ** 2 + y ** 2) / s ** 2);
###START -- find maximum lambda -- START ###
# For an intensity function lambda, given by function fun_lambda,
# finds the maximum of lambda in a rectangular region given by
# [xMin,xMax,yMin,yMax].
def fun_Neg(x):
return -fun_lambda(x[0], x[1]); # negative of lambda
#fun_Neg = lambda x: -fun_lambda(x[0], x[1]); # negative of lambda
xy0 = [(xMin + xMax) / 2, (yMin + yMax) / 2]; # initial value(ie centre)
# Find largest lambda value
resultsOpt = minimize(fun_Neg, xy0, bounds=((xMin, xMax), (yMin, yMax)));
lambdaNegMin = resultsOpt.fun; # retrieve minimum value found by minimize
lambdaMax = -lambdaNegMin;
###END -- find maximum lambda -- END ###
# define thinning probability function
def fun_p(x, y):
return fun_lambda(x, y) / lambdaMax;
#fun_p = lambda x, y: fun_lambda(x, y) / lambdaMax;
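# Illustrative check of the thinning probability (toy values, not part of the
# simulation): the intensity peaks at the origin, so lambdaMax is about 100 and
# fun_p(0, 0) is about 1, while fun_p(0.5, 0.5) = exp(-0.5/0.25) = exp(-2),
# roughly 0.135 -- points far from the origin are rarely retained.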
# for collecting statistics -- set numbSim=1 for one simulation
numbPointsRetained =
|
np.zeros(numbSim)
|
numpy.zeros
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Testing spatialimages
"""
from ..externals.six import BytesIO
import numpy as np
from ..spatialimages import (Header, SpatialImage, HeaderDataError,
ImageDataError)
from unittest import TestCase
from nose.tools import (assert_true, assert_false, assert_equal,
assert_not_equal, assert_raises)
from numpy.testing import assert_array_equal, assert_array_almost_equal
from .test_helpers import bytesio_round_trip
def test_header_init():
# test the basic header
hdr = Header()
assert_equal(hdr.get_data_dtype(), np.dtype(np.float32))
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
hdr = Header(np.float64)
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
hdr = Header(np.float64, shape=(1,2,3))
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0))
hdr = Header(np.float64, shape=(1,2,3), zooms=None)
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0))
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (3.0, 2.0, 1.0))
def test_from_header():
# check from header class method. Note equality checks below,
# equality methods used here too.
empty = Header.from_header()
assert_equal(Header(), empty)
empty = Header.from_header(None)
assert_equal(Header(), empty)
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
copy = Header.from_header(hdr)
assert_equal(hdr, copy)
assert_false(hdr is copy)
class C(object):
def get_data_dtype(self): return np.dtype('u2')
def get_data_shape(self): return (5,4,3)
def get_zooms(self): return (10.0, 9.0, 8.0)
converted = Header.from_header(C())
assert_true(isinstance(converted, Header))
assert_equal(converted.get_data_dtype(), np.dtype('u2'))
assert_equal(converted.get_data_shape(), (5,4,3))
assert_equal(converted.get_zooms(), (10.0,9.0,8.0))
def test_eq():
hdr = Header()
other = Header()
assert_equal(hdr, other)
other = Header('u2')
assert_not_equal(hdr, other)
other = Header(shape=(1,2,3))
assert_not_equal(hdr, other)
hdr = Header(shape=(1,2))
other = Header(shape=(1,2))
assert_equal(hdr, other)
other = Header(shape=(1,2), zooms=(2.0,3.0))
assert_not_equal(hdr, other)
def test_copy():
# test that copy makes independent copy
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
hdr_copy = hdr.copy()
hdr.set_data_shape((4,5,6))
assert_equal(hdr.get_data_shape(), (4,5,6))
assert_equal(hdr_copy.get_data_shape(), (1,2,3))
hdr.set_zooms((4,5,6))
assert_equal(hdr.get_zooms(), (4,5,6))
assert_equal(hdr_copy.get_zooms(), (3,2,1))
hdr.set_data_dtype(np.uint8)
assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8))
assert_equal(hdr_copy.get_data_dtype(), np.dtype(np.float64))
def test_shape_zooms():
hdr = Header()
hdr.set_data_shape((1, 2, 3))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (1.0,1.0,1.0))
hdr.set_zooms((4, 3, 2))
assert_equal(hdr.get_zooms(), (4.0,3.0,2.0))
hdr.set_data_shape((1, 2))
assert_equal(hdr.get_data_shape(), (1,2))
assert_equal(hdr.get_zooms(), (4.0,3.0))
hdr.set_data_shape((1, 2, 3))
assert_equal(hdr.get_data_shape(), (1,2,3))
assert_equal(hdr.get_zooms(), (4.0,3.0,1.0))
# null shape is (0,)
hdr.set_data_shape(())
assert_equal(hdr.get_data_shape(), (0,))
assert_equal(hdr.get_zooms(), (1.0,))
# zooms of wrong lengths raise error
assert_raises(HeaderDataError, hdr.set_zooms, (4.0, 3.0))
assert_raises(HeaderDataError,
hdr.set_zooms,
(4.0, 3.0, 2.0, 1.0))
# as do negative zooms
assert_raises(HeaderDataError,
hdr.set_zooms,
(4.0, 3.0, -2.0))
def test_data_dtype():
hdr = Header()
assert_equal(hdr.get_data_dtype(), np.dtype(np.float32))
hdr.set_data_dtype(np.float64)
assert_equal(hdr.get_data_dtype(), np.dtype(np.float64))
hdr.set_data_dtype('u2')
assert_equal(hdr.get_data_dtype(), np.dtype(np.uint16))
def test_affine():
hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
assert_array_almost_equal(hdr.get_best_affine(),
[[-3.0,0,0,0],
[0,2,0,-1],
[0,0,1,-1],
[0,0,0,1]])
hdr.default_x_flip = False
assert_array_almost_equal(hdr.get_best_affine(),
[[3.0,0,0,0],
[0,2,0,-1],
[0,0,1,-1],
[0,0,0,1]])
assert_array_equal(hdr.get_base_affine(),
hdr.get_best_affine())
def test_read_data():
hdr = Header(np.int32, shape=(1,2,3), zooms=(3.0, 2.0, 1.0))
fobj = BytesIO()
data = np.arange(6).reshape((1,2,3))
hdr.data_to_fileobj(data, fobj)
assert_equal(fobj.getvalue(),
data.astype(np.int32).tostring(order='F'))
fobj.seek(0)
data2 = hdr.data_from_fileobj(fobj)
assert_array_equal(data, data2)
class DataLike(object):
# Minimal class implementing 'data' API
shape = (3,)
def __array__(self):
return np.arange(3)
class TestSpatialImage(TestCase):
# class for testing images
image_class = SpatialImage
can_save = False
def test_isolation(self):
# Test image isolated from external changes to header and affine
img_klass = self.image_class
arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4))
aff = np.eye(4)
img = img_klass(arr, aff)
assert_array_equal(img.get_affine(), aff)
aff[0,0] = 99
assert_false(np.all(img.get_affine() == aff))
# header, created by image creation
ihdr = img.get_header()
# Pass it back in
img = img_klass(arr, aff, ihdr)
# Check modifying header outside does not modify image
ihdr.set_zooms((4, 5, 6))
assert_not_equal(img.get_header(), ihdr)
def test_float_affine(self):
# Check affines get converted to float
img_klass = self.image_class
arr = np.arange(3, dtype=np.int16)
img = img_klass(arr, np.eye(4, dtype=np.float32))
assert_equal(img.get_affine().dtype, np.dtype(np.float64))
img = img_klass(arr, np.eye(4, dtype=np.int16))
assert_equal(img.get_affine().dtype, np.dtype(np.float64))
def test_images(self):
# Assumes all possible images support int16
# See https://github.com/nipy/nibabel/issues/58
arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4))
img = self.image_class(arr, None)
assert_array_equal(img.get_data(), arr)
assert_equal(img.get_affine(), None)
hdr = self.image_class.header_class()
hdr.set_data_shape(arr.shape)
hdr.set_data_dtype(arr.dtype)
assert_equal(img.get_header(), hdr)
def test_data_api(self):
# Test minimal api data object can initialize
img = self.image_class(DataLike(), None)
assert_array_equal(img.get_data(), np.arange(3))
assert_equal(img.shape, (3,))
def check_dtypes(self, expected, actual):
# Some images will want dtypes to be equal including endianness,
# others may only require the same type
assert_equal(expected, actual)
def test_data_default(self):
# check that the default dtype comes from the data if the header
# is None, and that unsupported dtypes raise an error
img_klass = self.image_class
hdr_klass = self.image_class.header_class
data = np.arange(24, dtype=np.int32).reshape((2,3,4))
affine = np.eye(4)
img = img_klass(data, affine)
self.check_dtypes(data.dtype, img.get_data_dtype())
header = hdr_klass()
header.set_data_dtype(np.float32)
img = img_klass(data, affine, header)
self.check_dtypes(np.dtype(np.float32), img.get_data_dtype())
def test_data_shape(self):
# Check shape correctly read
img_klass = self.image_class
# Assumes all possible images support int16
# See https://github.com/nipy/nibabel/issues/58
arr = np.arange(4, dtype=np.int16)
img = img_klass(arr, np.eye(4))
assert_equal(img.shape, (4,))
img = img_klass(
|
np.zeros((2,3,4), dtype=np.float32)
|
numpy.zeros
|
import os
import torch
import pickle
import timeit
import datetime
import numpy as np
from copy import deepcopy
from ReplayBuffer import ReplayBufferStd
import StateUtilities as SU
from Agents import agent_constructor
import RLCritics
def one_single_episode(algorithm,
building, building_occ,
agents = None, critics = None,
hyper_params = None, episode_number = 0, sqloutput = None,
extended_logging = False, evaluation_episode = False,
add_random_process_in_eval_epoch = False,
ts_diff_in_min = 5, rpb = None):
#
# define the hyper-parameters
if hyper_params is None:
LAMBDA_REWARD_ENERGY = 0.1
LAMBDA_REWARD_MANU_STP_CHANGES = 150
TAU_TARGET_NETWORKS = 0.01
DISCOUNT_FACTOR = 0.9
BATCH_SIZE = 128
RPB_BUFFER_SIZE = 12*24*2 # 2 days
LEARNING_RATE = 0.01
TARGET_NETWORK_UPDATE_FREQ = 3
TS_UNTIL_REGULATION = 1
else:
LAMBDA_REWARD_ENERGY = hyper_params.lambda_rwd_energy
LAMBDA_REWARD_MANU_STP_CHANGES = hyper_params.lambda_rwd_mstpc
TAU_TARGET_NETWORKS = hyper_params.tau
DISCOUNT_FACTOR = hyper_params.discount_factor
BATCH_SIZE = hyper_params.batch_size
RPB_BUFFER_SIZE = hyper_params.rpb_buffer_size
LEARNING_RATE = hyper_params.lr
TARGET_NETWORK_UPDATE_FREQ = hyper_params.target_network_update_freq
TS_UNTIL_REGULATION = hyper_params.ts_until_regulation
#
# define the output dict containing status informations
status_output_dict = {}
if hyper_params.verbose_output_mode:
status_output_dict["verbose_output"] = []
#
# Define the replay ReplayBuffer
if rpb is None:
rpb = ReplayBufferStd(size=RPB_BUFFER_SIZE, number_agents=len(agents))
#
# Define the loss for DDQN
if hyper_params.ddqn_loss == "L2":
loss = torch.nn.MSELoss()
else:
loss = torch.nn.L1Loss()
#
# Lists for command-line outputs
reward_list = []
output_loss_list = []
output_q_st2_list= []
output_J_mean_list=[]
output_cr_frobnorm_mat_list = []
output_cr_frobnorm_bia_list = []
output_ag_frobnorm_mat_list = []
output_ag_frobnorm_bia_list = []
output_n_stp_ch = []
output_energy_Wh = []
if not algorithm == "ddpg" or not extended_logging:
output_q_st2_list= [0 for _ in agents]
output_J_mean_list=[0 for _ in agents]
output_cr_frobnorm_mat_list = [0 for _ in agents]
output_cr_frobnorm_bia_list = [0 for _ in agents]
if algorithm == "baseline_rule-based":
output_loss_list = [0]
output_ag_frobnorm_mat_list = [0]
output_ag_frobnorm_bia_list = [0]
# list for q values output, if selected
if evaluation_episode and hyper_params.output_Q_vals_iep:
q_values_list = [ [] for _ in agents ]
#
# prepare the simulation
state = building.model_reset()
SU.fix_year_confussion(state)
SU.expand_state_next_occup(state, building, hyper_params.next_occ_horizont, ts_diff_in_min, building_occ)
norm_state_ten = SU.unnormalized_state_to_tensor(state, building)
#
current_occupancy = building_occ.draw_sample( state["time"] )
timestep = 0
last_state = None
historical_econs_values = [0]
# start the simulation loop
while not building.model_is_terminate():
actions = list()
currdate = state['time']
#
# request occupancy for the next state
nextdate = state['time'] + datetime.timedelta(minutes=ts_diff_in_min)
next_occupancy = building_occ.draw_sample(nextdate)
#
# propagate occupancy values to COBS / EnergyPlus
for zonename, occd in next_occupancy.items():
actions.append({"priority": 0,
"component_type": "Schedule:Constant",
"control_type": "Schedule Value",
"actuator_key": f"OCC-SCHEDULE-{zonename}",
"value": next_occupancy[zonename]["relative number occupants"],
"start_time": state['timestep'] + TS_UNTIL_REGULATION})
#
# request new actions from all agents
agent_actions_dict = {}
agent_actions_list = []
add_random_process = True
if evaluation_episode and not add_random_process_in_eval_epoch:
add_random_process = False
if algorithm == "ddqn":
if agents[0].shared_network_per_agent_class:
new_actions = agents[0].next_action(norm_state_ten, add_random_process)
agent_actions_list = new_actions
# decode the actions for every agent using the individual agent objects
for idx, agent in enumerate(agents):
if agent.type == "RL":
agent_actions_dict[agent.name] = agent.output_action_to_action_dict(new_actions[idx])
else:
agent_actions_dict[agent.name] = agent.step(state)
else:
for agent in agents:
if agent.type == "RL":
new_action = agent.next_action(norm_state_ten, add_random_process)
agent_actions_list.append( new_action )
agent_actions_dict[agent.name] = agent.output_action_to_action_dict(new_action)
if hyper_params.verbose_output_mode:
_, vo_ipt = agent.step_tensor(norm_state_ten, True, True)
vodict = {"state": state, "norm_state_ten": norm_state_ten,
"agent_action": new_action,
"agent internal input tensor": vo_ipt.detach()}
status_output_dict["verbose_output"].append(vodict)
else:
agent_actions_dict[agent.name] = agent.step(state)
# no backtransformation of variables needed, this is done in agents definition already
#
# output Q values in eval episode if selected
if evaluation_episode and hyper_params.output_Q_vals_iep:
if agents[0].shared_network_per_agent_class:
q_values = agents[0].step_tensor(norm_state_ten, use_actor=True).detach().numpy()
for idx, agent in enumerate(agents):
if agent.type != "RL": continue
q_values_list[idx].append(q_values[idx])
else:
for idx, agent in enumerate(agents):
if agent.type != "RL": continue
q_values = agent.step_tensor(norm_state_ten, use_actor=True).detach().numpy()
q_values_list[idx].append(q_values)
elif algorithm == "ddpg":
for agent in agents:
if agent.type == "RL":
new_action = agent.step_tensor(norm_state_ten,
use_actor = True,
add_ou = add_random_process)
agent_actions_list.append( new_action )
new_action_dict = agent.output_tensor_to_action_dict(new_action)
agent_actions_dict[agent.name] = SU.backtransform_variables_in_dict(new_action_dict, inplace=True)
else:
agent_actions_list.append( agent.step_tensor(norm_state_ten) )
agent_actions_dict[agent.name] = agent.step(state)
elif algorithm == "baseline_rule-based":
for agent in agents:
agent_actions_dict[agent.name] = agent.step(state)
#
# send agent actions to the building object and obtain the actions for COBS/eplus
actions.extend( building.obtain_cobs_actions( agent_actions_dict, state["timestep"]+TS_UNTIL_REGULATION ) )
#
# send actions to EnergyPlus and obtain the new state
norm_state_ten_last = norm_state_ten
last_state = state
timestep += 1
state = building.model_step(actions)
current_occupancy = next_occupancy
SU.fix_year_confussion(state)
SU.expand_state_next_occup(state, building, hyper_params.next_occ_horizont, ts_diff_in_min, building_occ)
current_energy_Wh = state["energy"] / 360
current_energy_kWh= current_energy_Wh / 1000.0
#
# modify state
norm_state_ten = SU.unnormalized_state_to_tensor(state, building)
#
# send current temp/humidity values for all rooms
# obtain number of manual setpoint changes
_, n_manual_stp_changes, target_temp_per_room = building_occ.manual_setpoint_changes(state['time'], state["temperature"], None, hyper_params.stp_reward_step_offset)
#
# reward computation
if hyper_params is None or hyper_params.reward_function == "sum_energy_mstpc":
mstpc_after_function = setpoint_activation_function(n_manual_stp_changes, hyper_params.stp_reward_function)
reward = reward_sum_econs_mstpc(
current_energy_kWh if hyper_params.energy_cons_in_kWh else current_energy_Wh,
mstpc_after_function,
LAMBDA_REWARD_ENERGY,
LAMBDA_REWARD_MANU_STP_CHANGES,
hyper_params.clip_econs_at,
hyper_params.soften_instead_of_clipping,
hyper_params.log_rwd_energy)
elif hyper_params.reward_function == "sum_emean_ediff_mstpc":
mstpc_after_function = setpoint_activation_function(n_manual_stp_changes, hyper_params.stp_reward_function)
reward = reward_sum_eMean_eDiff_mstpc(
current_energy_kWh if hyper_params.energy_cons_in_kWh else current_energy_Wh,
historical_econs_values,
mstpc_after_function,
LAMBDA_REWARD_ENERGY,
LAMBDA_REWARD_MANU_STP_CHANGES,
hyper_params.clip_econs_at,
hyper_params.soften_instead_of_clipping,
hyper_params.log_rwd_energy)
elif hyper_params.reward_function == "rulebased_roomtemp":
reward, target_temp_per_room = reward_fn_rulebased_roomtemp(state, building, hyper_params.stp_reward_step_offset)
reward = setpoint_activation_function(reward, hyper_params.stp_reward_function)
#elif hyper_params.reward_function == "rulebased_agent_output":
else:
reward, target_temp_per_room = reward_fn_rulebased_agent_output(state, agent_actions_dict, building, hyper_params.stp_reward_step_offset)
reward = setpoint_activation_function(reward, hyper_params.stp_reward_function)
# invert and scale reward and (maybe) add offset
reward = -hyper_params.reward_scale * reward + hyper_params.reward_offset
if not hyper_params is None and hyper_params.log_reward:
reward = - np.log(-reward + 1)
# add reward to output list for command-line outputs
reward_list.append(reward)
output_n_stp_ch.append(n_manual_stp_changes)
output_energy_Wh.append(current_energy_Wh)
#
# save (last_state, actions, reward, state) to replay buffer
rpb.add_transition(norm_state_ten_last, agent_actions_list, reward, norm_state_ten)
#
if algorithm == "ddqn":
# sample minibatch
b_state1, b_action, b_reward, b_state2 = rpb.sample_minibatch(BATCH_SIZE, False)
b_action = torch.tensor(b_action)
#
# loop over all [agent, critic]-pairs
if agents[0].shared_network_per_agent_class:
#
# compute y (i.e. the TD-target)
# Hint: s_{i+1} <- state2; s_i <- state1
agents[0].model_actor.zero_grad()
b_reward = b_reward.detach().expand(-1, len(agents) ).flatten()[:, np.newaxis]
# wrong: b_reward = b_reward.detach().repeat(len(agents), 1)
if hyper_params.ddqn_new:
next_a = agents[0].step_tensor(b_state2, use_actor = True).detach().max(dim=1).indices[:, np.newaxis]
y = b_reward + DISCOUNT_FACTOR * agents[0].step_tensor(b_state2, use_actor = False).gather(1, next_a)
else:
y = b_reward + DISCOUNT_FACTOR * agents[0].step_tensor(b_state2, use_actor = False).detach().max(dim=1).values[:, np.newaxis]
# compute Q for state1
q = agents[0].step_tensor(b_state1, use_actor = True).gather(1, b_action.flatten()[:, np.newaxis])
# update agent by minimizing the loss L
L = loss(q, y)
L.backward()
agents[0].optimizer_step()
#
# save outputs
output_loss_list.append(float(L.detach().numpy()))
# compute and store frobenius norms for the weights
ag_fnorm1, ag_fnorm2 = 0, 0
for p in agents[0].model_actor.parameters():
if len(p.shape) == 1: ag_fnorm2 += float(p.norm().detach().cpu().numpy())
else: ag_fnorm1 += float(p.norm().detach().cpu().numpy())
output_ag_frobnorm_mat_list.append( ag_fnorm1 )
output_ag_frobnorm_bia_list.append( ag_fnorm2 )
else:
for agent_id, agent in enumerate(agents):
if not agent.type == "RL":
continue
#
# compute y (i.e. the TD-target)
# Hint: s_{i+1} <- state2; s_i <- state1
agent.model_actor.zero_grad()
if hyper_params.ddqn_new:
next_a = agent.step_tensor(b_state2, use_actor = True).detach().max(dim=1).indices[:, np.newaxis].long()
y = b_reward.detach() + DISCOUNT_FACTOR * agent.step_tensor(b_state2, use_actor = False).gather(1, next_a)
else:
y = b_reward.detach() + DISCOUNT_FACTOR * agent.step_tensor(b_state2, use_actor = False).detach().max(dim=1).values[:, np.newaxis]
# compute Q for state1
q = agent.step_tensor(b_state1, use_actor = True).gather(1, b_action[:, agent_id][:, np.newaxis])
# update agent by minimizing the loss L
L = loss(q, y)
L.backward()
agent.optimizer_step()
#
# save outputs
output_loss_list.append(float(L.detach().numpy()))
# compute and store frobenius norms for the weights
ag_fnorm1, ag_fnorm2 = 0, 0
for p in agent.model_actor.parameters():
if len(p.shape) == 1: ag_fnorm2 += float(p.norm().detach().cpu().numpy())
else: ag_fnorm1 += float(p.norm().detach().cpu().numpy())
output_ag_frobnorm_mat_list.append( ag_fnorm1 )
output_ag_frobnorm_bia_list.append( ag_fnorm2 )
elif algorithm == "ddpg":
# sample minibatch
b_state1, b_action, b_action_cat, b_reward, b_state2 = rpb.sample_minibatch(BATCH_SIZE)
#
# loop over all [agent, critic]-pairs
for agent, critic in zip(agents, critics):
if agent.type != "RL":
continue
#
# compute y
# Hint: s_{i+1} <- state2; s_i <- state1
critic.model.zero_grad()
# 1. compute mu'(s_{i+1})
mu_list = [ aInnerLoop.step_tensor(b_state2, use_actor = False) for aInnerLoop in agents ]
# 2. compute y
q_st2 = critic.forward_tensor(b_state2, mu_list, no_target = False)
y = b_reward.detach() + DISCOUNT_FACTOR * q_st2
# compute Q for state1
q = critic.forward_tensor(b_state1, b_action_cat, no_target = True)
# update critic by minimizing the loss L
L = critic.compute_loss_and_optimize(q, y, no_backprop = evaluation_episode)
#
# update actor policies
# policy loss = J
mu_list = [ aInnerLoop.step_tensor(b_state1, add_ou = False) for aInnerLoop in agents ]
agent.model_actor.zero_grad()
policy_J = -critic.forward_tensor(b_state1, mu_list)
policy_J_mean = policy_J.mean()
if not evaluation_episode:
policy_J_mean.backward()
agent.optimizer_step()
#
# save outputs
output_loss_list.append(float(L.detach().numpy()))
output_q_st2_list.append(float(q_st2.detach().mean().numpy()))
output_J_mean_list.append(float(policy_J_mean.detach().numpy()))
# compute and store frobenius norms for the weights
cr_fnorm1, cr_fnorm2, ag_fnorm1, ag_fnorm2 = 0, 0, 0, 0
if extended_logging:
for p in critic.model.parameters():
if len(p.shape) == 1: cr_fnorm2 += float(p.cpu().norm().detach().cpu().numpy())
else: cr_fnorm1 += float(p.norm().detach().cpu().numpy())
for p in agent.model_actor.parameters():
if len(p.shape) == 1: ag_fnorm2 += float(p.norm().detach().cpu().numpy())
else: ag_fnorm1 += float(p.norm().detach().cpu().numpy())
output_cr_frobnorm_mat_list.append( cr_fnorm1 )
output_cr_frobnorm_bia_list.append( cr_fnorm2 )
output_ag_frobnorm_mat_list.append( ag_fnorm1 )
output_ag_frobnorm_bia_list.append( ag_fnorm2 )
#
# store detailed output, if extended logging is selected
if extended_logging and not sqloutput is None:
sqloutput.add_every_step_of_some_episodes( locals() )
if timestep % 200 == 0:
eval_ep_str = " " if evaluation_episode else "no"
rand_pr_add_str = " " if add_random_process else "no"
print(f"ep. {episode_number:3}, ts. {timestep:5}: {state['time']}, {eval_ep_str} eval ep., {rand_pr_add_str} rand. p. add.")
#
# update target networks
status_output_dict["target_network_update"] = False
if episode_number % TARGET_NETWORK_UPDATE_FREQ == 0:
if algorithm == "ddqn":
for agent in agents:
if agent.type != "RL": continue
agent.copy_weights_to_target()
status_output_dict["target_network_update"] = True
elif algorithm == "ddpg":
# update target critic
for critic in critics:
critic.update_target_network(TAU_TARGET_NETWORKS)
# update target network for actor
for agent in agents:
if agent.type != "RL": continue
agent.update_target_network(TAU_TARGET_NETWORKS)
status_output_dict["target_network_update"] = True
#
# status output dict postprocessing
status_output_dict["episode"] = episode_number
status_output_dict["lr"] = LEARNING_RATE
status_output_dict["tau"] = TAU_TARGET_NETWORKS
status_output_dict["lambda_energy"] = LAMBDA_REWARD_ENERGY
status_output_dict["lambda_manu_stp"] = LAMBDA_REWARD_MANU_STP_CHANGES
status_output_dict["reward_mean"] = np.mean(reward_list)
status_output_dict["reward_sum"] = np.sum(reward_list)
status_output_dict["sum_manual_stp_ch_n"] = np.sum(output_n_stp_ch)
status_output_dict["mean_manual_stp_ch_n"] = np.mean(output_n_stp_ch)
status_output_dict["current_energy_Wh_mean"] = np.mean(output_energy_Wh)
status_output_dict["current_energy_Wh_sum"] =
|
np.sum(output_energy_Wh)
|
numpy.sum
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 21:14:25 2015
@author: <NAME>, <NAME>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import h5py
import collections
from warnings import warn
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
import numpy as np
import socket
from platform import platform
from .write_utils import INDICES_DTYPE, VALUES_DTYPE, get_aux_dset_slicing, clean_string_att, make_indices_matrix, \
Dimension, build_ind_val_matrices
from .io_utils import get_time_stamp
from .dtype_utils import contains_integers, validate_dtype
from ...__version__ import version as pycroscopy_version
__all__ = ['get_attr', 'get_h5_obj_refs', 'get_indices_for_region_ref', 'get_dimensionality', 'get_sort_order',
'get_auxiliary_datasets', 'get_attributes', 'get_group_refs', 'check_if_main', 'check_and_link_ancillary',
           'copy_region_refs', 'get_all_main', 'get_unit_values', 'get_data_descriptor', 'check_for_matching_attrs',
'create_region_reference',
'copy_attributes', 'reshape_to_n_dims', 'link_h5_objects_as_attrs',
'link_h5_obj_as_alias',
'find_results_groups', 'get_formatted_labels', 'reshape_from_n_dims', 'find_dataset', 'print_tree',
'copy_main_attributes', 'create_empty_dataset', 'check_for_old', 'get_source_dataset',
'link_as_main', 'copy_reg_ref_reduced_dim', 'simple_region_ref_copy', 'write_book_keeping_attrs',
'is_editable_h5', 'write_ind_val_dsets', 'write_reduced_spec_dsets',
'write_simple_attrs', 'write_main_dataset', 'attempt_reg_ref_build', 'write_region_references',
'assign_group_index', 'clean_reg_ref', 'create_results_group', 'create_indexed_group'
]
if sys.version_info.major == 3:
unicode = str
# TODO: Next version should account for two objects being in different files!
def print_tree(parent, rel_paths=False, main_dsets_only=False):
"""
Simple function to recursively print the contents of an hdf5 group
Parameters
----------
parent : h5py.Group
HDF5 tree to print
rel_paths : (Optional) bool. Default = False
True - prints the relative paths for all elements.
False - prints a tree-like structure with only the element names
main_dsets_only : bool, optional. default=False
True - prints only groups and Main datasets
False - prints all dataset and group objects
Returns
-------
None
"""
def __print(name, obj):
show = True
if main_dsets_only:
show = False
if check_if_main(obj) or isinstance(obj, h5py.Group):
show = True
if not show:
return
if rel_paths:
print(name)
else:
levels = name.count('/')
curr_name = name[name.rfind('/') + 1:]
print(levels * ' ' + '├ ' + curr_name)
if isinstance(obj, h5py.Group):
print((levels + 1) * ' ' + len(curr_name) * '-')
print(parent.name)
parent.visititems(__print)
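# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical usage of print_tree(); 'example.h5' is an assumed
# file name used purely for illustration.
def _example_print_tree(h5_path='example.h5'):
    with h5py.File(h5_path, mode='r') as h5_file:
        print_tree(h5_file)                  # tree-like view of all objects
        print_tree(h5_file, rel_paths=True)  # flat list of relative paths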
def get_all_main(parent, verbose=False):
"""
    Simple function to recursively find all 'Main' datasets within an HDF5 group or file
Parameters
----------
parent : h5py.Group
HDF5 Group to search within
verbose : bool
If true, extra print statements are enabled
Returns
-------
main_list : list of h5py.Dataset
The datasets found in the file that meet the 'Main Data' criteria.
"""
if not isinstance(parent, (h5py.Group, h5py.File)):
raise TypeError('parent should be a h5py.File or h5py.Group object')
from .pycro_data import PycroDataset
main_list = list()
def __check(name, obj):
if verbose:
print(name, obj)
if isinstance(obj, h5py.Dataset):
if verbose:
print(name, 'is an HDF5 Dataset.')
ismain = check_if_main(obj)
if ismain:
if verbose:
print(name, 'is a `Main` dataset.')
main_list.append(PycroDataset(obj))
if verbose:
print('Checking the group {} for `Main` datasets.'.format(parent.name))
parent.visititems(__check)
return main_list
def get_auxiliary_datasets(h5_object, aux_dset_name=None):
"""
Returns auxiliary dataset objects associated with some DataSet through its attributes.
Note - region references will be ignored.
Parameters
----------
h5_object : h5py.Dataset, h5py.Group or h5py.File object
Dataset object reference.
aux_dset_name : str or list of strings, optional, default = all (DataSet.attrs).
Name of auxiliary Dataset objects to return.
Returns
-------
list of h5py.Reference of auxiliary dataset objects.
"""
if not isinstance(h5_object, (h5py.Dataset, h5py.Group, h5py.File)):
raise TypeError('h5_object should be a h5py.Dataset, h5py.Group or h5py.File object')
if aux_dset_name is None:
aux_dset_name = h5_object.attrs.keys()
else:
if isinstance(aux_dset_name, (str, unicode)):
aux_dset_name = [aux_dset_name]
if not isinstance(aux_dset_name, (list, tuple)):
raise TypeError('aux_dset_name should be a string or list / tuple of strings')
if not np.all([isinstance(x, (str, unicode)) for x in aux_dset_name]):
raise TypeError('aux_dset_name should be a string or list / tuple of strings')
data_list = list()
curr_name = None
try:
h5_file = h5_object.file
for curr_name in aux_dset_name:
h5_ref = h5_object.attrs[curr_name]
if isinstance(h5_ref, h5py.Reference) and isinstance(h5_file[h5_ref], h5py.Dataset) and not \
isinstance(h5_ref, h5py.RegionReference):
data_list.append(h5_file[h5_ref])
except KeyError:
raise KeyError('%s is not an attribute of %s' % (str(curr_name), h5_object.name))
return data_list
def get_attr(h5_object, attr_name):
"""
Returns the attribute from the h5py object
Parameters
----------
h5_object : h5py.Dataset, h5py.Group or h5py.File object
object whose attribute is desired
attr_name : str
Name of the attribute of interest
Returns
-------
att_val : object
value of attribute, in certain cases (byte strings or list of byte strings) reformatted to readily usable forms
"""
if not isinstance(h5_object, (h5py.Dataset, h5py.Group, h5py.File)):
raise TypeError('h5_object should be a h5py.Dataset, h5py.Group or h5py.File object')
if not isinstance(attr_name, (str, unicode)):
raise TypeError('attr_name should be a string')
if attr_name not in h5_object.attrs.keys():
raise KeyError("'{}' is not an attribute in '{}'".format(attr_name, h5_object.name))
att_val = h5_object.attrs.get(attr_name)
if isinstance(att_val, np.bytes_) or isinstance(att_val, bytes):
att_val = att_val.decode('utf-8')
elif type(att_val) == np.ndarray:
if sys.version_info.major == 3:
if att_val.dtype.type in [np.bytes_, np.object_]:
att_val = np.array([str(x, 'utf-8') for x in att_val])
return att_val
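# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of get_attr(); the file name and dataset path below are
# assumptions for illustration only. Byte-string attributes are returned as str.
def _example_get_attr(h5_path='example.h5'):
    with h5py.File(h5_path, mode='r') as h5_file:
        h5_dset = h5_file['Measurement_000/Channel_000/Raw_Data']
        quantity = get_attr(h5_dset, 'quantity')  # e.g. 'Current'
        units = get_attr(h5_dset, 'units')        # e.g. 'nA'
        return '{} ({})'.format(quantity, units)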
def get_attributes(h5_object, attr_names=None):
"""
Returns attribute associated with some DataSet.
Parameters
----------
h5_object : h5py.Dataset
Dataset object reference.
attr_names : string or list of strings, optional, default = all (DataSet.attrs).
Name of attribute object to return.
Returns
-------
Dictionary containing (name,value) pairs of attributes
"""
if not isinstance(h5_object, (h5py.Dataset, h5py.Group, h5py.File)):
raise TypeError('h5_object should be a h5py.Dataset, h5py.Group or h5py.File object')
if attr_names is None:
attr_names = h5_object.attrs.keys()
else:
if isinstance(attr_names, (str, unicode)):
attr_names = [attr_names]
if not isinstance(attr_names, (list, tuple)):
raise TypeError('attr_names should be a string or list / tuple of strings')
if not np.all([isinstance(x, (str, unicode)) for x in attr_names]):
raise TypeError('attr_names should be a string or list / tuple of strings')
att_dict = {}
for attr in attr_names:
try:
att_dict[attr] = get_attr(h5_object, attr)
except KeyError:
raise KeyError('%s is not an attribute of %s' % (str(attr), h5_object.name))
return att_dict
def get_region(h5_dset, reg_ref_name):
"""
Gets the region in a dataset specified by a region reference
Parameters
----------
h5_dset : h5py.Dataset
Dataset containing the region reference
reg_ref_name : str / unicode
Name of the region reference
Returns
-------
value : np.ndarray
Data specified by the region reference. Note that a squeeze is applied by default.
"""
if not isinstance(reg_ref_name, (str, unicode)):
raise TypeError('reg_ref_name should be a string')
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be of type h5py.Dataset')
# this may raise KeyErrors. Let it
reg_ref = h5_dset.attrs[reg_ref_name]
return np.squeeze(h5_dset[reg_ref])
def get_h5_obj_refs(obj_names, h5_refs):
"""
Given a list of H5 references and a list of names,
this method returns H5 objects corresponding to the names
Parameters
----------
obj_names : string or List of strings
names of target h5py objects
h5_refs : H5 object reference or List of H5 object references
list containing the target reference
Returns
-------
found_objects : List of HDF5 dataset references
Corresponding references
"""
from .pycro_data import PycroDataset
if isinstance(obj_names, (str, unicode)):
obj_names = [obj_names]
if not isinstance(obj_names, (list, tuple)):
raise TypeError('obj_names should be a string or list of strings')
if not np.all([isinstance(x, (str, unicode)) for x in obj_names]):
raise TypeError('obj_names should be a string or list / tuple of strings')
if isinstance(h5_refs, (h5py.File, h5py.Group, h5py.Dataset)):
h5_refs = [h5_refs]
if not isinstance(h5_refs, (list, tuple)):
raise TypeError('h5_refs should be a / list of h5py.Dataset, h5py.Group or h5py.File object(s)')
found_objects = []
for target_name in obj_names:
for h5_object in h5_refs:
if not isinstance(h5_object, (h5py.File, h5py.Group, h5py.Dataset)):
continue
if h5_object.name.split('/')[-1] == target_name:
try:
found_objects.append(PycroDataset(h5_object))
except TypeError:
found_objects.append(h5_object)
return found_objects
def get_group_refs(group_name, h5_refs):
"""
Given a list of H5 references and a group name,
    this method returns the h5py.Group objects whose names begin with the given group name.
This function is especially useful when the suffix of the written group
is unknown (due to the autoindexing in HDFwriter)
Parameters
----------
group_name : unicode / string
Name of the datagroup. If the index suffix is left out, all groups matching the basename will be returned
Example - provide 'SourceDataset_ProcessName'
if a specific group is required, provide - 'SourceDataset_ProcessName_017'
h5_refs : list
List of h5 object references
Returns
-------
group_list : list
A list of h5py.Group objects whose name matched with the provided group_name
"""
if not isinstance(group_name, (str, unicode)):
raise TypeError('group_name must be a string')
if not isinstance(h5_refs, (list, tuple)):
raise TypeError('h5_refs should be a list or tuple')
group_list = list()
for h5_object in h5_refs:
if not isinstance(h5_object, h5py.Group):
warn('Ignoring object of type: {}. Expected h5py.Group object'.format(type(h5_object)))
continue
if h5_object.name.split('/')[-1].startswith(group_name):
group_list.append(h5_object)
return group_list
def find_dataset(h5_group, dset_name):
"""
    Uses visititems() to find all datasets with the desired name
Parameters
----------
h5_group : h5py.Group
Group to search within for the Dataset
dset_name : str
Name of the dataset to search for
Returns
-------
datasets : list
List of [Name, object] pairs corresponding to datasets that match `ds_name`.
"""
from .pycro_data import PycroDataset
if not isinstance(h5_group, (h5py.File, h5py.Group)):
raise TypeError('h5_group should be a h5py.File or h5py.Group object')
if not isinstance(dset_name, (str, unicode)):
raise TypeError('dset_name should be a string')
# print 'Finding all instances of', ds_name
datasets = []
def __find_name(name, obj):
if dset_name in name.split('/')[-1] and isinstance(obj, h5py.Dataset):
try:
datasets.append(PycroDataset(obj))
except TypeError:
datasets.append(obj)
return
h5_group.visititems(__find_name)
return datasets
def find_results_groups(h5_main, tool_name):
"""
Finds a list of all groups containing results of the process of name tool_name being applied to the dataset
Parameters
----------
h5_main : h5 dataset reference
Reference to the target dataset to which the tool was applied
tool_name : String / unicode
Name of the tool applied to the target dataset
Returns
-------
groups : list of references to h5 group objects
groups whose name contains the tool name and the dataset name
"""
if not isinstance(h5_main, h5py.Dataset):
raise TypeError('h5_main should be a h5py.Dataset object')
if not isinstance(tool_name, (str, unicode)):
raise TypeError('tool_name should be a string')
dset_name = h5_main.name.split('/')[-1]
h5_parent_group = h5_main.parent
groups = []
for key in h5_parent_group.keys():
if dset_name in key and tool_name in key and isinstance(h5_parent_group[key], h5py.Group):
groups.append(h5_parent_group[key])
return groups
def get_indices_for_region_ref(h5_main, ref, return_method='slices'):
"""
Given an hdf5 region reference and the dataset it refers to,
return an array of indices within that dataset that
correspond to the reference.
Parameters
----------
h5_main : HDF5 Dataset
dataset that the reference can be returned from
ref : HDF5 Region Reference
Region reference object
return_method : {'slices', 'corners', 'points'}
        slices : the reference is returned as pairs of slices
corners : the reference is returned as pairs of corners representing
the starting and ending indices of each block
        points : the reference is returned as a list of tuples of points
Returns
-------
ref_inds : Numpy Array
array of indices in the source dataset that ref accesses
"""
if not isinstance(h5_main, h5py.Dataset):
raise TypeError('h5_main should be a h5py.Dataset object')
if not isinstance(ref, h5py.RegionReference):
raise TypeError('ref should be a h5py.RegionReference object')
if return_method is not None:
if not isinstance(return_method, (str, unicode)):
raise TypeError('return_method should be a string')
if return_method == 'points':
def __corners_to_point_array(start, stop):
"""
Convert a pair of tuples representing two opposite corners of an HDF5 region reference
into a list of arrays for each dimension.
Parameters
----------
start : Tuple
the starting indices of the region
stop : Tuple
the final indices of the region
Returns
-------
inds : Tuple of arrays
the list of points in each dimension
"""
ranges = []
for i in range(len(start)):
if start[i] == stop[i]:
ranges.append([stop[i]])
else:
ranges.append(np.arange(start[i], stop[i] + 1, dtype=np.uint))
grid = np.meshgrid(*ranges, indexing='ij')
            ref_inds = np.asarray(list(zip(*(x.flat for x in grid))))
return ref_inds
return_func = __corners_to_point_array
elif return_method == 'corners':
def __corners_to_corners(start, stop):
return start, stop
return_func = __corners_to_corners
elif return_method == 'slices':
def __corners_to_slices(start, stop):
"""
Convert a pair of tuples representing two opposite corners of an HDF5 region reference
into a pair of slices.
Parameters
----------
start : Tuple
the starting indices of the region
stop : Tuple
the final indices of the region
Returns
-------
slices : list
pair of slices representing the region
"""
slices = []
for idim in range(len(start)):
slices.append(slice(start[idim], stop[idim]))
return slices
return_func = __corners_to_slices
region = h5py.h5r.get_region(ref, h5_main.id)
reg_type = region.get_select_type()
if reg_type == 2:
"""
Reference is hyperslabs
"""
ref_inds = []
for start, end in region.get_select_hyper_blocklist():
ref_inds.append(return_func(start, end))
ref_inds = np.array(ref_inds).reshape(-1, len(start))
elif reg_type == 3:
"""
Reference is single block
"""
start, end = region.get_select_bounds()
ref_inds = return_func(start, end)
else:
warn('No method currently exists for converting this type of reference.')
ref_inds = np.empty(0)
return ref_inds
def check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):
"""
    This function will add references to auxiliary datasets as attributes
of an input dataset.
If the entries in anc_refs are valid references, they will be added
as attributes with the name taken from the corresponding entry in
anc_names.
If an entry in anc_refs is not a valid reference, the function will
attempt to get the attribute with the same name from the h5_main
dataset
Parameters
----------
h5_dset : HDF5 Dataset
dataset to which the attributes will be written
anc_names : list of str
the attribute names to be used
h5_main : HDF5 Dataset, optional
dataset from which attributes will be copied if `anc_refs` is None
anc_refs : list of HDF5 Object References, optional
references that correspond to the strings in `anc_names`
Returns
-------
None
Notes
-----
Either `h5_main` or `anc_refs` MUST be provided and `anc_refs` has the
higher priority if both are present.
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
if not isinstance(anc_names, (list, tuple)):
raise TypeError('anc_names should be a list / tuple')
if h5_main is not None:
if not isinstance(h5_main, h5py.Dataset):
raise TypeError('h5_main should be a h5py.Dataset object')
if anc_refs is not None:
if not isinstance(anc_refs, (list, tuple)):
raise TypeError('anc_refs should be a list / tuple')
def __check_and_link_single(h5_obj_ref, target_ref_name):
if isinstance(h5_obj_ref, h5py.Reference):
h5_dset.attrs[target_ref_name] = h5_obj_ref
elif isinstance(h5_obj_ref, h5py.Dataset):
h5_dset.attrs[target_ref_name] = h5_obj_ref.ref
elif h5_main is not None:
h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])
if len(h5_anc) == 1:
link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)
else:
warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))
warn(warnstring)
if bool(np.iterable(anc_refs) and not isinstance(anc_refs, h5py.Dataset)):
"""
anc_refs can be iterated over
"""
for ref_name, h5_ref in zip(anc_names, anc_refs):
__check_and_link_single(h5_ref, ref_name)
elif anc_refs is not None:
"""
anc_refs is just a single value
"""
__check_and_link_single(anc_refs, anc_names)
elif isinstance(anc_names, str) or isinstance(anc_names, unicode):
"""
Single name provided
"""
__check_and_link_single(None, anc_names)
else:
"""
Iterable of names provided
"""
for name in anc_names:
__check_and_link_single(None, name)
h5_dset.file.flush()
def create_region_reference(h5_main, ref_inds):
"""
Create a region reference in the destination dataset using an iterable of pairs of indices
representing the start and end points of a hyperslab block
Parameters
----------
h5_main : HDF5 dataset
dataset the region will be created in
ref_inds : Iterable
index pairs, [start indices, final indices] for each block in the
hyperslab
Returns
-------
new_ref : HDF5 Region reference
reference in `h5_main` for the blocks of points defined by `ref_inds`
"""
if not isinstance(h5_main, h5py.Dataset):
raise TypeError('h5_main should be a h5py.Dataset object')
if not isinstance(ref_inds, Iterable):
raise TypeError('ref_inds should be a list or tuple')
h5_space = h5_main.id.get_space()
h5_space.select_none()
for start, stop in ref_inds:
block = stop - start + 1
h5_space.select_hyperslab(tuple(start), (1, 1), block=tuple(block), op=1)
if not h5_space.select_valid():
warn('Could not create new region reference.')
return None
new_ref = h5py.h5r.create(h5_main.id, b'.', h5py.h5r.DATASET_REGION, space=h5_space)
return new_ref
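# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of create_region_reference(): mark the first two rows and
# first three columns of an (assumed writable, at least 2x3) main dataset as a
# single block and store the reference as an attribute.
def _example_create_region_reference(h5_main):
    # one block: start indices (0, 0) to final indices (1, 2), both inclusive
    ref_inds = [(np.array([0, 0]), np.array([1, 2]))]
    reg_ref = create_region_reference(h5_main, ref_inds)
    if reg_ref is not None:
        h5_main.attrs['first_block'] = reg_ref
    return reg_ref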
def get_data_descriptor(h5_dset):
"""
Returns a string of the form 'quantity (unit)'
Parameters
----------
h5_dset : h5py.Dataset object
A 'main' dataset in pycroscopy
Returns
-------
descriptor : String
string of the form 'quantity (unit)'
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
try:
quant = get_attr(h5_dset, 'quantity')
except KeyError:
quant = 'unknown quantity'
try:
unit = get_attr(h5_dset, 'units')
except KeyError:
unit = 'unknown units'
return '{} ({})'.format(quant, unit)
def get_formatted_labels(h5_dset):
"""
Takes any dataset which has the labels and units attributes and returns a list of strings
formatted as 'label k (unit k)'
Parameters
----------
h5_dset : h5py.Dataset object
dataset which has labels and units attributes
Returns
-------
labels : list
list of strings formatted as 'label k (unit k)'
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
try:
labs = get_attr(h5_dset, 'labels')
try:
units = get_attr(h5_dset, 'units')
except KeyError:
warn('units attribute was missing')
units = ['' for _ in labs]
if len(labs) != len(units):
raise ValueError('Labels and units have different sizes!\n\tLabels:{}, units:{}'.format(labs, units))
labels = []
for lab, unit in zip(labs, units):
labels.append('{} ({})'.format(lab, unit))
return labels
except KeyError:
raise KeyError('labels attribute was missing')
def reshape_to_n_dims(h5_main, h5_pos=None, h5_spec=None, get_labels=False, verbose=False, sort_dims=False):
"""
Reshape the input 2D matrix to be N-dimensions based on the
position and spectroscopic datasets.
Parameters
----------
h5_main : HDF5 Dataset
2D data to be reshaped
h5_pos : HDF5 Dataset, optional
Position indices corresponding to rows in `h5_main`
h5_spec : HDF5 Dataset, optional
Spectroscopic indices corresponding to columns in `h5_main`
get_labels : bool, optional
Whether or not to return the dimension labels. Default False
verbose : bool, optional
Whether or not to print debugging statements
sort_dims : bool
If True, the data is sorted so that the dimensions are in order from fastest to slowest
If False, the data is kept in the original order
If `get_labels` is also True, the labels are sorted as well.
Returns
-------
ds_Nd : N-D numpy array
N dimensional numpy array arranged as [positions slowest to fastest, spectroscopic slowest to fastest]
success : boolean or string
True if full reshape was successful
"Positions" if it was only possible to reshape by
the position dimensions
False if no reshape was possible
ds_labels : list of str
List of the labels of each dimension of `ds_Nd`
Notes
-----
If either `h5_pos` or `h5_spec` are not provided, the function will first
attempt to find them as attributes of `h5_main`. If that fails, it will
generate dummy values for them.
"""
# TODO: sort_dims does not appear to do much. Functions as though it was always True
if h5_pos is None and h5_spec is None:
if not check_if_main(h5_main):
raise ValueError('if h5_main is a h5py.Dataset it should be a Main dataset')
else:
if not isinstance(h5_main, (h5py.Dataset, np.ndarray)):
raise TypeError('h5_main should either be a h5py.Dataset or numpy array')
if h5_pos is not None:
if not isinstance(h5_pos, (h5py.Dataset, np.ndarray)):
raise TypeError('h5_pos should either be a h5py.Dataset or numpy array')
if h5_pos.shape[0] != h5_main.shape[0]:
raise ValueError('The size of h5_pos: {} does not match with h5_main: {}'.format(h5_pos.shape,
h5_main.shape))
if h5_spec is not None:
if not isinstance(h5_spec, (h5py.Dataset, np.ndarray)):
raise TypeError('h5_spec should either be a h5py.Dataset or numpy array')
if h5_spec.shape[1] != h5_main.shape[1]:
raise ValueError('The size of h5_spec: {} does not match with h5_main: {}'.format(h5_spec.shape,
h5_main.shape))
pos_labs = np.array(['Positions'])
spec_labs = np.array(['Spectral_Step'])
if h5_pos is None:
"""
Get the Position datasets from the references if possible
"""
if isinstance(h5_main, h5py.Dataset):
try:
h5_pos = h5_main.file[h5_main.attrs['Position_Indices']]
ds_pos = h5_pos[()]
pos_labs = get_attr(h5_pos, 'labels')
except KeyError:
print('No position datasets found as attributes of {}'.format(h5_main.name))
if len(h5_main.shape) > 1:
ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
else:
ds_pos = np.array(0, dtype=INDICES_DTYPE).reshape(-1, 1)
else:
ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
elif isinstance(h5_pos, h5py.Dataset):
"""
Position Indices dataset was provided
"""
ds_pos = h5_pos[()]
pos_labs = get_attr(h5_pos, 'labels')
elif isinstance(h5_pos, np.ndarray):
ds_pos = np.atleast_2d(h5_pos)
pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
else:
raise TypeError('Position Indices must be either h5py.Dataset or None')
##################################################
if h5_spec is None:
"""
Get the Spectroscopic datasets from the references if possible
"""
if isinstance(h5_main, h5py.Dataset):
try:
h5_spec = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
ds_spec = h5_spec[()]
spec_labs = get_attr(h5_spec, 'labels')
except KeyError:
print('No spectroscopic datasets found as attributes of {}'.format(h5_main.name))
if len(h5_main.shape) > 1:
ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
else:
ds_spec = np.array(0, dtype=INDICES_DTYPE).reshape([1, 1])
else:
ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
elif isinstance(h5_spec, h5py.Dataset):
"""
Spectroscopic Indices dataset was provided
"""
ds_spec = h5_spec[()]
spec_labs = get_attr(h5_spec, 'labels')
elif isinstance(h5_spec, np.ndarray):
ds_spec = h5_spec
spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
else:
raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')
'''
Sort the indices from fastest to slowest
'''
pos_sort = get_sort_order(np.transpose(ds_pos))
spec_sort = get_sort_order(ds_spec)
if verbose:
print('Position dimensions:', pos_labs)
print('Position sort order:', pos_sort)
print('Spectroscopic Dimensions:', spec_labs)
print('Spectroscopic sort order:', spec_sort)
'''
Get the size of each dimension in the sorted order
'''
pos_dims = get_dimensionality(np.transpose(ds_pos), pos_sort)
spec_dims = get_dimensionality(ds_spec, spec_sort)
if verbose:
print('\nPosition dimensions (sort applied):', pos_labs[pos_sort])
print('Position dimensionality (sort applied):', pos_dims)
print('Spectroscopic dimensions (sort applied):', spec_labs[spec_sort])
print('Spectroscopic dimensionality (sort applied):', spec_dims)
ds_main = h5_main[()]
"""
Now we reshape the dataset based on those dimensions
numpy reshapes correctly when the dimensions are arranged from slowest to fastest.
Since the sort orders we have are from fastest to slowest, we need to reverse the orders
for both the position and spectroscopic dimensions
"""
try:
ds_Nd = np.reshape(ds_main, pos_dims[::-1] + spec_dims[::-1])
except ValueError:
warn('Could not reshape dataset to full N-dimensional form. Attempting reshape based on position only.')
try:
ds_Nd = np.reshape(ds_main, pos_dims[::-1] + [-1])
except ValueError:
warn('Reshape by position only also failed. Will keep dataset in 2d form.')
if get_labels:
return ds_main, False, ['Position', 'Spectral Step']
else:
return ds_main, False
# No exception
else:
if get_labels:
                return ds_Nd, 'Positions', ['Position'] + list(spec_labs)
else:
return ds_Nd, 'Positions'
all_labels = np.hstack((pos_labs[pos_sort][::-1],
spec_labs[spec_sort][::-1]))
if verbose:
print('\nAfter first reshape, labels are', all_labels)
print('Data shape is', ds_Nd.shape)
"""
Now we transpose the axes for both the position and spectroscopic dimensions
so that they are in the same order as in the index array
"""
swap_axes = list()
if sort_dims:
for lab in pos_labs[pos_sort]:
swap_axes.append(np.argwhere(all_labels == lab).squeeze())
for lab in spec_labs[spec_sort]:
swap_axes.append(np.argwhere(all_labels == lab).squeeze())
else:
for lab in pos_labs:
swap_axes.append(np.argwhere(all_labels == lab).squeeze())
for lab in spec_labs:
swap_axes.append(np.argwhere(all_labels == lab).squeeze())
swap_axes = np.array(swap_axes)
if verbose:
        print('\nAxes will be permuted in this order:', swap_axes)
print('New labels ordering:', all_labels[swap_axes])
ds_Nd = np.transpose(ds_Nd, swap_axes)
results = [ds_Nd, True]
if verbose:
print('Dataset now of shape:', ds_Nd.shape)
if get_labels:
'''
Get the labels in the proper order
'''
results.append(all_labels[swap_axes])
return results
def reshape_from_n_dims(data_n_dim, h5_pos=None, h5_spec=None, verbose=False):
"""
    Reshape the input N-dimensional array back into a 2D matrix based on the
    position and spectroscopic datasets.
Parameters
----------
data_n_dim : numpy.array
N dimensional numpy array arranged as [positions dimensions..., spectroscopic dimensions]
If h5_pos and h5_spec are not provided, this function will have to assume that the dimensions
are arranged as [positions slowest to fastest, spectroscopic slowest to fastest].
This restriction is removed if h5_pos and h5_spec are provided
h5_pos : HDF5 Dataset, numpy.array
Position indices corresponding to rows in the final 2d array
The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
In other words if data_n_dim had two position dimensions arranged as [pos_fast, pos_slow, spec_dim_1....],
h5_pos should be arranged as [pos_fast, pos_slow]
h5_spec : HDF5 Dataset, numpy. array
Spectroscopic indices corresponding to columns in the final 2d array
The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
In other words if data_n_dim had two spectral dimensions arranged as [pos_dim_1,..., spec_fast, spec_slow],
        h5_spec should be arranged as [spec_fast, spec_slow]
verbose : bool, optional. Default = False
Whether or not to print log statements
Returns
-------
ds_2d : numpy.array
2 dimensional numpy array arranged as [positions, spectroscopic]
success : boolean or string
True if full reshape was successful
"Positions" if it was only possible to reshape by
the position dimensions
False if no reshape was possible
Notes
-----
If either `h5_pos` or `h5_spec` are not provided, the function will
assume the first dimension is position and the remaining are spectroscopic already
in order from fastest to slowest.
"""
if not isinstance(data_n_dim, np.ndarray):
raise TypeError('data_n_dim is not a numpy array')
if h5_spec is None and h5_pos is None:
raise ValueError('at least one of h5_pos or h5_spec must be specified for an attempt to reshape to 2D')
if data_n_dim.ndim < 2:
return data_n_dim, True
if h5_pos is None:
pass
elif isinstance(h5_pos, h5py.Dataset):
'''
Position Indices dataset was provided
'''
ds_pos = h5_pos[()]
elif isinstance(h5_pos, np.ndarray):
ds_pos = h5_pos
else:
raise TypeError('Position Indices must be either h5py.Dataset or None')
##################################################
if h5_spec is None:
pass
elif isinstance(h5_spec, h5py.Dataset):
'''
Spectroscopic Indices dataset was provided
'''
ds_spec = h5_spec[()]
elif isinstance(h5_spec, np.ndarray):
ds_spec = h5_spec
else:
raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')
if h5_spec is None and h5_pos is not None:
if verbose:
print('Spectral indices not provided but position indices provided.\n'
'Building spectral indices assuming that dimensions are arranged as slow -> fast')
pos_dims = get_dimensionality(ds_pos, index_sort=get_sort_order(ds_pos))
if not np.all([x in data_n_dim.shape for x in pos_dims]):
raise ValueError('Dimension sizes in pos_dims: {} do not exist in data_n_dim shape: '
'{}'.format(pos_dims, data_n_dim.shape))
spec_dims = [col for col in list(data_n_dim.shape[len(pos_dims):])]
if verbose:
print('data has dimensions: {}. Provided position indices had dimensions of size: {}. Spectral dimensions '
                  'will be built with dimensions: {}'.format(data_n_dim.shape, pos_dims, spec_dims))
ds_spec = make_indices_matrix(spec_dims, is_position=False)
elif h5_pos is None and h5_spec is not None:
if verbose:
print('Position indices not provided but spectral indices provided.\n'
'Building position indices assuming that dimensions are arranged as slow -> fast')
spec_dims = get_dimensionality(ds_spec, index_sort=get_sort_order(ds_spec))
if not np.all([x in data_n_dim.shape for x in spec_dims]):
raise ValueError('Dimension sizes in spec_dims: {} do not exist in data_n_dim shape: '
'{}'.format(spec_dims, data_n_dim.shape))
pos_dims = [col for col in list(data_n_dim.shape[:data_n_dim.ndim-len(spec_dims)])]
if verbose:
            print('data has dimensions: {}. Provided spectroscopic indices had dimensions of size: {}. Position '
                  'dimensions will be built with dimensions: {}'.format(data_n_dim.shape, spec_dims, pos_dims))
ds_pos = make_indices_matrix(pos_dims, is_position=True)
elif h5_spec is not None and h5_pos is not None:
        assert ds_pos.shape[0] * ds_spec.shape[1] == np.prod(data_n_dim.shape)
'''
Sort the indices from fastest to slowest
'''
pos_sort = get_sort_order(np.transpose(ds_pos))
spec_sort = get_sort_order(ds_spec)
if h5_spec is None:
spec_sort = spec_sort[::-1]
if h5_pos is None:
pos_sort = pos_sort[::-1]
if verbose:
print('Position sort order: {}'.format(pos_sort))
print('Spectroscopic sort order: {}'.format(spec_sort))
'''
Now we transpose the axes associated with the spectroscopic dimensions
so that they are in the same order as in the index array
'''
swap_axes = np.append(pos_sort[::-1], spec_sort[::-1] + len(pos_sort))
if verbose:
print('swap axes: {} to be applied to N dimensional data of shape {}'.format(swap_axes, data_n_dim.shape))
data_n_dim_2 = np.transpose(data_n_dim, swap_axes)
if verbose:
print('N dimensional data shape after axes swap: {}'.format(data_n_dim_2.shape))
'''
Now we reshape the dataset based on those dimensions
We must use the spectroscopic dimensions in reverse order
'''
try:
ds_2d = np.reshape(data_n_dim_2, [ds_pos.shape[0], ds_spec.shape[1]])
except ValueError:
raise ValueError('Could not reshape dataset to full N-dimensional form')
return ds_2d, True
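# --- Illustrative sketch (not part of the original module) ---
# A small, self-contained numpy example of the 2D <-> N-D round trip using
# made-up index matrices: two position dimensions (x fast with 2 steps, y slow
# with 3 steps) and one spectroscopic dimension with 4 steps.
def _example_reshape_round_trip():
    ds_pos = np.vstack([np.tile(np.arange(2), 3),       # x: 0 1 0 1 0 1
                        np.repeat(np.arange(3), 2)]).T  # y: 0 0 1 1 2 2
    ds_spec = np.arange(4).reshape(1, -1)
    ds_2d = np.arange(6 * 4).reshape(6, 4)               # [positions, spectroscopic]
    ds_nd, success = reshape_to_n_dims(ds_2d, h5_pos=ds_pos, h5_spec=ds_spec)
    # ds_nd should come back with shape (2, 3, 4) ordered [x, y, spectroscopic]
    ds_2d_again, _ = reshape_from_n_dims(ds_nd, h5_pos=ds_pos, h5_spec=ds_spec)
    return np.allclose(ds_2d, ds_2d_again)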
def get_dimensionality(ds_index, index_sort=None):
"""
Get the size of each index dimension in a specified sort order
Parameters
----------
ds_index : 2D HDF5 Dataset or numpy array
Row matrix of indices
index_sort : Iterable of unsigned integers (Optional)
Sort that can be applied to dimensionality.
For example - Order of rows sorted from fastest to slowest
Returns
-------
sorted_dims : list of unsigned integers
Dimensionality of each row in ds_index. If index_sort is supplied, it will be in the sorted order
"""
if not isinstance(ds_index, (np.ndarray, h5py.Dataset)):
raise TypeError('ds_index should either be a numpy array or h5py.Dataset')
if ds_index.shape[0] > ds_index.shape[1]:
# must be spectroscopic like in shape (few rows, more cols)
ds_index = np.transpose(ds_index)
if index_sort is None:
index_sort = np.arange(ds_index.shape[0])
else:
if not contains_integers(index_sort, min_val=0):
raise ValueError('index_sort should contain integers > 0')
if np.array(index_sort).ndim != 1:
raise ValueError('index_sort should be a 1D array')
assert len(np.unique(index_sort)) <= ds_index.shape[0]
sorted_dims = [len(np.unique(row)) for row in np.array(ds_index, ndmin=2)[index_sort]]
return sorted_dims
def get_sort_order(ds_spec):
"""
Find how quickly the spectroscopic values are changing in each row
and the order of rows from fastest changing to slowest.
Parameters
----------
ds_spec : 2D HDF5 dataset or numpy array
Rows of indices to be sorted from fastest changing to slowest
Returns
-------
change_sort : List of unsigned integers
Order of rows sorted from fastest changing to slowest
"""
if not isinstance(ds_spec, (np.ndarray, h5py.Dataset)):
raise TypeError('ds_spec should either be a numpy array or h5py.Dataset')
if ds_spec.shape[0] > ds_spec.shape[1]:
# must be spectroscopic like in shape (few rows, more cols)
ds_spec = np.transpose(ds_spec)
change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec]
change_sort = np.argsort(change_count)[::-1]
return change_sort
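# --- Illustrative sketch (not part of the original module) ---
# A tiny numpy example of how get_sort_order() and get_dimensionality() work
# together; the indices matrix is made up: one dimension with 3 steps changing
# fastest and one with 2 steps changing slowest.
def _example_sort_order_and_dims():
    ds_spec = np.vstack([np.tile(np.arange(3), 2),     # fast: 0 1 2 0 1 2
                         np.repeat(np.arange(2), 3)])  # slow: 0 0 0 1 1 1
    spec_sort = get_sort_order(ds_spec)                 # expect array([0, 1])
    spec_dims = get_dimensionality(ds_spec, spec_sort)  # expect [3, 2]
    return spec_sort, spec_dims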
def create_empty_dataset(source_dset, dtype, dset_name, h5_group=None, new_attrs=None, skip_refs=False):
"""
Creates an empty dataset in the h5 file based on the provided dataset in the same or specified group
Parameters
----------
source_dset : h5py.Dataset object
Source object that provides information on the group and shape of the dataset
dtype : dtype
Data type of the fit / guess datasets
dset_name : String / Unicode
Name of the dataset
h5_group : h5py.Group object, optional. Default = None
Group within which this dataset will be created
new_attrs : dictionary (Optional)
Any new attributes that need to be written to the dataset
skip_refs : boolean, optional
Should ObjectReferences and RegionReferences be skipped when copying attributes from the
`source_dset`
Returns
-------
h5_new_dset : h5py.Dataset object
Newly created dataset
"""
if not isinstance(source_dset, h5py.Dataset):
        raise TypeError('source_dset should be a h5py.Dataset object')
_ = validate_dtype(dtype)
if new_attrs is not None:
if not isinstance(new_attrs, dict):
raise TypeError('new_attrs should be a dictionary')
else:
new_attrs = dict()
if h5_group is None:
h5_group = source_dset.parent
else:
if not isinstance(h5_group, (h5py.Group, h5py.File)):
raise TypeError('h5_group should be a h5py.Group or h5py.File object')
if not isinstance(dset_name, (str, unicode)):
raise TypeError('dset_name should be a string')
dset_name = dset_name.strip()
if len(dset_name) == 0:
raise ValueError('dset_name cannot be empty!')
if '-' in dset_name:
warn('dset_name should not contain the "-" character. Reformatted name from:{} to '
'{}'.format(dset_name, dset_name.replace('-', '_')))
dset_name = dset_name.replace('-', '_')
if dset_name in h5_group.keys():
if isinstance(h5_group[dset_name], h5py.Dataset):
warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))
h5_new_dset = h5_group[dset_name]
# Make sure it has the correct shape and dtype
if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):
warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '
'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,
source_dset.shape,
h5_new_dset.dtype,
dtype))
del h5_new_dset, h5_group[dset_name]
h5_new_dset = h5_group.create_dataset(dset_name, shape=source_dset.shape, dtype=dtype,
compression=source_dset.compression, chunks=source_dset.chunks)
else:
raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),
h5_group.name))
else:
h5_new_dset = h5_group.create_dataset(dset_name, shape=source_dset.shape, dtype=dtype,
compression=source_dset.compression, chunks=source_dset.chunks)
# This should link the ancillary datasets correctly
h5_new_dset = copy_attributes(source_dset, h5_new_dset, skip_refs=skip_refs)
h5_new_dset.attrs.update(new_attrs)
if check_if_main(h5_new_dset):
from .pycro_data import PycroDataset
h5_new_dset = PycroDataset(h5_new_dset)
# update book keeping attributes
write_book_keeping_attrs(h5_new_dset)
return h5_new_dset
def copy_attributes(source, dest, skip_refs=True):
# TODO: VERY confusing - why call copy_region_refs() AND copy region refs here???
"""
Copy attributes from one h5object to another
Parameters
----------
source : h5py.Dataset, h5py.Group, or h5py.File object
Object containing the desired attributes
dest : h5py.Dataset, h5py.Group, or h5py.File object
Object to which the attributes need to be copied to
skip_refs : bool, optional. default = True
Whether or not the references (dataset and region) should be skipped
"""
if not isinstance(source, (h5py.Dataset, h5py.Group, h5py.File)):
raise TypeError('source should be a h5py.Dataset, h5py.Group,or h5py.File object')
if not isinstance(dest, (h5py.Dataset, h5py.Group, h5py.File)):
raise TypeError('dest should be a h5py.Dataset, h5py.Group, or h5py.File object')
for att_name in source.attrs.keys():
att_val = get_attr(source, att_name)
"""
Don't copy references unless asked
"""
if isinstance(att_val, h5py.Reference):
if not skip_refs and not isinstance(dest, h5py.Dataset):
warn('Skipping region reference named: {}'.format(att_name))
continue
elif isinstance(att_val, h5py.RegionReference):
# """
# Dereference old reference, get the appropriate data
# slice and create new reference.
# """
# try:
# region = h5py.h5r.get_region(att_val, source.id)
#
# start, end = region.get_select_bounds()
# ref_slice = []
# for i in range(len(start)):
# if start[i] == end[i]:
# ref_slice.append(start[i])
# else:
# ref_slice.append(slice(start[i], end[i]))
# except:
# warn('Could not copy region reference:{} to {}'.format(att_name, dest.name))
# continue
#
# dest.attrs[att_name] = dest.regionref[tuple(ref_slice)]
continue
else:
dest.attrs[att_name] = att_val
continue
# everything else
dest.attrs[att_name] = clean_string_att(att_val)
if not skip_refs:
try:
copy_region_refs(source, dest)
except TypeError:
print('Could not copy region references to {}.'.format(dest.name))
return dest
def check_if_main(h5_main, verbose=False):
"""
    Checks the input dataset to see if it has all the necessary
features to be considered a Main dataset. This means it is
2D and has the following attributes
Position_Indices
Position_Values
Spectroscopic_Indices
Spectroscopic_Values
    In addition, the shapes of the ancillary matrices should match with that of h5_main,
    and it should have the 'quantity' and 'units' attributes
Parameters
----------
h5_main : HDF5 Dataset
Dataset of interest
verbose : Boolean (Optional. Default = False)
Whether or not to print statements
Returns
-------
success : Boolean
True if all tests pass
"""
# Check that h5_main is a dataset
success = isinstance(h5_main, h5py.Dataset)
if not success:
if verbose:
print('{} is not an HDF5 Dataset object.'.format(h5_main))
return success
h5_name = h5_main.name.split('/')[-1]
# Check dimensionality
success = np.all([success, len(h5_main.shape) == 2])
if not success:
if verbose:
print('{} is not 2D.'.format(h5_name))
return success
# Check for Datasets
dset_names = ['Position_Indices', 'Position_Values',
'Spectroscopic_Indices', 'Spectroscopic_Values']
for name in dset_names:
try:
h5_anc_dset = h5_main.file[h5_main.attrs[name]]
success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])
except:
if verbose:
print('{} not found as an attribute of {}.'.format(name, h5_name))
return False
attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])
if not attr_success:
if verbose:
print('{} does not have the mandatory "quantity" and "units" attributes'.format(h5_main.name))
return False
# Blindly linking four datasets is still not sufficient. The sizes need to match:
anc_shape_match = list()
h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]
h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]
anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))
for anc_dset in [h5_pos_vals, h5_pos_inds]:
anc_shape_match.append(
|
np.all(h5_main.shape[0] == anc_dset.shape[0])
|
numpy.all
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import copy
import unittest
import numpy as np
import pandas as pd
from pandas import DataFrame
from numpy.testing import assert_equal
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.utils import pandas_sort, pandas_concat
from trackpy.linking import (link, link_iter, link_df_iter, verify_integrity,
SubnetOversizeException, Linker, link_partial)
from trackpy.linking.subnetlinker import subnet_linker_recursive
from trackpy.tests.common import assert_traj_equal, StrictTestCase
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise unittest.SkipTest('numba not installed. Skipping.')
SKLEARN_AVAILABLE = True
try:
from sklearn.neighbors import BallTree
except ImportError:
SKLEARN_AVAILABLE = False
def _skip_if_no_sklearn():
if not SKLEARN_AVAILABLE:
raise unittest.SkipTest('Scikit-learn not installed. Skipping.')
def unit_steps():
return pd.DataFrame(dict(x=np.arange(5), y=5, frame=np.arange(5)))
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.abs(np.diff(random_x)).max()
def random_walk_legacy():
return pd.DataFrame(dict(x=random_x, y=0, frame=np.arange(5)))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11, -10:11] * 2.
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pandas_concat([pts0, pts1], ignore_index=True)
allpts.x += 200 # Because BTree doesn't allow negative coordinates
allpts.y += 200
return allpts
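# --- Illustrative sketch (not part of the original tests) ---
# A quick numeric check of the geometry described in contracting_grid()'s
# docstring: before the +200 shift the outermost ring sits at +/-20 in frame 0
# and contracts to +/-18 in frame 1, i.e. onto the second-outermost ring.
def _example_contracting_grid_geometry():
    pts = contracting_grid()
    f0 = pts[pts.frame == 0]
    f1 = pts[pts.frame == 1]
    assert len(f0) == len(f1) == 441
    assert abs((f0.x.max() - 200) - 20.0) < 1e-9
    assert abs((f1.x.max() - 200) - 18.0) < 1e-9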
class CommonTrackingTests(StrictTestCase):
def setUp(self):
self.linker_opts = dict(link_strategy='recursive')
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link(f, 5)
assert_traj_equal(actual, expected)
def test_output_dtypes(self):
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
# Integer-typed input
        f['frame'] = f['frame'].astype(np.int64)
actual = self.link(f, 5)
# Particle and frame columns should be integer typed
assert np.issubdtype(actual['particle'], np.integer)
assert np.issubdtype(actual['frame'], np.integer)
# Float-typed input
        f['frame'] = f['frame'].astype(np.float64)
actual = self.link(f, 5)
# Particle and frame columns should be integer typed
assert np.issubdtype(actual['particle'], np.integer)
assert np.issubdtype(actual['frame'], np.integer)
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle
# labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pandas_concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
pandas_sort(expected, ['particle', 'frame'], inplace=True)
actual = self.link(f, 5)
assert_traj_equal(actual, expected)
# Sort rows by frame (normal use)
actual = self.link(pandas_sort(f, 'frame'), 5)
assert_traj_equal(actual, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
        f1 = f1.reindex(np.random.permutation(f1.index))
actual = self.link(f1, 5)
assert_traj_equal(actual, expected)
def test_two_isolated_steppers_one_gapped(self):
N = 5
Y = 25
# Begin second feature one frame later than the first,
# so the particle labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
a = a.drop(3).reset_index(drop=True)
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
'frame': np.arange(1, N)})
f = pandas_concat([a, b])
expected = f.copy()
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
pandas_sort(expected, ['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link(f, 5)
assert_traj_equal(actual, expected)
# link_df_iter() tests not performed, because hash_size is
# not knowable from the first frame alone.
# Sort rows by frame (normal use)
actual = self.link(pandas_sort(f, 'frame'), 5)
assert_traj_equal(actual, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
        f1 = f1.reindex(np.random.permutation(f1.index))
actual = self.link(f1, 5)
assert_traj_equal(actual, expected)
def test_isolated_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N), 'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1), 'frame': np.arange(1, N)})
f = pandas_concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
pandas_sort(expected, ['particle', 'frame'], inplace=True)
actual = self.link(f, 5)
assert_traj_equal(actual, expected)
# Many 2D random walks
np.random.seed(0)
initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
import itertools
c = itertools.count()
def walk(x, y):
i = next(c)
return DataFrame({'x': x + random_walk(N - i),
'y': y + random_walk(N - i),
'frame': np.arange(i, N)})
f = pandas_concat([walk(*pos) for pos in initial_positions])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
pandas_sort(expected, ['particle', 'frame'], inplace=True)
actual = self.link(f, 5)
assert_traj_equal(actual, expected)
def test_start_at_frame_other_than_zero(self):
# One 1D stepper
N = 5
FIRST_FRAME = 3
f = DataFrame({'x':
|
np.arange(N)
|
numpy.arange
|
import rosbag
import sys
import pickle
import pdb
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
from matplotlib.animation import FuncAnimation
import scipy.io as sio
import os
sys.path.append('../src/pyFun')
from tempfile import TemporaryFile
import glob
from nav_msgs.msg import Odometry as stateTB
from geometry_msgs.msg import Twist
from main import getMOMDP
from MOMDP import MOMDP, MOMDP_TOQ, MOMDP_TO, MOMDP_Q
matplotlib.rcParams.update({'font.size': 22})
# newest = max(glob.iglob('/home/drew/rosbag_exp/*.bag'), key=os.path.getctime)
# print("Open: ", newest)
# bag = rosbag.Bag(newest)
# bagNoBarrier = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-13-22-12.bag')
# bagNoBarrier = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-13-22-12.bag')
# bag = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-13-24-26.bag')
# # Exp 1
# bagNoBarrier = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-21-19-57-44.bag')
# bag = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-21-19-59-41.bag')
#
# Exp 2
# bag = rosbag.Bag('/home/ugo/expDataFinal/expComp_2/_2020-11-21-20-16-55.bag')
# bagNoBarrier = rosbag.Bag('/home/ugo/expDataFinal/expComp_2/_2020-11-21-20-18-57.bag')
# # Exp 3
# bag = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-11-31-39.bag')
# bagNoBarrier = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-10-53-04.bag')
# # Exp 4
# bag = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-13-45-54.bag')
# bagNoBarrier = rosbag.Bag('/home/drew/rosbag_exp/_2020-11-23-13-41-34.bag')
# Video 2 exp 2
# bag = rosbag.Bag('/home/ugo/segExp/test_2_video_2/_2020-11-23-13-49-16.bag')
# bagNoBarrier = rosbag.Bag('/home/segExp/ugo/test_2_video_2/_2020-11-23-13-47-15.bag')
# test 5
bag = rosbag.Bag('/home/ugo/expDataSeg/test_5/_2020-11-23-13-49-16.bag')
bagNoBarrier = rosbag.Bag('/home/ugo/expDataSeg/test_5/_2020-11-23-13-22-12.bag')
# # test 4
# bagNoBarrier = rosbag.Bag('/home/ugo/expDataSeg/test_4/_2020-11-23-10-53-04.bag')
# bag = rosbag.Bag('/home/ugo/expDataSeg/test_4/_2020-11-23-11-31-39.bag')
# test 3
bagNoBarrier = rosbag.Bag('/home/ugo/expDataSeg/test_3/_2020-11-23-13-47-15.bag')
bag = rosbag.Bag('/home/ugo/expDataSeg/test_3/_2020-11-23-13-49-16.bag')
x_start = 0.5
y_start = 4.5
dt_mpc = 0.05
def getPred(optSol):
xPred = []
yPred = []
thetaPred = []
vPred = []
thetaDotPred = []
psiPred = []
psiDotPred = []
u1Pred = []
u2Pred = []
nx = 7; nu = 2; N = 40;
for j in range(0,N+1):
xPred.append(optSol[j*nx + 0])
yPred.append(optSol[j*nx + 1])
thetaPred.append(optSol[j*nx + 2])
vPred.append(optSol[j*nx + 3])
thetaDotPred.append(optSol[j*nx + 4])
psiPred.append(optSol[j*nx + 5])
psiDotPred.append(optSol[j*nx + 6])
for j in range(0,N):
u1Pred.append(optSol[(N+1)*nx + j*nu + 0])
u2Pred.append(optSol[(N+1)*nx + j*nu + 1])
return xPred, yPred, thetaPred, vPred, thetaDotPred, psiPred, psiDotPred, u1Pred, u2Pred
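# --- Illustrative sketch (not part of the original script) ---
# getPred() assumes the flat solver output is laid out as (N+1)*nx stacked
# states followed by N*nu stacked inputs, with nx = 7, nu = 2, N = 40.
# The synthetic vector below is made up purely to demonstrate that unpacking.
def _example_getPred_layout():
    nx, nu, N = 7, 2, 40
    optSol = np.arange((N + 1) * nx + N * nu, dtype=float)
    xPred, yPred, thetaPred, vPred, thetaDotPred, psiPred, psiDotPred, u1Pred, u2Pred = getPred(optSol)
    assert len(xPred) == N + 1 and len(u1Pred) == N
    assert xPred[1] == nx              # state j starts at index j*nx
    assert u1Pred[0] == (N + 1) * nx   # inputs start right after the states
    return xPred, u1Pred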
input = 'y'  # raw_input("Do you want to plot mid-level data? [y/n] ")
if input == 'y':
tmin = 7+9
tmax = 44+9
dt_ll = 1/800.0
h_val_noBarrier = []
delay_t_noBarrier = []
t_lowLevel_noBarrier = []
uTot_noBarrieri = []
timeCounter = []
for topic, msg, t in bagNoBarrier.read_messages(topics=['/cyberpod/ctrl_info']):
timeCounter.append((len(timeCounter))*dt_ll)
if (timeCounter[-1] > tmin) and (timeCounter[-1] < tmax):
delay_t_noBarrier.append(msg.data[0])
uTot_noBarrieri.append([msg.data[1], msg.data[2]])
h_val_noBarrier.append(msg.data[7])
t_lowLevel_noBarrier.append((len(t_lowLevel_noBarrier))*dt_ll)
uTot = []
uCBF = []
uMPC = []
h_val = []
t_lowLevel = []
delay_t = []
timeCounter = []
for topic, msg, t in bag.read_messages(topics=['/cyberpod/ctrl_info']):
timeCounter.append((len(timeCounter))*dt_ll)
if (timeCounter[-1] > tmin) and (timeCounter[-1] < tmax):
delay_t.append(msg.data[0])
uTot.append([msg.data[1], msg.data[2]])
uMPC.append([msg.data[3], msg.data[4]])
uCBF.append([msg.data[5], msg.data[6]])
h_val.append(msg.data[7])
t_lowLevel.append((len(t_lowLevel))*dt_ll)
plt.figure(figsize=(12,10))
plt.plot(t_lowLevel_noBarrier, h_val_noBarrier, '-r', label='naive MPC')
plt.plot(t_lowLevel, h_val, '-b', label='proposed strategy')
plt.plot([t_lowLevel[0], t_lowLevel[-1]], [0, 0],'-k')
plt.xlabel('Time [s]')
plt.ylabel('h(e)')
plt.legend(loc=0)
plt.ylim(-2,1)
plt.figure()
plt.plot(t_lowLevel_noBarrier, delay_t_noBarrier, '-r', label='naive MPC')
plt.plot(t_lowLevel, delay_t, '-b', label='proposed strategy')
plt.ylabel('delay')
plt.legend()
uMPC_array = np.array(uMPC)
uCBF_array = np.array(uCBF)
uTot_array = np.array(uTot)
plt.figure(figsize=(12,10))
plt.subplot(211)
plt.plot(t_lowLevel, uMPC_array[:, 0], '-r', label='mid-level input')
plt.plot(t_lowLevel, uCBF_array[:, 0], '-k', label='low-level input')
plt.plot(t_lowLevel, uTot_array[:, 0], '-b', label='total input')
plt.xlim(30.85,31.15)
plt.ylim(-4,2)
plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=6, fontsize=18, framealpha=1)
plt.subplot(212)
plt.plot(t_lowLevel, uMPC_array[:, 1], '-r', label='mid-level input')
plt.plot(t_lowLevel, uCBF_array[:, 1], '-k', label='low-level input')
plt.plot(t_lowLevel, uTot_array[:, 1], '-b', label='total input')
plt.ylabel('Input [N/m]')
plt.xlabel('Time [s]')
plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=6, fontsize=18, framealpha=1)
plt.xlim(30.85,31.15)
plt.ylim(-4,2)
uTot_noBarrieri_array = np.array(uTot_noBarrieri)
plt.figure(figsize=(12,10))
plt.subplot(211)
plt.plot(t_lowLevel_noBarrier, uTot_noBarrieri_array[:, 0], '-b', label='tot no barrier')
plt.subplot(212)
plt.plot(t_lowLevel_noBarrier, uTot_noBarrieri_array[:, 1], '-b', label='tot no barrier')
plt.ylabel('input')
plt.legend()
## =======================================================
## Read and plot INPUT
## =======================================================
inputVector = []
u1=[]
u2=[]
time_u = []
for topic, msg, t in bag.read_messages(topics=['/cyberpod/input']):
inputVector.append(msg.input)
if np.abs(msg.input[0]) < 20:
u1.append(msg.input[0])
u2.append(msg.input[1])
else:
u1.append(20.0)
u2.append(20.0)
time_u.append((len(time_u))*0.001)
plt.figure()
plt.plot(time_u, u1, label='u1')
plt.plot(time_u, u2, label='u2')
plt.ylabel('input')
plt.legend()
## =======================================================
## Read and plot STATE
## =======================================================
state = []
time_state = []
for topic, msg, t in bag.read_messages(topics=['/cyberpod/state']):
# state_t = [msg.x, msg.y, msg.theta, msg.v, msg.thetaDot, msg.psi, msg.psiDot]
state_t = [msg.state[0]+x_start, msg.state[1]+y_start, msg.state[2], msg.state[3], msg.state[4], msg.state[5], msg.state[6]]
state.append(state_t)
time_state.append((len(time_state))*0.001)
state_array = np.array(state)
# plt.figure()
# plt.subplot(711)
# plt.plot(time_state, state_array[:,0], label='x')
# plt.subplot(712)
# plt.plot(time_state, state_array[:,1], label='x')
# plt.subplot(713)
# plt.plot(time_state, state_array[:,2], label='x')
# plt.subplot(714)
# plt.plot(time_state, state_array[:,3], label='x')
# plt.subplot(715)
# plt.plot(time_state, state_array[:,4], label='x')
# plt.subplot(716)
# plt.plot(time_state, state_array[:,5], label='x')
# plt.subplot(717)
# plt.plot(time_state, state_array[:,6], label='x')
# plt.legend()
## =======================================================
## Read and plot PRED TRAJECTORY
## =======================================================
optSol = []
time_optSol = []
solverFlag = []
solverTime = []
xGoal = []
yGoal = []
xCurr = []
x_IC = []
for topic, msg, t in bag.read_messages(topics=['/cyberpod/optimal_sol']):
optSol.append(msg.optimalSolution)
time_optSol.append((len(time_optSol))*dt_mpc)
solverFlag.append(msg.solverFlag)
solverTime.append(msg.solverTime)
xGoal.append(msg.x)
yGoal.append(msg.y)
x_IC.append(msg.x_IC)
xCurr.append(msg.xCurr)
delay_ms = msg.delay_ms
error = []
print("================== delay_ms: ", delay_ms)
for i in range(1, len(xCurr)):
if delay_ms > -0.5:
error.append((np.array(xCurr[i])-np.array(optSol[i-1][0:7])).tolist())
else:
error.append((np.array(xCurr[i])-np.array(optSol[i-1][7:14])).tolist())
error_array = np.array(error)  # api: numpy.array
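# --- Illustrative sketch (not part of the original bag script) ---
# The loop above compares each measured state xCurr[i] against the prediction stored one
# MPC step earlier, taking the first predicted state (optSol[i-1][0:7]) when delay
# compensation is active (delay_ms > -0.5) and the second predicted state otherwise.
# The helper and toy arrays below are hypothetical and only demonstrate that indexing.
import numpy as np

def prediction_error_sketch(xCurr, optSol, delay_ms, n_x=7):
    """Toy re-implementation of the delay-dependent prediction-error computation."""
    errors = []
    for i in range(1, len(xCurr)):
        if delay_ms > -0.5:
            predicted = np.array(optSol[i - 1][0:n_x])          # first predicted state
        else:
            predicted = np.array(optSol[i - 1][n_x:2 * n_x])    # second predicted state
        errors.append(np.array(xCurr[i]) - predicted)
    return np.array(errors)

# Hypothetical data: 3 measured 7-dim states and 2 stored solutions of length >= 14.
_xCurr = [np.zeros(7), np.ones(7), 2.0 * np.ones(7)]
_optSol = [np.arange(14.0), np.arange(14.0) + 1.0]
print(prediction_error_sketch(_xCurr, _optSol, delay_ms=1.0).shape)  # (2, 7)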
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.animation as animation
from mpl_toolkits import axes_grid1
from DeepSparseCoding.tf1x.utils import data_processing as dp
def plot_ellipse(axis, center, shape, angle, color_val="auto", alpha=1.0, lines=False,
fill_ellipse=False):
"""
Add an ellipse to given axis
Inputs:
axis [matplotlib.axes._subplots.AxesSubplot] axis on which ellipse should be drawn
center [tuple or list] specifying [y, x] center coordinates
shape [tuple or list] specifying [width, height] shape of ellipse
angle [float] specifying angle of ellipse
color_val [matplotlib color spec] specifying the color of the edge & face of the ellipse
alpha [float] specifying the transparency of the ellipse
lines [bool] if true, output will be a line, where the secondary axis of the ellipse
is collapsed
fill_ellipse [bool] if true and lines is false then a filled ellipse will be plotted
Outputs:
ellipse [matplotlib.patches.ellipse] ellipse object
"""
if fill_ellipse:
face_color_val = "none" if color_val=="auto" else color_val
else:
face_color_val = "none"
y_cen, x_cen = center
width, height = shape
if lines:
min_length = 0.1
if width < height:
width = min_length
elif width > height:
height = min_length
ellipse = matplotlib.patches.Ellipse(xy=[x_cen, y_cen], width=width,
height=height, angle=angle, edgecolor=color_val, facecolor=face_color_val,
alpha=alpha, fill=True)
axis.add_artist(ellipse)
ellipse.set_clip_box(axis.bbox)
return ellipse
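# Hedged usage sketch (not part of the original module): draw a single ellipse on a blank
# 16x16 patch. Relies only on the numpy/matplotlib imports above; the demo function name
# is hypothetical.
def _example_plot_ellipse():
    fig, ax = plt.subplots(1, figsize=(4, 4))
    ax.imshow(np.zeros((16, 16)), cmap="Greys_r")
    # center is [y, x], shape is [width, height], angle is in degrees
    plot_ellipse(ax, center=[8, 8], shape=[6, 3], angle=30.0, color_val="b", alpha=0.8)
    plt.show()
    return fig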
def plot_ellipse_summaries(bf_stats, num_bf=-1, lines=False, rand_bf=False):
"""
Plot basis functions with summary ellipses drawn over them
Inputs:
bf_stats [dict] output of dp.get_dictionary_stats()
num_bf [int] number of basis functions to plot (<=0 is all; >total is all)
lines [bool] If true, will plot lines instead of ellipses
rand_bf [bool] If true, will choose a random set of basis functions
"""
tot_num_bf = len(bf_stats["basis_functions"])
if num_bf <= 0 or num_bf > tot_num_bf:
num_bf = tot_num_bf
SFs = np.asarray([np.sqrt(fcent[0]**2 + fcent[1]**2)
for fcent in bf_stats["fourier_centers"]], dtype=np.float32)
sf_sort_indices = np.argsort(SFs)
if rand_bf:
bf_range = np.random.choice([i for i in range(tot_num_bf)], num_bf, replace=False)
num_plots_y = int(np.ceil(np.sqrt(num_bf)))
num_plots_x = int(np.ceil(np.sqrt(num_bf)))
gs = gridspec.GridSpec(num_plots_y, num_plots_x)
fig = plt.figure(figsize=(17,17))
filter_idx = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
ax = clear_axis(fig.add_subplot(gs[plot_id]))
if filter_idx < tot_num_bf and filter_idx < num_bf:
if rand_bf:
bf_idx = bf_range[filter_idx]
else:
bf_idx = filter_idx
bf = bf_stats["basis_functions"][bf_idx]
ax.imshow(bf, interpolation="Nearest", cmap="Greys_r")
ax.set_title(str(bf_idx), fontsize="8")
center = bf_stats["gauss_centers"][bf_idx]
evals, evecs = bf_stats["gauss_orientations"][bf_idx]
orientations = bf_stats["fourier_centers"][bf_idx]
angle = np.rad2deg(np.pi/2 + np.arctan2(*orientations))
alpha = 1.0
ellipse = plot_ellipse(ax, center, evals, angle, color_val="b", alpha=alpha, lines=lines)
filter_idx += 1
ax.set_aspect("equal")
plt.show()
return fig
def plot_pooling_summaries(bf_stats, pooling_filters, num_pooling_filters,
num_connected_weights=None, lines=False, figsize=None):
"""
Plot 2nd layer (fully-connected) weights in terms of connection strengths to 1st layer weights
Inputs:
bf_stats [dict] output of dp.get_dictionary_stats() which was run on the 1st layer weights
pooling_filters [np.ndarray] 2nd layer weights
should be shape [num_1st_layer_neurons, num_2nd_layer_neurons]
num_pooling_filters [int] How many 2nd layer neurons to plot
num_connected_weights [int] How many 1st layer weight summaries to include
for a given 2nd layer neuron
lines [bool] if True, 1st layer weight summaries will appear as lines instead of ellipses
"""
num_inputs = bf_stats["num_inputs"]
num_outputs = bf_stats["num_outputs"]
tot_pooling_filters = pooling_filters.shape[1]
patch_edge_size = np.int32(np.sqrt(num_inputs))
filter_idx_list = np.arange(num_pooling_filters, dtype=np.int32)
assert num_pooling_filters <= num_outputs, (
"num_pooling_filters must be less than or equal to bf_stats['num_outputs']")
if num_connected_weights is None:
num_connected_weights = num_inputs
cmap = plt.get_cmap('bwr')
cNorm = matplotlib.colors.SymLogNorm(linthresh=0.03, linscale=0.01, vmin=-1.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
num_plots_y = np.int32(np.ceil(np.sqrt(num_pooling_filters)))
num_plots_x = np.int32(np.ceil(np.sqrt(num_pooling_filters)))+1 # +cbar col
gs_widths = [1 for _ in range(num_plots_x-1)]+[0.3]
gs = gridspec.GridSpec(num_plots_y, num_plots_x, width_ratios=gs_widths)
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
filter_total = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x-1)):
(y_id, x_id) = plot_id
ax = fig.add_subplot(gs[plot_id])
if (filter_total < num_pooling_filters and x_id != num_plots_x-1):
ax = clear_axis(ax, spines="k")
filter_idx = filter_idx_list[filter_total]
example_filter = pooling_filters[:, filter_idx]
top_indices = np.argsort(np.abs(example_filter))[::-1] #descending
filter_norm = np.max(np.abs(example_filter))
SFs = np.asarray([np.sqrt(fcent[0]**2 + fcent[1]**2)
for fcent in bf_stats["fourier_centers"]], dtype=np.float32)
# Plot weakest of the top connected filters first because of occlusion
for bf_idx in top_indices[:num_connected_weights][::-1]:
connection_strength = example_filter[bf_idx]/filter_norm
color_val = scalarMap.to_rgba(connection_strength)
center = bf_stats["gauss_centers"][bf_idx]
evals, evecs = bf_stats["gauss_orientations"][bf_idx]
angle = np.rad2deg(np.pi/2 + bf_stats["ellipse_orientations"][bf_idx])
alpha = 0.5
ellipse = plot_ellipse(ax, center, evals, angle, color_val, alpha=alpha, lines=lines)
ax.set_xlim(0, patch_edge_size-1)
ax.set_ylim(patch_edge_size-1, 0)
filter_total += 1
else:
ax = clear_axis(ax, spines="none")
ax.set_aspect("equal")
scalarMap._A = []
ax = clear_axis(fig.add_subplot(gs[0, -1]))
cbar = fig.colorbar(scalarMap, ax=ax, ticks=[-1, 0, 1])
cbar.ax.set_yticklabels(["-1", "0", "1"])
for label in cbar.ax.yaxis.get_ticklabels():
label.set_weight("bold")
label.set_fontsize(14)
plt.show()
return fig
def plot_pooling_centers(bf_stats, pooling_filters, num_pooling_filters, num_connected_weights=None,
filter_indices=None, spot_size=10, figsize=None):
"""
Plot 2nd layer (fully-connected) weights in terms of spatial/frequency centers of
1st layer weights
Inputs:
bf_stats [dict] Output of dp.get_dictionary_stats() which was run on the 1st layer weights
pooling_filters [np.ndarray] 2nd layer weights
should be shape [num_1st_layer_neurons, num_2nd_layer_neurons]
num_pooling_filters [int] How many 2nd layer neurons to plot
num_connected_weights [int] How many 1st layer neurons to plot
spot_size [int] How big to make the points
filter_indices [list] indices to plot from pooling_filters. len should equal num_pooling_filters
set to None for default, which is a random selection
figsize [tuple] Containing the (width, height) of the figure, in inches.
Set to None for default figure size
"""
num_filters_y = int(np.ceil(np.sqrt(num_pooling_filters)))
num_filters_x = int(np.ceil(np.sqrt(num_pooling_filters)))
tot_pooling_filters = pooling_filters.shape[1]
if filter_indices is None:
filter_indices = np.random.choice(tot_pooling_filters, num_pooling_filters, replace=False)
else:
assert len(filter_indices) == num_pooling_filters, (
"len(filter_indices) must equal num_pooling_filters")
if num_connected_weights is None:
num_connected_weights = bf_stats["num_inputs"]
cmap = plt.get_cmap(bgr_colormap())# Could also use "nipy_spectral", "coolwarm", "bwr"
cNorm = matplotlib.colors.SymLogNorm(linthresh=0.03, linscale=0.01, vmin=-1.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
x_p_cent = [x for (y,x) in bf_stats["gauss_centers"]]# Get raw points
y_p_cent = [y for (y,x) in bf_stats["gauss_centers"]]
x_f_cent = [x for (y,x) in bf_stats["fourier_centers"]]
y_f_cent = [y for (y,x) in bf_stats["fourier_centers"]]
max_sf = np.max(np.abs(x_f_cent+y_f_cent))
pair_w_gap = 0.01
group_w_gap = 0.03
h_gap = 0.03
plt_w = (num_filters_x/num_pooling_filters)
plt_h = plt_w
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize) #figsize is (w,h)
axes = []
filter_id = 0
for plot_id in np.ndindex((num_filters_y, num_filters_x)):
if all(pid == 0 for pid in plot_id):
axes.append(clear_axis(fig.add_axes([0, plt_h+h_gap, 2*plt_w, plt_h])))
scalarMap._A = []
cbar = fig.colorbar(scalarMap, ax=axes[-1], ticks=[-1, 0, 1], aspect=10, location="bottom")
cbar.ax.set_xticklabels(["-1", "0", "1"])
cbar.ax.xaxis.set_ticks_position("top")
cbar.ax.xaxis.set_label_position("top")
for label in cbar.ax.xaxis.get_ticklabels():
label.set_weight("bold")
label.set_fontsize(10 if figsize is None else 10 + figsize[0])
if (filter_id < num_pooling_filters):
example_filter = pooling_filters[:, filter_indices[filter_id]]
top_indices = np.argsort(np.abs(example_filter))[::-1] #descending
selected_indices = top_indices[:num_connected_weights][::-1] #select top, plot weakest first
filter_norm = np.max(np.abs(example_filter))
connection_colors = [scalarMap.to_rgba(example_filter[bf_idx]/filter_norm)
for bf_idx in range(bf_stats["num_outputs"])]
if num_connected_weights < top_indices.size:
black_indices = top_indices[num_connected_weights:][::-1]
xp = [x_p_cent[i] for i in black_indices]+[x_p_cent[i] for i in selected_indices]
yp = [y_p_cent[i] for i in black_indices]+[y_p_cent[i] for i in selected_indices]
xf = [x_f_cent[i] for i in black_indices]+[x_f_cent[i] for i in selected_indices]
yf = [y_f_cent[i] for i in black_indices]+[y_f_cent[i] for i in selected_indices]
c = [(0.1,0.1,0.1,1.0) for i in black_indices]+[connection_colors[i] for i in selected_indices]
else:
xp = [x_p_cent[i] for i in selected_indices]
yp = [y_p_cent[i] for i in selected_indices]
xf = [x_f_cent[i] for i in selected_indices]
yf = [y_f_cent[i] for i in selected_indices]
c = [connection_colors[i] for i in selected_indices]
(y_id, x_id) = plot_id
if x_id == 0:
ax_l = 0
ax_b = - y_id * (plt_h+h_gap)
else:
bbox = axes[-1].get_position().get_points()[0]#bbox is [[x0,y0],[x1,y1]]
prev_l = bbox[0]
prev_b = bbox[1]
ax_l = prev_l + plt_w + group_w_gap
ax_b = prev_b
ax_w = plt_w
ax_h = plt_h
axes.append(clear_axis(fig.add_axes([ax_l, ax_b, ax_w, ax_h])))
axes[-1].invert_yaxis()
axes[-1].scatter(xp, yp, c=c, s=spot_size, alpha=0.8)
axes[-1].set_xlim(0, bf_stats["patch_edge_size"]-1)
axes[-1].set_ylim(bf_stats["patch_edge_size"]-1, 0)
axes[-1].set_aspect("equal")
axes[-1].set_facecolor("w")
axes.append(clear_axis(fig.add_axes([ax_l+ax_w+pair_w_gap, ax_b, ax_w, ax_h])))
axes[-1].scatter(xf, yf, c=c, s=spot_size, alpha=0.8)
axes[-1].set_xlim([-max_sf, max_sf])
axes[-1].set_ylim([-max_sf, max_sf])
axes[-1].xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
axes[-1].yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
axes[-1].set_aspect("equal")
axes[-1].set_facecolor("w")
#histogram - note: axis widths/heights are not setup for a third plot
#axes.append(fig.add_axes([ax_l+ax_w+pair_w_gap, ax_b, ax_w, ax_h]))
#axes[-1].set_yticklabels([])
#axes[-1].tick_params(axis="y", bottom="off", top="off", left="off", right="off")
#axes[-1].hist([example_filter[bf_idx]/filter_norm for bf_idx in range(bf_stats["num_outputs"])])
filter_id += 1
plt.show()
return fig
def plot_top_bases(a_cov, weights, bf_indices, num_top_cov_bases):
"""
Plot the top correlated bases for basis functions indexed in bf_indices
Inputs:
a_cov [np.ndarray]
weights [np.ndarray] of shape [num_inputs, num_outputs]
bf_indices [list] of basis functions indices
num_top_cov_bases [int] number of top correlated basis functions to plot
"""
num_bases = len(bf_indices)
fig = plt.figure(figsize=(num_top_cov_bases+2, num_bases))
gs = gridspec.GridSpec(num_bases, num_top_cov_bases+2, hspace=0.6)
for x_id in range(num_bases):
primary_bf_idx = bf_indices[x_id]
sorted_cov_indices = np.argsort(a_cov[primary_bf_idx, :])[-2::-1]
primary_bf = np.squeeze(dp.reshape_data(weights.T[primary_bf_idx,...], flatten=False)[0])
ax = plt.subplot(gs[x_id,0])
ax.imshow(primary_bf, cmap="Greys_r", interpolation="nearest")
ax.tick_params(axis="both", bottom="off", top="off",
left="off", right="off")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
[i.set_linewidth(3.0) for i in ax.spines.values()]
strengths = []
for y_id, bf_idx in enumerate(sorted_cov_indices[:num_top_cov_bases]):
bf = np.squeeze(dp.reshape_data(weights.T[bf_idx,...],
flatten=False)[0])
ax = plt.subplot(gs[x_id, y_id+1])
ax.imshow(bf, cmap="Greys_r", interpolation="nearest")
ax.tick_params(axis="both", bottom="off", top="off", left="off", right="off")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
strengths.append(a_cov[primary_bf_idx, bf_idx])
ax = plt.subplot(gs[x_id, -1])
ax.plot(strengths)
ax.set_xticklabels([])
ylims = ax.get_ylim()
ax.set_yticks([0, ylims[1]])
ax.xaxis.set_ticks(np.arange(0, num_top_cov_bases, 1.0))
ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.2f"))
ax.yaxis.tick_right()
ax.tick_params(axis="y", bottom="off", top="off", left="off", right="off")
ax.tick_params(axis="x", direction="in")
for idx, tick in enumerate(ax.yaxis.get_majorticklabels()):
if idx == 0:
tick.set_verticalalignment("bottom")
else:
tick.set_verticalalignment("top")
plt.subplot(gs[0,0]).set_title("rand bf", horizontalalignment="center", fontsize=18);
plt.subplot(gs[0,1]).set_title("stronger correlation --$>$ weaker correlation",
horizontalalignment="left", fontsize=18);
plt.subplot(gs[0,-1]).set_title("activity covariance", horizontalalignment="center", fontsize=18)
plt.show()
return fig
def plot_bf_stats(bf_stats, num_bf=2):
"""
Plot outputs of the dp.get_dictionary_stats()
Inputs:
bf_stats [dict] output of dp.get_dictionary_stats()
num_bf [int] number of basis functions to plot
"""
tot_num_bf = len(bf_stats["basis_functions"])
bf_idx_list = np.random.choice(tot_num_bf, num_bf, replace=False)
fig, sub_ax = plt.subplots(num_bf, 5, figsize=(15,15))
for plot_id in range(int(num_bf)):
bf_idx = bf_idx_list[plot_id]
# Basis function in pixel space
bf = bf_stats["basis_functions"][bf_idx]
sub_ax[plot_id, 0].imshow(bf, cmap="Greys_r", interpolation="Nearest")
sub_ax[plot_id, 0] = clear_axis(sub_ax[plot_id, 0], spines="k")
sub_ax[plot_id, 0].set_title(str(bf_idx), fontsize="8")
# Hilbert envelope
env = bf_stats["envelopes"][bf_idx]
sub_ax[plot_id, 1].imshow(env, cmap="Greys_r", interpolation="Nearest")
sub_ax[plot_id, 1] = clear_axis(sub_ax[plot_id, 1], spines="k")
# Fourier transform of basis function
fourier = bf_stats["fourier_maps"][bf_idx]
sub_ax[plot_id, 2].imshow(fourier, cmap="Greys_r", interpolation="Nearest")
sub_ax[plot_id, 2] = clear_axis(sub_ax[plot_id, 2], spines="k")
sub_ax[plot_id, 2].spines["left"].set_position("center")
sub_ax[plot_id, 2].spines["left"].set_linewidth(2.5)
sub_ax[plot_id, 2].spines["bottom"].set_position("center")
sub_ax[plot_id, 2].spines["bottom"].set_linewidth(2.5)
sub_ax[plot_id, 2].spines["top"].set_color("none")
sub_ax[plot_id, 2].spines["right"].set_color("none")
sub_ax[plot_id, 2].set_ylim([fourier.shape[0]-1, 0])
sub_ax[plot_id, 2].set_xlim([0, fourier.shape[1]-1])
# Fourier summary stats
sub_ax[plot_id, 3].imshow(bf, interpolation="Nearest", cmap="Greys_r")
center = bf_stats["gauss_centers"][bf_idx]
evals, evecs = bf_stats["gauss_orientations"][bf_idx]
orientation = bf_stats["fourier_centers"][bf_idx]
angle = np.rad2deg(np.pi/2 + np.arctan2(*orientation))
alpha = 1.0
color_val = "b"
ellipse = plot_ellipse(sub_ax[plot_id, 3], center, evals, angle, color_val, alpha)
sub_ax[plot_id, 3] = clear_axis(sub_ax[plot_id, 3], spines="k")
sub_ax[plot_id, 4].imshow(bf, interpolation="Nearest", cmap="Greys_r")
sub_ax[plot_id, 4] = clear_axis(sub_ax[plot_id, 4], spines="k")
ellipse = plot_ellipse(sub_ax[plot_id, 4], center, evals, angle, color_val, alpha, lines=True)
sub_ax[0,0].set_title("Basis function", fontsize=12)
sub_ax[0,1].set_title("Envelope", fontsize=12)
sub_ax[0,2].set_title("Fourier map", fontsize=12)
sub_ax[0,3].set_title("Summary ellipse", fontsize=12)
sub_ax[0,4].set_title("Summary line", fontsize=12)
plt.tight_layout()
plt.show()
return fig
def plot_loc_freq_summary(bf_stats, spotsize=10, figsize=(15, 5), fontsize=16):
plt.rc('text', usetex=True)
fig = plt.figure(figsize=figsize)
gs = fig.add_gridspec(1, 3, wspace=0.3)
ax = fig.add_subplot(gs[0])
x_pos = [x for (y,x) in bf_stats["gauss_centers"]]
y_pos = [y for (y,x) in bf_stats["gauss_centers"]]
ax.scatter(x_pos, y_pos, color='k', s=spotsize)
ax.set_xlim([0, bf_stats["patch_edge_size"]-1])
ax.set_ylim([bf_stats["patch_edge_size"]-1, 0])
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
ax.set_aspect("equal")
ax.set_ylabel("Pixels", fontsize=fontsize)
ax.set_xlabel("Pixels", fontsize=fontsize)
ax.set_title("Centers", fontsize=fontsize, pad=32)
ax = fig.add_subplot(gs[1])
x_sf = [x for (y,x) in bf_stats["fourier_centers"]]
y_sf = [y for (y,x) in bf_stats["fourier_centers"]]
max_sf = np.max(np.abs(x_sf+y_sf))
ax.scatter(x_sf, y_sf, color='k', s=spotsize)
ax.set_xlim([-max_sf, max_sf])
ax.set_ylim([-max_sf, max_sf])
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
ax.set_aspect("equal")
ax.set_ylabel("Cycles / Patch", fontsize=fontsize)
ax.set_xlabel("Cycles / Patch", fontsize=fontsize)
ax.set_title("Spatial Frequencies", fontsize=fontsize, pad=32)
num_bins = 360
orientations = [np.pi + orientation
for orientation in [np.arctan2(*fyx[::-1]) for fyx in bf_stats["fourier_centers"]]]
bins = np.linspace(0, 2*np.pi, num_bins)
count, bin_edges = np.histogram(orientations, bins)
count = count / np.max(count)
bin_left, bin_right = bin_edges[:-1], bin_edges[1:]
bin_centers = bin_left + (bin_right - bin_left)/2
ax = fig.add_subplot(gs[2], polar=True)
ax.plot(bin_centers, count, linewidth=3, color='k')
ax.set_yticks([])
ax.set_thetamin(0)
ax.set_thetamax(360)
ax.set_xticks([0, np.pi/4, 2*np.pi/4, 3*np.pi/4, 4*np.pi/4,
5*np.pi/4, 6*np.pi/4, 7*np.pi/4])
ax.set_xticklabels([r"0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$",
r"$\frac{3\pi}{4}$", r"$\pi$", r"$\frac{5\pi}{4}$", r"$\frac{3\pi}{2}$",
r"$\frac{7\pi}{4}$"], fontsize=fontsize)
ax.set_title("Orientations", fontsize=fontsize, pad=23)
plt.show()
return fig
def plot_hilbert_analysis(weights, padding=None):
"""
Plot results from performing Hilbert amplitude processing on weight matrix
Inputs:
weights: [np.ndarray] with shape [num_inputs, num_outputs]
num_inputs must be a perfect square.
"""
Envelope, bff_filt, Hil_filter, bff = dp.hilbert_amplitude(weights, padding)
num_inputs, num_outputs = weights.shape
assert np.sqrt(num_inputs) == np.floor(np.sqrt(num_inputs)), (
"weights.shape[0] must have an even square root.")
patch_edge_size = int(np.sqrt(num_inputs))
N = np.int32(np.sqrt(bff_filt.shape[1]))
fig, sub_ax = plt.subplots(3, 1, figsize=(64,64))
plot_data = pad_data(weights.T.reshape((num_outputs, patch_edge_size,
patch_edge_size)))
bf_axis_image = sub_ax[0].imshow(plot_data, cmap="Greys_r",
interpolation="nearest")
sub_ax[0].tick_params(axis="both", bottom="off", top="off", left="off",
right="off")
sub_ax[0].get_xaxis().set_visible(False)
sub_ax[0].get_yaxis().set_visible(False)
sub_ax[0].set_title("Basis Functions", fontsize=20)
plot_data = pad_data(np.abs(Envelope).reshape((num_outputs,
patch_edge_size, patch_edge_size)))
hil_axis_image = sub_ax[1].imshow(plot_data, cmap="Greys_r",
interpolation="nearest")
sub_ax[1].tick_params(axis="both", bottom="off", top="off", left="off",
right="off")
sub_ax[1].get_xaxis().set_visible(False)
sub_ax[1].get_yaxis().set_visible(False)
sub_ax[1].set_title("Analytic Signal Amplitude Envelope", fontsize=20)
resh_Zf = np.abs(bff_filt).reshape((num_outputs, N, N))
output_z = np.zeros(resh_Zf.shape)
for i in range(num_outputs):
output_z[i,...] = resh_Zf[i,...] / np.max(resh_Zf[i,...])
plot_data = pad_data(output_z)
hil_axis_image = sub_ax[2].imshow(plot_data, cmap="Greys_r",
interpolation="nearest")
sub_ax[2].tick_params(axis="both", bottom="off", top="off", left="off",
right="off")
sub_ax[2].get_xaxis().set_visible(False)
sub_ax[2].get_yaxis().set_visible(False)
sub_ax[2].set_title("Fourier Amplitude Spectrum", fontsize=20)
plt.show()
return fig
def plot_image(image, vmin=None, vmax=None, title="", save_filename=None):
"""
Plot single image
Inputs:
image [np.ndarray] 2-D image
title [str] indicating the title for the figure
"""
if vmin is None:
vmin = np.min(image)
if vmax is None:
vmax = np.max(image)
fig, ax = plt.subplots(1, figsize=(10,10))
ax = clear_axis(ax)
im = ax.imshow(image, cmap="Greys_r", vmin=vmin, vmax=vmax, interpolation="nearest")
ax.set_title(title, fontsize=20)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
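# Hedged usage sketch (not part of the original module): plot_image with a random patch.
def _example_plot_image():
    image = np.random.uniform(size=(16, 16))
    return plot_image(image, title="random 16x16 patch")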
def plot_matrix(matrix, title="", save_filename=None):
"""
Plot covariance matrix as an image
Inputs:
matrix [np.ndarray] covariance matrix
title [str] indicating the title for the figure
"""
fig, ax = plt.subplots(1, figsize=(10,10))
im = ax.imshow(matrix, cmap="Greys_r", interpolation="nearest")
im.set_clim(vmin=np.min(matrix), vmax=np.max(matrix))
ax.set_title(title, fontsize=20)
add_colorbar_to_im(im)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
def plot_eigenvalues(evals, ylim=None, xlim=None):
"""
Plot the input eigenvalues
Inputs:
evals [np.ndarray]
ylim [2-D list] specifying the [min,max] of the y-axis
xlim [2-D list] specifying the [min,max] of the x-axis
"""
if xlim is None:
xlim = [0, evals.shape[0]]
if ylim is None:
ylim = [np.min(evals), np.max(evals)]
fig, ax = plt.subplots(1, figsize=(10,10))
ax.semilogy(evals)
ax.set_xlim(xlim[0], xlim[1]) # Ignore first eigenvalue
ax.set_ylim(ylim[0], ylim[1])
ax.set_yscale("log")
ax.set_title("Sorted eigenvalues of covariance matrix", fontsize=20)
plt.show()
return fig
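# Hedged usage sketch (not part of the original module): eigenvalues of a sample covariance
# matrix from random data; plot_eigenvalues expects a 1-D array of (positive) eigenvalues.
def _example_plot_eigenvalues(num_samples=500, dim=64):
    data = np.random.randn(num_samples, dim)
    cov = np.cov(data, rowvar=False)
    evals = np.sort(np.linalg.eigvalsh(cov))[::-1]  # sorted, descending
    return plot_eigenvalues(evals)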
def plot_gaussian_contours(bf_stats, num_plots):
"""
Plot basis functions with contour lines for Gaussian fits
Inputs:
bf_stats [dict] output from dp.get_dictionary_stats()
num_plots [int] indicating the number of random BFs to plot
"""
num_bf = bf_stats["num_outputs"]
bf_range = np.random.choice([i for i in range(num_bf)], num_plots)
num_plots_y = int(np.ceil(np.sqrt(num_plots)))
num_plots_x = int(np.floor(np.sqrt(num_plots)))
fig, sub_ax = plt.subplots(num_plots_y, num_plots_x, figsize=(10,10))
filter_total = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
if filter_total < num_plots:
bf_idx = bf_range[filter_total]
envelope = bf_stats["envelopes"][bf_idx]
center = bf_stats["envelope_centers"][bf_idx]
(gauss_fit, grid) = bf_stats["gauss_fits"][bf_idx]
contour_levels = 3
sub_ax[plot_id].imshow(envelope, cmap="Greys_r", extent=(0, 16, 16, 0))
sub_ax[plot_id].contour(grid[1], grid[0], gauss_fit, contour_levels, colors='b')
sub_ax[plot_id].plot(center[1], center[0], "ro")
sub_ax[plot_id].set_title("bf:"+str(bf_idx), fontsize=10)
filter_total += 1
sub_ax[plot_id].spines["right"].set_color("none")
sub_ax[plot_id].spines["top"].set_color("none")
sub_ax[plot_id].spines["left"].set_color("none")
sub_ax[plot_id].spines["bottom"].set_color("none")
sub_ax[plot_id].tick_params(axis="both", bottom="off", top="off", left="off", right="off")
sub_ax[plot_id].get_xaxis().set_visible(False)
sub_ax[plot_id].get_yaxis().set_visible(False)
sub_ax[plot_id].set_aspect("equal")
plt.show()
return fig
def plot_bar(data, num_xticks=5, title="", xlabel="", ylabel="", save_filename=None):
"""
Generate a bar graph of data
Inputs:
data: [np.ndarray] of shape (N,)
xticklabels: [list of N str] indicating the labels for the xticks
save_filename: [str] indicating where the file should be saved
if None, don't save the file
xlabel: [str] indicating the x-axis label
ylabel: [str] indicating the y-axis label
title: [str] indicating the plot title
TODO: set num_xticks
"""
fig, ax = plt.subplots(1)
bar = ax.bar(np.arange(len(data)), data)
#xticklabels = [str(int(val)) for val in np.arange(len(data))]
#xticks = ax.get_xticks()
#ax.set_xticklabels(xticklabels)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.suptitle(title, y=1.0, x=0.5)
if save_filename is not None:
fig.savefig(save_filename, transparent=True)
plt.close(fig)
return None
plt.show()
return fig
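# Hedged usage sketch (not part of the original module): bar plot of random counts.
def _example_plot_bar():
    counts = np.random.randint(0, 20, size=10)
    return plot_bar(counts, title="random counts", xlabel="index", ylabel="count")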
def plot_contrast_orientation_tuning(bf_indices, contrasts, orientations, activations, figsize=(32,32)):
"""
Generate contrast orientation tuning curves. Every subplot will have curves for each contrast.
Inputs:
bf_indices: [list or array] of neuron indices to use
all indices should be less than activations.shape[0]
contrasts: [list or array] of contrasts to use
orientations: [list or array] of orientations to use
activations: [np.ndarray] of responses with shape [num_neurons, num_contrasts, num_orientations]
"""
orientations = np.asarray(orientations)*(180/np.pi) #convert to degrees for plotting
num_bfs = np.asarray(bf_indices).size
cmap = plt.get_cmap('Greys')
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
fig = plt.figure(figsize=figsize)
num_plots_y = np.int32(np.ceil(np.sqrt(num_bfs)))+1
num_plots_x = np.int32(np.ceil(np.sqrt(num_bfs)))
gs_widths = [1.0,]*num_plots_x
gs_heights = [1.0,]*num_plots_y
gs = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.5, hspace=0.7,
width_ratios=gs_widths, height_ratios=gs_heights)
bf_idx = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
(y_id, x_id) = plot_id
if y_id == 0 and x_id == 0:
ax = fig.add_subplot(gs[plot_id])
#ax.set_ylabel("Activation", fontsize=16)
#ax.set_xlabel("Orientation", fontsize=16)
ax00 = ax
else:
ax = fig.add_subplot(gs[plot_id])#, sharey=ax00)
if bf_idx < num_bfs:
for co_idx, contrast in enumerate(contrasts):  # one curve per contrast, per the docstring
activity = activations[bf_indices[bf_idx], co_idx, :]
color_val = scalarMap.to_rgba(contrast)
ax.plot(orientations, activity, linewidth=1, color=color_val)
ax.scatter(orientations, activity, s=4, c=[color_val])
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.2g'))
ax.set_yticks([0, np.max(activity)])
ax.set_xticks([0, 90, 180])
bf_idx += 1
else:
ax = clear_axis(ax, spines="none")
plt.show()
return fig
def plot_masked_orientation_tuning(bf_indices, mask_orientations, base_responses, test_responses):
"""
Generate orientation tuning curves for superimposed masks.
Maximum contrast (index -1) will be selected for the base and mask
Inputs:
bf_indices: [list or array] of neuron indices to use
all indices should be less than base_responses.shape[0] and test_responses.shape[0]
mask_orientations: [list or array] of mask orientation values
base_responses: [list or array] of responses to base stimulus at optimal orientation
should be shape [num_neurons, num_base_contrasts, num_mask_contrasts, num_orientations]
test_responses: [list or array] of responses to the base+mask stimulus
should be shape [num_neurons, num_base_contrasts, num_mask_contrasts, num_orientations]
"""
mask_orientations = np.asarray(mask_orientations) * (180/np.pi)
num_bfs = np.asarray(bf_indices).size
num_orientations = mask_orientations.size
cmap = plt.get_cmap('Greys')
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
fig = plt.figure(figsize=(32,32))
num_plots_y = np.int32(np.ceil(np.sqrt(num_bfs)))+1
num_plots_x = np.int32(np.ceil(np.sqrt(num_bfs)))
gs_widths = [1.0,]*num_plots_x
gs_heights = [1.0,]*num_plots_y
gs = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.5, hspace=0.7,
width_ratios=gs_widths, height_ratios=gs_heights)
bf_idx = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
(y_id, x_id) = plot_id
if y_id == 0 and x_id == 0:
ax = fig.add_subplot(gs[plot_id])
#ax.set_ylabel("Normalized Activation", fontsize=16)
#ax.set_xlabel("Mask Orientation", fontsize=16)
#ax.set_ylim([0.0, np.max(co_test_mean_responses)])
ax00 = ax
else:
ax = fig.add_subplot(gs[plot_id])#, sharey=ax00)
if bf_idx < num_bfs:
bco_idx = -1; co_idx = -1 # we want highest contrasts used for this experiment
base_activity = base_responses[bf_indices[bf_idx], bco_idx]
test_activity = test_responses[bf_indices[bf_idx], bco_idx, co_idx, :]
color_val = scalarMap.to_rgba(1.0) # One could alternatively set this to the contrast value
ax.plot(mask_orientations, [base_activity,]*num_orientations, linestyle="--",
linewidth=1, color=color_val)
ax.plot(mask_orientations, test_activity, linestyle="-", linewidth=1, color=color_val)
ax.scatter(mask_orientations, test_activity, s=4, c=color_val)
ax.set_yticks([0, np.max(test_activity)])
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.2g'))
ax.set_xticks([0, 90, 180])
bf_idx += 1
else:
ax = clear_axis(ax, spines="none")
plt.show()
return fig
def plot_plaid_contrast_tuning(bf_indices, base_contrasts, mask_contrasts, base_orientations,
mask_orientations, test_responses):
"""
Plot responses to orthogonal plaid stimulus at different base and mask contrasts
Inputs:
bf_indices: [list or array] of neuron indices to use
all indices should be less than test_responses.shape[0]
base_contrasts: [list or array] of base contrasts.
mask_contrasts: [list or array] of mask contrasts.
each plot will have one line per mask_contrast
base_orientations: [list or array] of optimal base orientations for all neurons
should be a 1-D array with size = test_responses.shape[0]
mask_orientations: [list or array] of mask orientation values
function will compute the plaid response for orthogonal orientations
test_responses: [list or array] of responses to the base+mask stimulus
should be shape [num_neurons, num_base_contrasts, num_mask_contrasts, num_orientations]
"""
bf_indices = np.asarray(bf_indices)
mask_orientations = np.asarray(mask_orientations)
mask_contrasts = np.asarray(mask_contrasts)
num_bfs = bf_indices.size
num_orientations = mask_orientations.size
num_contrasts = mask_contrasts.size
# index of value in mask_orientations that is closest to orthogonal to base_orientations[bf_idx]
orthogonal_orientations = [base_orientations[bf_indices[bf_idx]]-(np.pi/2)
for bf_idx in range(num_bfs)]
orthogonal_orientations = np.asarray([val + np.pi if val < 0 else val
for val in orthogonal_orientations])
mask_or_idx = [np.argmin(np.abs(orthogonal_orientations[bf_idx] - mask_orientations))
for bf_idx in range(num_bfs)]
cmap = plt.get_cmap('Greys')
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
num_plots_y = np.int32(np.ceil(np.sqrt(num_bfs)))+1
num_plots_x = np.int32(np.ceil(np.sqrt(num_bfs)))
gs_widths = [1.0,]*num_plots_x
gs_heights = [1.0,]*num_plots_y
gs = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.5, hspace=0.7,
width_ratios=gs_widths, height_ratios=gs_heights)
fig = plt.figure(figsize=(32,32)) #TODO: Adjust fig size according to num plots
bf_idx = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
(y_id, x_id) = plot_id
if y_id == 0 and x_id == 0:
ax = fig.add_subplot(gs[plot_id])
#ax.set_ylabel("Normalized Activation", fontsize=16)
#ax.set_xlabel("Base Contrast", fontsize=16)
#ax.set_ylim([0.0, 1.0])
ax00 = ax
else:
ax = fig.add_subplot(gs[plot_id], sharey=ax00)
if bf_idx < num_bfs:
for co_idx, mask_contrast in enumerate(mask_contrasts):
# vary base contrast for fixed mask contrast & orthogonal mask
activity = test_responses[bf_indices[bf_idx], :, co_idx, mask_or_idx[bf_idx]]
color_val = scalarMap.to_rgba(mask_contrast)
ax.plot(base_contrasts, activity, linestyle="-", color=color_val)
ax.scatter(base_contrasts, activity, s=4, c=color_val, label=str(mask_contrast))
ax.set_xticks([base_contrasts[0], base_contrasts[-1]])
bf_idx += 1
else:
ax = clear_axis(ax, spines="none")
plt.show()
return fig
def plot_activity_hist(data, num_bins="auto", title="", save_filename=None):
"""
Histogram activity matrix
Inputs:
data [np.ndarray] data matrix, can have shapes:
1D tensor [data_points]
2D tensor [batch, data_points] - will plot avg hist, averaging over batch
3D tensor [batch, time_point, data_points] - will plot avg hist over time
title: [str] for title of figure
save_filename: [str] holding output directory for writing,
"""
num_dim = data.ndim
if num_dim > 1:
data = np.mean(data, axis=0)
(fig, ax) = plt.subplots(1)
vals, bins, patches = ax.hist(data, bins=num_bins, histtype="barstacked",
stacked=True)
if np.min(data) != np.max(data):
ax.set_xlim([np.min(data), np.max(data)])
ax.set_xlabel('Value')
ax.set_ylabel('Count')
fig.suptitle(title, y=1.0, x=0.5)
fig.tight_layout()
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
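# Hedged usage sketch (not part of the original module): histogram a simulated batch of
# activation vectors (shape [batch, data_points], averaged over the batch as documented).
def _example_plot_activity_hist():
    activations = np.random.laplace(scale=0.5, size=(32, 256))
    return plot_activity_hist(activations, title="simulated activations")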
def plot_phase_avg_power_spec(data, title="", save_filename=None):
"""
Plot phase averaged power spectrum for a set of images
Inputs:
data: [np.ndarray] 1D data to be plotted
title: [str] for title of figure
save_filename: [str] holding output directory for writing,
figures will not display with GUI if set
"""
(fig, ax) = plt.subplots(1)
ax.loglog(range(data[data>1].shape[0]), data[data>1])
fig.suptitle(title, y=1.0, x=0.5)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
def plot_group_weights(weights, group_ids, title="", figsize=None, save_filename=None):
"""
weights: [np.ndarray] of shape [num_neurons, num_input_y, num_input_x]
group_ids: [list of lists] containing ids for each group [[,]*neurons_per_group,]*num_groups
"""
num_neurons = weights.shape[0]
for weight_id in range(num_neurons):
weights[weight_id,...] = weights[weight_id,...] - weights[weight_id,...].mean()
weights[weight_id,...] = weights[weight_id,...] / (weights[weight_id,...].max()-weights[weight_id,...].min())
vmin = np.min(weights)
vmax = np.max(weights)
indices = [idx for id_list in group_ids for idx in id_list]
num_groups = len(group_ids)
num_groups_x = int(np.floor(np.sqrt(num_groups)))
num_groups_y = int(np.ceil(np.sqrt(num_groups)))
num_neurons_per_group = len(group_ids[0])
num_neurons_x = int(np.floor(np.sqrt(num_neurons_per_group)))
num_neurons_y = int(np.ceil(np.sqrt(num_neurons_per_group)))
outer_spacing = 0.20
inner_spacing = 0.1
fig = plt.figure(figsize=figsize)
gs1 = gridspec.GridSpec(num_groups_y, num_groups_x,
hspace=outer_spacing*num_groups_y/(num_groups_x+num_groups_y),
wspace=outer_spacing*num_groups_x/(num_groups_x+num_groups_y))
neuron_index = 0
for group_plot_id in np.ndindex((num_groups_y, num_groups_x)):
gs_inner = gridspec.GridSpecFromSubplotSpec(num_neurons_y, num_neurons_x, gs1[group_plot_id],
hspace=inner_spacing*num_neurons_y/(num_neurons_x+num_neurons_y),
wspace=inner_spacing*num_neurons_x/(num_neurons_x+num_neurons_y))
for inner_plot_id in np.ndindex((num_neurons_y, num_neurons_x)):
ax = clear_axis(fig.add_subplot(gs_inner[inner_plot_id]))
ax.set_aspect("equal")
if neuron_index < num_neurons:
ax.imshow(weights[indices[neuron_index], ...], cmap="Greys_r", vmin=vmin, vmax=vmax)
neuron_index += 1
fig.suptitle(title, y=0.9, x=0.5, fontsize=20)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
def plot_weights(weights, title="", figsize=None, save_filename=None):
"""
weights: [np.ndarray] of shape [num_outputs, num_input_y, num_input_x]
The matrices are renormalized before plotting.
"""
weights = dp.norm_weights(weights)
vmin = np.min(weights)
vmax = np.max(weights)
num_plots = weights.shape[0]
num_plots_y = int(np.ceil(np.sqrt(num_plots))+1)
num_plots_x = int(np.floor(np.sqrt(num_plots)))
fig, sub_ax = plt.subplots(num_plots_y, num_plots_x, figsize=figsize)
filter_total = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
if filter_total < num_plots:
sub_ax[plot_id].imshow(np.squeeze(weights[filter_total, ...]), vmin=vmin, vmax=vmax, cmap="Greys_r")
filter_total += 1
clear_axis(sub_ax[plot_id])
sub_ax[plot_id].set_aspect("equal")
fig.suptitle(title, y=0.95, x=0.5, fontsize=20)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
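# Hedged usage sketch (not part of the original module): a random "dictionary" of 64
# 8x8 patches; relies on dp.norm_weights accepting [num_outputs, y, x] arrays as the
# docstring above describes.
def _example_plot_weights():
    weights = np.random.randn(64, 8, 8)
    return plot_weights(weights, title="random dictionary")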
def plot_data_tiled(data, normalize=False, title="", vmin=None, vmax=None, cmap="Greys_r",
save_filename=None):
"""
Save figure for input data as a tiled image
Inputs:
data: [np.ndarray] of shape:
(height, width, features) - single image
(n, height, width, features) - n images
normalize: [bool] indicating whether the data should be stretched (normalized)
This is recommended for dictionary plotting.
title: [str] for title of figure
vmin, vmax: [int] the min and max of the color range
cmap: [str] indicating cmap, or None for imshow default
save_filename: [str] holding output directory for writing,
figures will not display with GUI if set
"""
data = dp.reshape_data(data, flatten=False)[0]
if normalize:
data = dp.normalize_data_with_max(data)[0]
vmin = -1.0
vmax = 1.0
if vmin is None:
vmin = np.min(data)
if vmax is None:
vmax = np.max(data)
if data.ndim == 3:
data = np.squeeze(data)
elif data.ndim == 4:
data = np.squeeze(pad_data(data))
#If rgb, need to rescale from 0 .. 1
if(data.shape[-1] == 3):
data = (data - data.min())/(data.max() - data.min())
else:
assert False, ("input data must have ndim==3 or 4")
fig, sub_axis = plt.subplots(1, figsize=(24, 24))
axis_image = sub_axis.imshow(np.squeeze(data), cmap=cmap, interpolation="nearest")
axis_image.set_clim(vmin=vmin, vmax=vmax)
if data.shape[-1] == 1:
cbar = add_colorbar_to_im(axis_image)
sub_axis = clear_axis(sub_axis, spines="k")
sub_axis.set_title(title, fontsize=20)
if save_filename is not None:
if save_filename == "":
save_filename = "./output.png"
fig.savefig(save_filename, transparent=True, bbox_inches="tight", pad_inches=0.01)
plt.close(fig)
return None
plt.show()
return fig
def plot_stats(data, keys=None, labels=None, start_index=0, figsize=None, save_filename=None):
"""
Generate time-series plots of stats specified by keys
Inputs:
data: [dict] containing data to be plotted. len of all values should be equal
data must have the key "batch_step"
keys: [list of str] optional list of keys to plot, each should exist in data.keys()
If nothing is given, data.keys() will be used
labels: [list of str] optional list of labels, should be the same length as keys input
If nothing is given, data.keys() will be used
save_filename: [str] containing the complete output filename.
"""
if keys is None:
keys = list(data.keys())
else:
assert all([key in data.keys() for key in keys]), (
"All input keys must exist as keys in the data dictionary")
assert len(keys) > 0, "Keys must be None or have length > 0."
if "batch_step" in keys:
keys.remove("batch_step")
if "schedule_index" in keys:
keys.remove("schedule_index")
if "global_batch_index" in keys:
keys.remove("global_batch_index")
if labels is None:
labels = keys
else:
assert len(labels) == len(keys), (
"The number of labels must match the number of keys")
num_keys = len(keys)
gs = gridspec.GridSpec(num_keys, 1, hspace=0.5)
fig = plt.figure(figsize=figsize)
axis_image = [None]*num_keys
for key_idx, key in enumerate(keys):
x_dat = data["batch_step"][start_index:]
y_dat = data[key][start_index:]
ax = fig.add_subplot(gs[key_idx])
axis_image[key_idx] = ax.plot(x_dat, y_dat)
if key_idx < len(keys)-1:
ax.get_xaxis().set_ticklabels([])
ax.locator_params(axis="y", nbins=5)
ax.set_ylabel("\n".join(re.split("_", labels[key_idx])))
ax.set_yticks([np.minimum(0.0, np.min(y_dat)), np.maximum(0.0, np.max(y_dat))])
ylabel_xpos = -0.15
ax.yaxis.set_label_coords(ylabel_xpos, 0.5)
ax.set_xlabel("Batch Number")
fig.suptitle("Stats per Batch", y=1.0, x=0.5)
if save_filename is not None:
fig.savefig(save_filename, transparent=True)
plt.close(fig)
return None
plt.show()
return fig
def plot_inference_stats(data, title="", save_filename=None):
"""
Plot loss values during LCA inference
Inputs:
data: [dict] that must contain the "losses"
this can be created by using the LCA analyzer objects
TODO: Add a 4th plot that shows the % change in active coefficients (inactive-to-active + active-to-inactive)
e.g. in bottom left of figure 7 in rozell et al 2008 LCA paper
"""
labels = [key.title() for key in data["losses"].keys()]
losses = [val for val in data["losses"].values()]
num_im, num_steps = losses[0].shape
means = [None,]*len(labels)
sems = [None,]*len(labels)
for loss_id, loss in enumerate(losses):
means[loss_id] = np.mean(loss, axis=0) # mean across num_imgs
sems[loss_id] = np.std(loss, axis=0) / np.sqrt(num_im)
num_plots_y = np.int32(np.ceil(np.sqrt(len(labels))))+1
num_plots_x = np.int32(np.ceil(np.sqrt(len(labels))))
gs = gridspec.GridSpec(num_plots_y, num_plots_x)
fig = plt.figure(figsize=(12,12))
loss_id = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
(y_id, x_id) = plot_id
ax = fig.add_subplot(gs[plot_id])
if loss_id < len(labels):
time_steps = np.arange(num_steps)
ax.plot(time_steps, means[loss_id], "k-")
ax.fill_between(time_steps, means[loss_id]-sems[loss_id],
means[loss_id]+sems[loss_id], alpha=0.2)
ax.set_ylabel(labels[loss_id].replace('_', ' '), fontsize=16)
ax.set_xlim([1, np.max(time_steps)])
ax.set_xticks([1, int(np.floor(np.max(time_steps)/2)), np.max(time_steps)])
ax.set_xlabel("Time Step", fontsize=16)
ax.tick_params("both", labelsize=14)
loss_id += 1
else:
ax = clear_axis(ax, spines="none")
fig.tight_layout()
fig.suptitle(title, y=1.03, x=0.5, fontsize=20)
if save_filename is not None:
fig.savefig(save_filename, transparent=True, bbox_inches="tight", pad=0.1)
plt.close(fig)
return None
plt.show()
return fig
def plot_inference_traces(data, activation_threshold, img_idx=None, act_indicator_threshold=None):
"""
Plot of model neurons' inputs over time
Args:
data: [dict] with each trace, with keys [b, u, a, ga, images]
Dictionary is created by analyze_lca.evaluate_inference()
img_idx: [int] which image in data["images"] to run analysis on
act_indicator_threshold: [float] sets the threshold for when a neuron is marked as "recently active"
Recently active neurons are those that became active towards the end of the inference process
Recency is computed as any time step that is greater than num_inference_steps * act_indicator_threshold
Recently active neurons are indicated by a dotted magenta border
This input must be between 0.0 and 1.0
"""
plt.rc('text', usetex=True)
(num_images, num_time_steps, num_neurons) = data["b"].shape
sqrt_nn = int(np.sqrt(num_neurons))
if img_idx is None:
img_idx = np.random.choice(num_images)
global_max_val = float(np.max(np.abs([data["b"][img_idx,...],
data["u"][img_idx,...], data["ga"][img_idx,...], data["a"][img_idx,...],
np.ones_like(data["b"][img_idx,...])*activation_threshold])))
fig, sub_axes = plt.subplots(sqrt_nn+2, sqrt_nn+1, figsize=(20, 20))
fig.subplots_adjust(hspace=0.20, wspace=0.20)
for (axis_idx, axis) in enumerate(fig.axes): # one axis per neuron
if axis_idx < num_neurons:
t = np.arange(data["b"].shape[1])
b = data["b"][img_idx, :, axis_idx]
u = data["u"][img_idx, :, axis_idx]
ga = data["ga"][img_idx, :, axis_idx]
a = data["a"][img_idx, :, axis_idx]
l1, = axis.plot(t, b, linewidth=0.25, color="g", label="b")
l2, = axis.plot(t, u, linewidth=0.25, color="b", label="u")
l3, = axis.plot(t, ga, linewidth=0.25, color="r", label="Ga")
l4, = axis.plot(t, [0 for _ in t], linewidth=0.25, color="k", linestyle="-",
label="zero")
l5, = axis.plot(t, [activation_threshold for _ in t], linewidth=0.25, color="k",
linestyle=":", dashes=(1,1), label=r"$\lambda$")
if "fb" in data.keys():
fb = data["fb"][img_idx,:,axis_idx]
l6, = axis.plot(t, fb, linewidth=0.25, color="darkorange", label="fb")
max_val = np.max(np.abs([b, ga, u, a]))
scale_ratio = max_val / global_max_val
transFigure = fig.transFigure.inverted()
axis_height = axis.get_window_extent().transformed(transFigure).height
line_length = axis_height * scale_ratio
x_offset = 0.003
axis_origin = transFigure.transform(axis.transAxes.transform([0,0]))
coord1 = [axis_origin[0] - x_offset, axis_origin[1]]
coord2 = [coord1[0], coord1[1] + line_length]
line = matplotlib.lines.Line2D((coord1[0], coord2[0]), (coord1[1],
coord2[1]), transform=fig.transFigure, color="0.3")
fig.lines.append(line)
if (a[-1] > 0):
clear_axis(axis, spines="magenta")
if act_indicator_threshold is not None:
assert act_indicator_threshold > 0.0 and act_indicator_threshold < 1.0, (
"act_indicator_threshold must be between 0.0 and 1.0")
thresh_index = int(num_time_steps * act_indicator_threshold)
if np.all([a[idx] == 0 for idx in range(0, thresh_index)]): # neuron has recently become active
for ax_loc in ["top", "bottom", "left", "right"]:
axis.spines[ax_loc].set_linestyle((1, (1, 3))) #length, spacing (on, off)
else:
clear_axis(axis, spines="black")
if act_indicator_threshold is not None:
thresh_index = int(num_time_steps * act_indicator_threshold)
if np.any([a[idx] > 0 for idx in range(thresh_index, num_time_steps)]): # neuron has recently become inactive
for ax_loc in ["top", "bottom", "left", "right"]:
axis.spines[ax_loc].set_linestyle((1, (1, 3))) #length, spacing (on, off)
else:
clear_axis(axis)
num_pixels = np.size(data["images"][img_idx])
image = data["images"][img_idx,...].reshape(int(np.sqrt(num_pixels)), int(np.sqrt(num_pixels)))
sub_axes[sqrt_nn+1, 0].imshow(image, cmap="Greys", interpolation="nearest")
for plot_col in range(sqrt_nn):
clear_axis(sub_axes[sqrt_nn+1, plot_col])
fig.suptitle("LCA Activity", y=0.9, fontsize=18)
handles, labels = sub_axes[0,0].get_legend_handles_labels()
legend = sub_axes[sqrt_nn+1, 1].legend(handles, labels, fontsize=12, ncol=3,
borderaxespad=0., bbox_to_anchor=[0, 0], fancybox=True, loc="upper left")
for line in legend.get_lines():
line.set_linewidth(3)
plt.show()
return fig
def plot_weight_image(weights, colorbar_aspect=50, title="", figsize=None, save_filename=None):
fig, ax = plt.subplots(1, 1, figsize=figsize, squeeze=False)
ax = ax.item()
im = ax.imshow(weights, vmin=np.min(weights), vmax=np.max(weights), cmap="Greys_r")
ax.set_title(title)
clear_axis(ax)
add_colorbar_to_im(im, aspect=colorbar_aspect)
if save_filename is not None:
fig.savefig(save_filename, transparent=True)
plt.close(fig)
return None
plt.show()
return fig
def plot_weight_angle_heatmap(weight_angles, angle_min=0, angle_max=180, title="", figsize=None, save_filename=None):
vmin = angle_min
vmax = angle_max
cmap = plt.get_cmap('viridis')
cNorm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
scalarMap._A = []
fig, ax = plt.subplots(1, figsize=figsize)
im = ax.imshow(weight_angles, vmin=vmin, vmax=vmax)
ax.set_title(title, fontsize=18)
cbar = add_colorbar_to_im(im, aspect=20, pad_fraction=0.5, labelsize=16, ticks=[vmin, vmax])
cbar.ax.set_yticklabels(["{:.0f}".format(vmin), "{:.0f}".format(vmax)])
if save_filename is not None:
fig.savefig(save_filename, transparent=True)
plt.close(fig)
return None
plt.show()
return fig
def plot_weight_angle_histogram(weight_angles, num_bins=50, angle_min=0, angle_max=180,
y_max=None, figsize=None, save_filename=None):
bins = np.linspace(angle_min, angle_max, num_bins)
hist, bin_edges = np.histogram(weight_angles.flatten(), bins)
if y_max is None:
y_max = np.max(hist)
bin_left, bin_right = bin_edges[:-1], bin_edges[1:]
bin_centers = bin_left + (bin_right - bin_left)/2
fig, ax = plt.subplots(1, figsize=figsize)
ax.bar(bin_centers, hist, width=2.0, log=True, align="center")
ax.set_xticks(bin_left, minor=True)
ax.set_xticks(bin_left[::4], minor=False)
ax.xaxis.set_major_formatter(FormatStrFormatter("%0.0f"))
ax.tick_params("both", labelsize=16)
ax.set_xlim([angle_min, angle_max])
ax.set_xticks([angle_min, int(np.floor(angle_max/4)), int(2*np.floor(angle_max/4)),
int(3*np.floor(angle_max/4)), angle_max])
ax.set_ylim([1, y_max])
ax.set_title("Neuron Angle Histogram", fontsize=18)
ax.set_xlabel("Angle (Degrees)", fontsize=18)
ax.set_ylabel("Log Count", fontsize=18)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
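# Hedged sketch (not part of the original module): one way to build the pairwise angle
# matrix (in degrees) that plot_weight_angle_histogram expects, starting from a
# [num_inputs, num_neurons] weight matrix. The helper name is hypothetical.
def _pairwise_weight_angles(weights):
    normed = weights / np.linalg.norm(weights, axis=0, keepdims=True)
    cos_sim = np.clip(normed.T @ normed, -1.0, 1.0)
    return np.rad2deg(np.arccos(cos_sim))  # [num_neurons, num_neurons], diagonal is 0

# Example usage:
#   angles = _pairwise_weight_angles(np.random.randn(256, 128))
#   plot_weight_angle_histogram(angles)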
def plot_weight_nearest_neighbor_histogram(weight_angles, num_bins=25, angle_min=0, angle_max=90,
y_max=None, figsize=None, save_filename=None):
nn_angles = np.zeros(weight_angles.shape[0])
for neuron_id in range(weight_angles.shape[0]):
neighbors = np.delete(weight_angles[neuron_id,:], neuron_id)
nn_angles[neuron_id] = np.min(neighbors[neighbors>=0])
bins = np.linspace(angle_min, angle_max, num_bins)
hist, bin_edges = np.histogram(nn_angles.flatten(), bins)
if y_max is None:
y_max = np.max(hist)
bin_left, bin_right = bin_edges[:-1], bin_edges[1:]
bin_centers = bin_left + (bin_right - bin_left)/2
fig, ax = plt.subplots(1, figsize=figsize)
ax.bar(bin_centers, hist, width=1.0, log=True, align="center")
ax.set_xticks(bin_left, minor=True)
ax.set_xticks(bin_left[::4], minor=False)
ax.xaxis.set_major_formatter(FormatStrFormatter("%0.0f"))
ax.tick_params("both", labelsize=16)
ax.set_xlim([angle_min, angle_max])
ax.set_xticks([angle_min, int(np.floor(angle_max/4)), int(2*np.floor(angle_max/4)),
int(3*np.floor(angle_max/4)), angle_max])
ax.set_ylim([1, y_max])
ax.set_title("Neuron Nearest Neighbor Angle", fontsize=18)
ax.set_xlabel("Angle (Degrees)", fontsize=18)
ax.set_ylabel("Log Count", fontsize=18)
if save_filename is not None:
fig.savefig(save_filename)
plt.close(fig)
return None
plt.show()
return fig
def pad_data(data, pad_values=1):
"""
Pad data with ones for visualization
Outputs:
padded version of input
Inputs:
data: np.ndarray
pad_values: [int] specifying what value will be used for padding
"""
n = int(np.ceil(np.sqrt(data.shape[0])))  # api: numpy.sqrt
#! /usr/bin/python
import numpy as np
from sensor_msgs.msg import PointCloud
from geometry_msgs.msg import Point32
import std_msgs.msg
import rospy
from sklearn.cluster import KMeans as KMeans
import math, copy, os, itertools
import matplotlib.pyplot as plt
from sklearn.neighbors import KDTree
from stl import mesh as stl_mesh_module
from abc import ABCMeta, abstractmethod
import openravepy as orpy
import hfts_grasp_planner.external.transformations as transformations
import hfts_grasp_planner.hfts_generation as hfts_generation
from scipy.spatial import ConvexHull
DEFAULT_HFTS_GENERATION_PARAMS = {'max_normal_variance': 0.2,
'min_contact_patch_radius': 0.015,
'contact_density': 300,
'max_num_points': 10000,
'position_weight': 2,
'branching_factor': 4,
'first_level_branching_factor': 3}
class ObjectIO(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_hfts(self, obj_id, force_new=False):
pass
@abstractmethod
def get_openrave_file_name(self, obj_id):
pass
class ObjectFileIO(ObjectIO):
def __init__(self, data_path, var_filter=True,
hfts_generation_parameters=DEFAULT_HFTS_GENERATION_PARAMS,
max_num_points=10000):
self._data_path = data_path
self._b_var_filter = var_filter
self._hfts_generation_params = hfts_generation_parameters
self._max_num_points = max_num_points
self._last_obj_id = None
self._last_hfts = None
self._last_hfts_param = None
self._last_obj_com = None
def get_points(self, obj_id, b_filter=None):
if b_filter is None:
b_filter = self._b_var_filter
obj_file = self._data_path + '/' + obj_id + '/objectModel'
file_extension = self.get_obj_file_extension(obj_id)
points = None
contact_density = extract_hfts_gen_parameter(self._hfts_generation_params, 'contact_density')
if file_extension == '.ply':
points = hfts_generation.create_contact_points_from_ply(file_name=obj_file + file_extension,
density=contact_density)
elif file_extension == '.stl':
points = hfts_generation.create_contact_points_from_stl(file_name=obj_file + file_extension,
density=contact_density)
# TODO read point cloud if there no files stored.
# rospy.logwarn('No previous file found in the database, will proceed with raw point cloud instead.')
if points is not None:
com = np.mean(points[:, :3], axis=0)
if b_filter:
patch_size = extract_hfts_gen_parameter(self._hfts_generation_params,
'min_contact_patch_radius')
max_variance = extract_hfts_gen_parameter(self._hfts_generation_params,
'max_normal_variance')
points = hfts_generation.filter_unsmooth_points(points,
radius=patch_size,
max_variance=max_variance)
max_num_points = extract_hfts_gen_parameter(self._hfts_generation_params, 'max_num_points')
points = hfts_generation.down_sample_points(points, max_num_points)
else:
rospy.logerr('[ObjectFileIO] Failed to load mesh from ' + str(file_extension) +
' file for object ' + obj_id)
com = None
return points, com
def get_obj_file_extension(self, obj_id):
obj_file = self._data_path + '/' + obj_id + '/objectModel'
b_is_valid_file = os.path.exists(obj_file + '.ply') and os.path.isfile(obj_file + '.ply')
if b_is_valid_file:
return '.ply'
b_is_valid_file = os.path.exists(obj_file + '.stl') and os.path.isfile(obj_file + '.stl')
if b_is_valid_file:
return '.stl'
rospy.logerr('[ObjectFileIO::get_obj_file_extension] No compatible file found with prefix name ' + obj_file)
return None
def get_openrave_file_name(self, obj_id):
file_extension = self.get_obj_file_extension(obj_id)
if file_extension is not None:
return self._data_path + '/' + obj_id + '/' + 'objectModel' + file_extension
xml_file_name = self._data_path + '/' + obj_id + '/' + obj_id + '.kinbody.xml'
b_xml_file_exists = os.path.exists(xml_file_name)
if b_xml_file_exists:
return xml_file_name
return None
def get_hfts(self, obj_id, force_new=False):
# Check whether we have an HFTS for this object in memory
if self._last_obj_id != obj_id or force_new:
# If not, update
b_success = self._update_hfts(obj_id, force_new)
if not b_success:
return None, None, None
return self._last_hfts, self._last_hfts_param.astype(int), self._last_obj_com
def _read_hfts(self, obj_id, hfts_file, hfts_param_file, obj_com_file):
if os.path.exists(hfts_file) and os.path.isfile(hfts_file) \
and os.path.exists(hfts_param_file) and os.path.isfile(hfts_param_file) \
and os.path.exists(obj_com_file) and os.path.isfile(obj_com_file):
self._last_obj_id = obj_id
self._last_hfts = np.load(hfts_file)
self._last_hfts_param = np.load(hfts_param_file)
self._last_obj_com = np.load(obj_com_file)
return True
return False
def set_hfts_generation_parameters(self, params):
if type(params) is not dict:
raise TypeError('[ObjectFileIO::set_hfts_generation_parameters] Expected a dictionary, received ' + str(type(params)))
self._hfts_generation_params = params
def show_hfts(self, level, or_drawer, object_transform=None, b_normals=False):
"""
Renders the most recently loaded hfts in OpenRAVE.
:param level: the level of the hfts to show
:param or_drawer: an instance of an OpenRAVEDrawer used for rendering
:param object_transform: An optional transform of the object frame.
:param b_normals: If true, also renders normals of each point
"""
if self._last_hfts is None:
rospy.logerr('[ObjectFileIO::show_hfts] No hfts model loaded.')
return
if level > len(self._last_hfts_param) - 1:
raise ValueError('[objectFileIO::showHFTS] level ' + str(level) + ' does not exist')
hfts_generation.or_render_hfts(or_drawer, self._last_hfts, self._last_hfts_param,
level, transform=object_transform, b_normals=b_normals)
# b_factors = []
# for i in range(level + 1):
# b_factors.append(np.arange(self._last_hfts_param[i]))
# labels = itertools.product(*b_factors)
# hfts_labels = self._last_hfts[:, 6:7 + level]
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# for label in labels:
# idx = np.where((hfts_labels == label).all(axis=1))[0]
# cluster_points = self._last_hfts[idx, :3]
# ax.scatter(cluster_points[:, 0], cluster_points[:, 1], cluster_points[:, 2], c=np.random.rand(3,1), s = 100)
# plt.show()
def _update_hfts(self, obj_id, force_new=False):
""" Updates the cached hfts """
hfts_file = self._data_path + '/' + obj_id + '/hfts.npy'
hfts_param_file = self._data_path + '/' + obj_id + '/hftsParam.npy'
obj_com_file = self._data_path + '/' + obj_id + '/objCOM.npy'
# If it does not need to be regenerated, try to load it from file
if not force_new:
b_hfts_read = self._read_hfts(obj_id, hfts_file, hfts_param_file, obj_com_file)
if b_hfts_read:
return True
rospy.logwarn('HFTS is not available in the database')
# If we reached this point, we have to generate a new HFTS from mesh/point cloud
points, com = self.get_points(obj_id)
if points is None:
rospy.logerr('Could not generate HFTS for object ' + obj_id)
return False
# If we have points, generate an hfts
hfts_gen = hfts_generation.HFTSGenerator(points, com)
hfts_gen.set_branch_factor(extract_hfts_gen_parameter(self._hfts_generation_params, 'branching_factor'))
hfts_gen.set_position_weight(extract_hfts_gen_parameter(self._hfts_generation_params, 'position_weight'))
hfts_gen.run()
self._last_obj_id = obj_id
self._last_hfts = hfts_gen.get_hfts()
self._last_hfts_param = hfts_gen.get_hfts_param()
self._last_obj_com = com
hfts_gen.save_hfts(hfts_file=hfts_file, hfts_param_file=hfts_param_file,
com_file=obj_com_file)
return True
def extract_hfts_gen_parameter(param_dict, name):
if name in param_dict:
return param_dict[name]
elif name in DEFAULT_HFTS_GENERATION_PARAMS:
return DEFAULT_HFTS_GENERATION_PARAMS[name]
else:
raise ValueError('[utils::extract_hfts_gen_parameter] Unknown HFTS generation parameter ' + str(name))
def clamp(values, min_values, max_values):
clamped_values = len(values) * [0.0]
assert len(values) == len(min_values) and len(values) == len(max_values)
for i in range(len(values)):
clamped_values[i] = max(min(values[i], max_values[i]), min_values[i])
return clamped_values
def read_stl_file(file_id):
stl_mesh = stl_mesh_module.Mesh.from_file(file_id, calculate_normals=False)
points = np.zeros((len(stl_mesh.points), 6))
# Extract points with normals from the mesh surface
for face_idx in range(len(stl_mesh.points)):
# For this, we select the center of each face
points[face_idx, 0:3] = (stl_mesh.v0[face_idx] + stl_mesh.v1[face_idx] + stl_mesh.v2[face_idx]) / 3.0
normal_length = np.linalg.norm(stl_mesh.normals[face_idx])
if normal_length == 0.0:
stl_mesh.update_normals()
normal_length = np.linalg.norm(stl_mesh.normals[face_idx])
if normal_length == 0.0:
raise IOError('[utils.py::read_stl_file] Could not extract valid normals from the given file ' \
+ str(file_id))
points[face_idx, 3:6] = stl_mesh.normals[face_idx] / normal_length
return points
def create_point_cloud(points):
point_cloud = PointCloud()
header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = 'map'
point_cloud.header = header
for point in points:
point_cloud.points.append(Point32(point[0], point[1], point[2]))
return point_cloud
def vec_angel_diff(v0, v1):
# in radians
assert len(v0) == len(v1)
l0 = math.sqrt(np.inner(v0, v0))
l1 = math.sqrt(np.inner(v1, v1))
if l0 == 0 or l1 == 0:
return 0
x = np.dot(v0, v1) / (l0*l1)
x = min(1.0, max(-1.0, x)) # fixing math precision error
angel = math.acos(x)
return angel
def dist_in_range(d, r):
if d < r[0]:
return r[0] - d
elif d > r[1]:
return d - r[1]
else:
return 0.0
def normal_distance(normals_a, normals_b):
d = 0.0
for i in range(len(normals_a)):
d += vec_angel_diff(normals_a[i], normals_b[i])
return d
def position_distance(pos_values_a, pos_values_b):
d = 0.0
for i in range(len(pos_values_a)):
d += np.linalg.norm(pos_values_a[i] - pos_values_b[i])
return d
def generate_wrench_cone(contact, normal, mu, center, face_n):
ref_vec = np.array([0, 0, 1])
center = np.array(center)
contact = np.array(contact)
normal = np.array(normal)
forces = []
angle_step = float(2 * math.pi) / face_n
# create face_n cone edges
for i in range(face_n):
angle = angle_step * i
x = mu * math.cos(angle)
y = mu * math.sin(angle)
z = 1
forces.append([x, y, z])
forces = np.asarray(forces)
rot_angle = transformations.angle_between_vectors(ref_vec, normal)
axis = np.cross(ref_vec, normal)
# take care of axis aligned normals
if np.linalg.norm(axis) > 0.01:
r_mat = transformations.rotation_matrix(rot_angle, axis)[:3, :3]
else:
if np.dot(ref_vec, normal) > 0:
r_mat = np.identity(3, float)
else:
r_mat =
|
np.identity(3, float)
|
numpy.identity
|
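A minimal, self-contained sketch of the numpy.identity call completed above; the 3x3 size and float dtype mirror the rotation-matrix use in the snippet, everything else is illustrative:

import numpy as np

# a 3x3 identity acts as a no-op rotation when the contact normal is already aligned
r_mat = np.identity(3, float)
assert r_mat.shape == (3, 3) and r_mat.dtype == np.float64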
from typing import List, Optional, Union
import numpy as np
from scipy import sparse as sps
from .ext.sparse import (
csc_rmatvec,
csc_rmatvec_unrestricted,
csr_dense_sandwich,
csr_matvec,
csr_matvec_unrestricted,
sparse_sandwich,
transpose_square_dot_weights,
)
from .matrix_base import MatrixBase
from .util import (
check_matvec_out_shape,
check_transpose_matvec_out_shape,
set_up_rows_or_cols,
setup_restrictions,
)
class SparseMatrix(sps.csc_matrix, MatrixBase):
"""
A scipy.sparse csc matrix subclass that allows such objects to conform
to the ``MatrixBase`` interface.
SparseMatrix is instantiated in the same way as scipy.sparse.csc_matrix.
"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
super().__init__(arg1, shape, dtype, copy)
self.idx_dtype = max(self.indices.dtype, self.indptr.dtype)
if self.indices.dtype != self.idx_dtype:
self.indices = self.indices.astype(self.idx_dtype)
if self.indptr.dtype != self.idx_dtype:
self.indptr = self.indptr.astype(self.idx_dtype)
assert self.indices.dtype == self.idx_dtype
if not self.has_sorted_indices:
self.sort_indices()
self._x_csr = None
@property
def x_csr(self):
"""Cache the CSR representation of the matrix."""
if self._x_csr is None:
self._x_csr = self.tocsr(copy=False)
if self._x_csr.indices.dtype != self.idx_dtype:
self._x_csr.indices = self._x_csr.indices.astype(self.idx_dtype)
if self._x_csr.indptr.dtype != self.idx_dtype:
self._x_csr.indptr = self._x_csr.indptr.astype(self.idx_dtype)
return self._x_csr
def sandwich(
self, d: np.ndarray, rows: np.ndarray = None, cols: np.ndarray = None
) -> np.ndarray:
"""Perform a sandwich product: X.T @ diag(d) @ X."""
if not hasattr(d, "dtype"):
d = np.asarray(d)
if not self.dtype == d.dtype:
raise TypeError(
f"""self and d need to be of same dtype, either np.float64
or np.float32. self is of type {self.dtype}, while d is of type
{d.dtype}."""
)
rows, cols = setup_restrictions(self.shape, rows, cols, dtype=self.idx_dtype)
return sparse_sandwich(self, self.x_csr, d, rows, cols)
def _cross_sandwich(
self,
other: MatrixBase,
d: np.ndarray,
rows: np.ndarray,
L_cols: Optional[np.ndarray] = None,
R_cols: Optional[np.ndarray] = None,
):
"""Perform a sandwich product: X.T @ diag(d) @ Y."""
if isinstance(other, np.ndarray):
return self.sandwich_dense(other, d, rows, L_cols, R_cols)
from .categorical_matrix import CategoricalMatrix
if isinstance(other, CategoricalMatrix):
return other._cross_sandwich(self, d, rows, R_cols, L_cols).T
raise TypeError
def sandwich_dense(
self,
B: np.ndarray,
d: np.ndarray,
rows: np.ndarray,
L_cols: np.ndarray,
R_cols: np.ndarray,
) -> np.ndarray:
"""Perform a sandwich product: self.T @ diag(d) @ B."""
if not hasattr(d, "dtype"):
d = np.asarray(d)
if self.dtype != d.dtype or B.dtype != d.dtype:
raise TypeError(
f"""self, B and d all need to be of same dtype, either
np.float64 or np.float32. This matrix is of type {self.dtype},
B is of type {B.dtype}, while d is of type {d.dtype}."""
)
if np.issubdtype(d.dtype, np.signedinteger):
d = d.astype(float)
rows, L_cols = setup_restrictions(self.shape, rows, L_cols)
R_cols = set_up_rows_or_cols(R_cols, B.shape[1])
return csr_dense_sandwich(self.x_csr, B, d, rows, L_cols, R_cols)
def _matvec_helper(
self,
vec: Union[List, np.ndarray],
rows: Optional[np.ndarray],
cols: Optional[np.ndarray],
out: Optional[np.ndarray],
transpose: bool,
):
match_dim = 0 if transpose else 1
vec = np.asarray(vec)
if self.shape[match_dim] != vec.shape[0]:
raise ValueError(
f"shapes {self.shape} and {vec.shape} not aligned:"
f"{self.shape[match_dim]} (dim {match_dim}) != {vec.shape[0]} (dim 0)"
)
unrestricted_rows = rows is None or len(rows) == self.shape[0]
unrestricted_cols = cols is None or len(cols) == self.shape[1]
if unrestricted_rows and unrestricted_cols and vec.ndim == 1:
if transpose:
return csc_rmatvec_unrestricted(self, vec, out, self.indices)
else:
return csr_matvec_unrestricted(self.x_csr, vec, out, self.x_csr.indices)
matrix_matvec = lambda x, v: sps.csc_matrix.dot(x, v)
if transpose:
matrix_matvec = lambda x, v: sps.csr_matrix.dot(x.T, v)
rows, cols = setup_restrictions(self.shape, rows, cols, dtype=self.idx_dtype)
if transpose:
fast_fnc = lambda v: csc_rmatvec(self, v, rows, cols)
else:
fast_fnc = lambda v: csr_matvec(self.x_csr, v, rows, cols)
if vec.ndim == 1:
res = fast_fnc(vec)
elif vec.ndim == 2 and vec.shape[1] == 1:
res = fast_fnc(vec[:, 0])[:, None]
else:
res = matrix_matvec(
self[
|
np.ix_(rows, cols)
|
numpy.ix_
|
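A small, self-contained sketch of the numpy.ix_ completion above; the matrix and index arrays are illustrative stand-ins for the restricted row/column slicing done in _matvec_helper:

import numpy as np

A = np.arange(16).reshape(4, 4)
rows = np.array([0, 2])
cols = np.array([1, 3])
# np.ix_ turns two 1-D index arrays into a broadcastable open mesh,
# so A[np.ix_(rows, cols)] selects the submatrix at those rows and columns
sub = A[np.ix_(rows, cols)]
assert sub.shape == (2, 2)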
# ==============================================================================
# Copyright (c) 2022 The PersFormer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
import os
import sys
import copy
import json
import glob
import random
import pickle
import warnings
from pathlib import Path
import numpy as np
from numpy import int32#, result_type
import cv2
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torchvision.transforms.functional as F
from torchvision.transforms import InterpolationMode
from utils.utils import *
from models.networks.libs.lane import Lane
from scipy.interpolate import UnivariateSpline
sys.path.append('./')
warnings.simplefilter('ignore', np.RankWarning)
matplotlib.use('Agg')
class LaneDataset(Dataset):
"""
Dataset with labeled lanes
This implementation considers:
w/o laneline 3D attributes
w/o centerline annotations
default considers 3D laneline, including centerlines
This new version of the data loader prepares the ground-truth anchor tensor in flat ground space.
It is assumed the dataset provides accurate visibility labels; preparing the ground-truth tensor depends on them.
"""
# dataset_base_dir is image path, json_file_path is json file path,
def __init__(self, dataset_base_dir, json_file_path, args, data_aug=False, save_std=False, seg_bev=False):
"""
:param dataset_info_file: json file list
"""
# define image pre-processor
self.totensor = transforms.ToTensor()
# expect same mean/std for all torchvision models
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
self.normalize = transforms.Normalize(args.vgg_mean, args.vgg_std)
self.data_aug = data_aug
self.seg_bev = seg_bev
self.dataset_base_dir = dataset_base_dir
self.json_file_path = json_file_path
# dataset parameters
self.dataset_name = args.dataset_name
self.no_3d = args.no_3d
self.no_centerline = args.no_centerline
self.num_category = args.num_category
self.h_org = args.org_h
self.w_org = args.org_w
self.h_crop = args.crop_y
# parameters related to service network
self.h_net = args.resize_h
self.w_net = args.resize_w
self.ipm_h = args.ipm_h
self.ipm_w = args.ipm_w
self.u_ratio = float(self.w_net) / float(self.w_org)
self.v_ratio = float(self.h_net) / float(self.h_org - self.h_crop)
self.top_view_region = args.top_view_region
# LaneATT params
self.max_lanes = args.max_lanes
self.S = args.S
self.n_strips = self.S - 1
self.n_offsets = self.S
self.strip_size = self.h_net / self.n_strips
self.offsets_ys = np.arange(self.h_net, -1, -self.strip_size)
self.K = args.K
self.H_crop = homography_crop_resize([args.org_h, args.org_w], args.crop_y, [args.resize_h, args.resize_w])
# transformation from ipm to ground region
self.H_ipm2g = cv2.getPerspectiveTransform(np.float32([[0, 0],
[self.ipm_w-1, 0],
[0, self.ipm_h-1],
[self.ipm_w-1, self.ipm_h-1]]),
np.float32(args.top_view_region))
self.H_g2ipm = np.linalg.inv(self.H_ipm2g)
# self.H_g2ipm = np.linalg.inv(H_ipm2g)
# segmentation setting
self.lane_width = args.lane_width
if args.fix_cam:
self.fix_cam = True
# compute the homography between image and IPM, and crop transformation
self.cam_height = args.cam_height
self.cam_pitch = np.pi / 180 * args.pitch
self.P_g2im = projection_g2im(self.cam_pitch, self.cam_height, args.K)
self.H_g2im = homograpthy_g2im(self.cam_pitch, self.cam_height, args.K)
self.H_im2g = np.linalg.inv(self.H_g2im)
self.H_im2ipm = np.linalg.inv(np.matmul(self.H_crop, np.matmul(self.H_g2im, self.H_ipm2g)))
else:
self.fix_cam = False
# compute anchor steps
self.use_default_anchor = args.use_default_anchor
self.new_match = args.new_match
if self.new_match:
self.match_dist_thre_3d = args.match_dist_thre_3d
if self.use_default_anchor:
x_min = self.top_view_region[0, 0]
x_max = self.top_view_region[1, 0]
self.x_min = x_min
self.x_max = x_max
self.anchor_x_steps = np.linspace(x_min, x_max, np.int(args.ipm_w/8), endpoint=True)
self.anchor_y_steps = args.anchor_y_steps
self.num_y_steps = len(self.anchor_y_steps)
self.anchor_num = np.int32(self.ipm_w / 8)
args.fmap_mapping_interp_index = None
args.fmap_mapping_interp_weight = None
else:
self.x_min, self.x_max = self.top_view_region[0, 0], self.top_view_region[1, 0]
self.y_min, self.y_max = self.top_view_region[2, 1], self.top_view_region[0, 1]
self.anchor_num_before_shear = self.ipm_w // 8
self.anchor_x_steps = np.linspace(self.x_min, self.x_max, self.anchor_num_before_shear, endpoint=True)
self.anchor_y_steps = args.anchor_y_steps
self.num_y_steps = len(self.anchor_y_steps)
# use only by draw ipm
# self.num_y_steps_bev = args.num_y_steps
# compute anchor grid with different far center points
# currently, anchor grid consists of [center, left-sheared, right-sheared] concatenated
self.anchor_num = self.anchor_num_before_shear * 7
self.anchor_grid_x = np.repeat(np.expand_dims(self.anchor_x_steps, axis=1), self.num_y_steps, axis=1) # center
anchor_grid_y = np.repeat(np.expand_dims(self.anchor_y_steps, axis=0), self.anchor_num_before_shear, axis=0)
x2y_ratio = self.x_min / (self.y_max - self.y_min) # x change per unit y change (for left-sheared anchors)
anchor_grid_x_left_10 = (anchor_grid_y - self.y_min) * x2y_ratio + self.anchor_grid_x
# right-sheared anchors are symmetrical to left-sheared ones
anchor_grid_x_right_10 = np.flip(-anchor_grid_x_left_10, axis=0)
x2y_ratio = (self.x_min - self.x_max) / (self.y_max - self.y_min) # x change per unit y change (for left-sheared anchors)
anchor_grid_x_left_20 = (anchor_grid_y - self.y_min) * x2y_ratio + self.anchor_grid_x
# right-sheared anchors are symmetrical to left-sheared ones
anchor_grid_x_right_20 = np.flip(-anchor_grid_x_left_20, axis=0)
x2y_ratio = 2.0 * (self.x_min - self.x_max) / (self.y_max - self.y_min) # x change per unit y change (for left-sheared anchors)
anchor_grid_x_left_40 = (anchor_grid_y - self.y_min) * x2y_ratio + self.anchor_grid_x
# right-sheared anchors are symmetrical to left-sheared ones
anchor_grid_x_right_40 = np.flip(-anchor_grid_x_left_40, axis=0)
# concat the three parts
self.anchor_grid_x = np.concatenate((self.anchor_grid_x,
anchor_grid_x_left_10, anchor_grid_x_right_10,
anchor_grid_x_left_20, anchor_grid_x_right_20,
anchor_grid_x_left_40, anchor_grid_x_right_40), axis=0)
args.anchor_grid_x = self.anchor_grid_x
# compute mapping and linear interpolation for sheared feature maps
fmap_height, fmap_width = args.ipm_h // 8, self.anchor_num_before_shear
fmap_u_steps = np.arange(fmap_width) + 0.5
fmap_grid_u = np.repeat(np.expand_dims(fmap_u_steps, axis=0), fmap_height, axis=0)
fmap_v_steps = np.arange(fmap_height)
fmap_grid_v = np.repeat(np.expand_dims(fmap_v_steps, axis=1), fmap_width, axis=1)
fmap_u2v_ratio = 0.5 * fmap_width / fmap_height # u change per unit v change, 8/26
fmap_mapping_left_10 = fmap_grid_u - fmap_u2v_ratio * (26 - fmap_grid_v) # float u index to access
fmap_mapping_right_10 = fmap_grid_u + fmap_u2v_ratio * (26 - fmap_grid_v)
fmap_u2v_ratio = fmap_width / fmap_height # u change per unit v change, 16/26
fmap_mapping_left_20 = fmap_grid_u - fmap_u2v_ratio * (26 - fmap_grid_v) # float u index to access
fmap_mapping_right_20 = fmap_grid_u + fmap_u2v_ratio * (26 - fmap_grid_v)
fmap_u2v_ratio = 2 * fmap_width / fmap_height # u change per unit v change, 32/26
fmap_mapping_left_40 = fmap_grid_u - fmap_u2v_ratio * (26 - fmap_grid_v) # float u index to access
fmap_mapping_right_40 = fmap_grid_u + fmap_u2v_ratio * (26 - fmap_grid_v)
fmap_mapping_left_10_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_left_10_interp_weight = np.zeros((fmap_height, fmap_width, 2))
fmap_mapping_right_10_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_right_10_interp_weight = np.zeros((fmap_height, fmap_width, 2))
fmap_mapping_left_20_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_left_20_interp_weight = np.zeros((fmap_height, fmap_width, 2))
fmap_mapping_right_20_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_right_20_interp_weight = np.zeros((fmap_height, fmap_width, 2))
fmap_mapping_left_40_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_left_40_interp_weight = np.zeros((fmap_height, fmap_width, 2))
fmap_mapping_right_40_interp_index = np.zeros((fmap_height, fmap_width, 2), dtype=int)
fmap_mapping_right_40_interp_weight = np.zeros((fmap_height, fmap_width, 2))
for i in range(fmap_height):
for j in range(fmap_width):
if fmap_mapping_left_10[i, j] >= 0.5 and fmap_mapping_left_10[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_left_10[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_left_10_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_left_10_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_left_10[i, j], fmap_mapping_left_10[i, j]-low_bound])
if fmap_mapping_right_10[i, j] >= 0.5 and fmap_mapping_right_10[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_right_10[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_right_10_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_right_10_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_right_10[i, j], fmap_mapping_right_10[i, j]-low_bound])
if fmap_mapping_left_20[i, j] >= 0.5 and fmap_mapping_left_20[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_left_20[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_left_20_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_left_20_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_left_20[i, j], fmap_mapping_left_20[i, j]-low_bound])
if fmap_mapping_right_20[i, j] >= 0.5 and fmap_mapping_right_20[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_right_20[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_right_20_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_right_20_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_right_20[i, j], fmap_mapping_right_20[i, j]-low_bound])
if fmap_mapping_left_40[i, j] >= 0.5 and fmap_mapping_left_40[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_left_40[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_left_40_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_left_40_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_left_40[i, j], fmap_mapping_left_40[i, j]-low_bound])
if fmap_mapping_right_40[i, j] >= 0.5 and fmap_mapping_right_40[i, j] < fmap_width-0.5:
low_bound = np.floor(fmap_mapping_right_40[i, j] + 0.5) - 0.5
up_bound = low_bound + 1
fmap_mapping_right_40_interp_index[i, j, :] = np.array([low_bound-0.5, up_bound-0.5])
fmap_mapping_right_40_interp_weight[i, j, :] = np.array([up_bound-fmap_mapping_right_40[i, j], fmap_mapping_right_40[i, j]-low_bound])
args.fmap_mapping_interp_index = np.concatenate((fmap_mapping_left_10_interp_index, fmap_mapping_right_10_interp_index,
fmap_mapping_left_20_interp_index, fmap_mapping_right_20_interp_index,
fmap_mapping_left_40_interp_index, fmap_mapping_right_40_interp_index), axis=1)
args.fmap_mapping_interp_weight = np.concatenate((fmap_mapping_left_10_interp_weight, fmap_mapping_right_10_interp_weight,
fmap_mapping_left_20_interp_weight, fmap_mapping_right_20_interp_weight,
fmap_mapping_left_40_interp_weight, fmap_mapping_right_40_interp_weight), axis=1)
if self.no_centerline:
self.num_types = 1
else:
self.num_types = 3
if self.no_3d:
# self.anchor_dim = self.num_y_steps + 1
self.anchor_dim = self.num_y_steps + args.num_category
else:
if 'no_visibility' in args.mod:
self.anchor_dim = 2 * args.num_y_steps + args.num_category
else:
# self.anchor_dim = 3 * args.num_y_steps + 1
self.anchor_dim = 3 * self.num_y_steps + args.num_category
self.y_ref = args.y_ref
self.ref_id = np.argmin(np.abs(self.num_y_steps - self.y_ref))
self.save_json_path = args.save_json_path
# parse ground-truth file
if 'openlane' in self.dataset_name:
self._x_off_std, \
self._y_off_std, \
self._z_std, \
self._im_anchor_origins, \
self._im_anchor_angles = self.init_dataset_openlane_beta(dataset_base_dir, json_file_path)
args.im_anchor_origins = self._im_anchor_origins
args.im_anchor_angles = self._im_anchor_angles
else: # assume loading apollo sim 3D lane
self._label_image_path, \
self._label_laneline_all_org, \
self._label_laneline_all, \
self._label_centerline_all, \
self._label_cam_height_all, \
self._label_cam_pitch_all, \
self._laneline_ass_ids, \
self._centerline_ass_ids, \
self._x_off_std, \
self._y_off_std, \
self._z_std, \
self._gt_laneline_visibility_all, \
self._gt_centerline_visibility_all, \
self._gt_laneline_category_all_org, \
self._gt_laneline_category_all, \
self._gt_laneline_im_all, \
self._gt_centerline_im_all, \
self._im_anchor_origins, \
self._im_anchor_angles = self.init_dataset_3D(dataset_base_dir, json_file_path)
args.im_anchor_origins = self._im_anchor_origins
args.im_anchor_angles = self._im_anchor_angles
if hasattr(self, '_label_list'):
self.n_samples = len(self._label_list)
else:
self.n_samples = self._label_image_path.shape[0]
if save_std is True:
with open(ops.join(args.save_path, 'geo_anchor_std.json'), 'w') as jsonFile:
json_out = {}
json_out["x_off_std"] = self._x_off_std.tolist()
if not self.no_3d:
json_out["z_std"] = self._z_std.tolist()
json.dump(json_out, jsonFile)
jsonFile.write('\n')
# # normalize label values: manual execute in main function, in case overwriting stds is needed
# self.normalize_lane_label()
# memcache init
self.use_memcache = args.use_memcache
if self.use_memcache:
from petrel_client.client import Client
self._client = Client("~/petreloss.conf")
def preprocess_data_from_json_openlane(self, idx_json_file):
_label_image_path = None
_label_cam_height = None
_label_cam_pitch = None
cam_extrinsics = None
cam_intrinsics = None
_label_laneline = None
_label_laneline_org = None
_gt_laneline_visibility = None
_gt_laneline_category = None
_gt_laneline_category_org = None
_laneline_ass_id = None
with open(idx_json_file, 'r') as file:
file_lines = [line for line in file]
info_dict = json.loads(file_lines[0])
image_path = ops.join(self.dataset_base_dir, info_dict['file_path'])
assert ops.exists(image_path), '{:s} not exist'.format(image_path)
_label_image_path = image_path
if not self.fix_cam:
cam_extrinsics = np.array(info_dict['extrinsic'])
# Re-calculate extrinsic matrix based on ground coordinate
R_vg = np.array([[0, 1, 0],
[-1, 0, 0],
[0, 0, 1]], dtype=float)
R_gc = np.array([[1, 0, 0],
[0, 0, 1],
[0, -1, 0]], dtype=float)
cam_extrinsics[:3, :3] = np.matmul(np.matmul(
np.matmul(np.linalg.inv(R_vg), cam_extrinsics[:3, :3]),
R_vg), R_gc)
cam_extrinsics[0:2, 3] = 0.0
# gt_cam_height = info_dict['cam_height']
gt_cam_height = cam_extrinsics[2, 3]
if 'cam_pitch' in info_dict:
gt_cam_pitch = info_dict['cam_pitch']
else:
gt_cam_pitch = 0
if 'intrinsic' in info_dict:
cam_intrinsics = info_dict['intrinsic']
cam_intrinsics = np.array(cam_intrinsics)
else:
cam_intrinsics = self.K
_label_cam_height = gt_cam_height
_label_cam_pitch = gt_cam_pitch
gt_lanes_packed = info_dict['lane_lines']
gt_lane_pts, gt_lane_visibility, gt_laneline_category = [], [], []
for i, gt_lane_packed in enumerate(gt_lanes_packed):
# A GT lane can be either 2D or 3D
# if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
lane = np.array(gt_lane_packed['xyz'])
lane_visibility = np.array(gt_lane_packed['visibility'])
# Coordinate conversion for openlane_300 data
lane = np.vstack((lane, np.ones((1, lane.shape[1]))))
cam_representation = np.linalg.inv(
np.array([[0, 0, 1, 0],
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 0, 1]], dtype=float)) # transformation from apollo camera to openlane camera
lane = np.matmul(cam_extrinsics, np.matmul(cam_representation, lane))
lane = lane[0:3, :].T
gt_lane_pts.append(lane)
gt_lane_visibility.append(lane_visibility)
if 'category' in gt_lane_packed:
lane_cate = gt_lane_packed['category']
if lane_cate == 21: # merge left and right road edge into road edge
lane_cate = 20
gt_laneline_category.append(lane_cate)
else:
gt_laneline_category.append(1)
# _label_laneline_org = copy.deepcopy(gt_lane_pts)
_gt_laneline_category_org = copy.deepcopy(np.array(gt_laneline_category))
if not self.fix_cam:
cam_K = cam_intrinsics
if 'openlane' in self.dataset_name:
cam_E = cam_extrinsics
P_g2im = projection_g2im_extrinsic(cam_E, cam_K)
H_g2im = homograpthy_g2im_extrinsic(cam_E, cam_K)
else:
gt_cam_height = _label_cam_height
gt_cam_pitch = _label_cam_pitch
P_g2im = projection_g2im(gt_cam_pitch, gt_cam_height, cam_K)
H_g2im = homograpthy_g2im(gt_cam_pitch, gt_cam_height, cam_K)
H_im2g = np.linalg.inv(H_g2im)
else:
P_g2im = self.P_g2im
H_im2g = self.H_im2g
P_g2gflat = np.matmul(H_im2g, P_g2im)
gt_lanes = gt_lane_pts
gt_visibility = gt_lane_visibility
gt_category = gt_laneline_category
# prune gt lanes by visibility labels
gt_lanes = [prune_3d_lane_by_visibility(gt_lane, gt_visibility[k]) for k, gt_lane in enumerate(gt_lanes)]
_label_laneline_org = copy.deepcopy(gt_lanes)
# pruning out-of-range points is necessary before transformation
gt_lanes = [prune_3d_lane_by_range(gt_lane, 3*self.x_min, 3*self.x_max) for gt_lane in gt_lanes]
gt_lanes = [lane for lane in gt_lanes if lane.shape[0] > 1]
# convert 3d lanes to flat ground space
self.convert_lanes_3d_to_gflat(gt_lanes, P_g2gflat)
gt_anchors = []
ass_ids = []
visibility_vectors = []
category_ids = []
if self.new_match:
frame_x_off_values, frame_z_values, frame_visibility_vectors = [], [], []
for i in range(len(gt_lanes)):
# convert gt label to anchor label
# consider individual out-of-range interpolation still visible
ass_id, x_off_values, z_values, visibility_vec = self.convert_label_to_anchor(gt_lanes[i], H_im2g)
if not self.new_match:
if ass_id >= 0:
gt_anchors.append(np.vstack([x_off_values, z_values]).T)
ass_ids.append(ass_id)
visibility_vectors.append(visibility_vec)
category_ids.append(gt_category[i])
else:
if len(x_off_values) > 0:
frame_x_off_values.append(x_off_values)
frame_z_values.append(z_values)
frame_visibility_vectors.append(visibility_vec)
if self.new_match:
frame_x_off_values, frame_z_values, frame_visibility_vectors = np.array(frame_x_off_values), np.array(frame_z_values), np.array(frame_visibility_vectors)
if frame_visibility_vectors.shape[0] > 0: # frame has lane
frame_visibility_vectors_expand = np.repeat(np.expand_dims(frame_visibility_vectors, axis=1), self.anchor_num, axis=1)
frame_x_off_norm = np.linalg.norm(np.multiply(frame_x_off_values, frame_visibility_vectors_expand), axis=2) / np.sum(frame_visibility_vectors_expand, axis=2)
for an_id in range(frame_x_off_values.shape[1]):
anchor_gts_x_off = frame_x_off_norm[:, an_id]
matched_gt_id = np.argmin(anchor_gts_x_off)
# # decide whether match, use different threshold for anchors of different shear angle
# if an_id < 3 * self.anchor_num_before_shear and anchor_gts_x_off[matched_gt_id] < self.match_dist_thre_3d:
# matched = True
# elif an_id >= 3 * self.anchor_num_before_shear and an_id < 5 * self.anchor_num_before_shear and anchor_gts_x_off[matched_gt_id] < 2 * self.match_dist_thre_3d:
# matched = True
# elif an_id >= 5 * self.anchor_num_before_shear and anchor_gts_x_off[matched_gt_id] < 4 * self.match_dist_thre_3d:
# matched = True
# else:
# matched = False
# if matched:
if anchor_gts_x_off[matched_gt_id] < self.match_dist_thre_3d:
gt_anchors.append(np.vstack([frame_x_off_values[matched_gt_id, an_id], frame_z_values[matched_gt_id]]).T)
ass_ids.append((an_id, matched_gt_id))
visibility_vectors.append(frame_visibility_vectors[matched_gt_id])
category_ids.append(gt_category[matched_gt_id])
_laneline_ass_id = ass_ids
_label_laneline = gt_anchors
_gt_laneline_visibility = visibility_vectors
_gt_laneline_category = category_ids
# normalize x and z, in place of normalize_lane_label
for lane in _label_laneline:
lane[:, 0] = np.divide(lane[:, 0], self._x_off_std)
if not self.no_3d:
lane[:, 1] = np.divide(lane[:, 1], self._z_std)
return _label_image_path, _label_cam_height, _label_cam_pitch, cam_extrinsics, cam_intrinsics, \
_label_laneline, _label_laneline_org, _gt_laneline_visibility, _gt_laneline_category, \
_gt_laneline_category_org, _laneline_ass_id
def __len__(self):
"""
Conventional len method
"""
return self.n_samples
# new getitem, WIP
def WIP__getitem__(self, idx):
"""
Args: idx (int): Index in list to load image
"""
idx_json_file = self._label_list[idx]
# preprocess data from json file
_label_image_path, _label_cam_height, _label_cam_pitch, cam_extrinsics, cam_intrinsics, \
_label_laneline, _label_laneline_org, _gt_laneline_visibility, _gt_laneline_category, \
_gt_laneline_category_org, _laneline_ass_id = self.preprocess_data_from_json_openlane(idx_json_file)
with open(idx_json_file, 'r') as file:
file_lines = [line for line in file]
info_dict = json.loads(file_lines[0])
# fetch camera height and pitch
if not self.fix_cam:
gt_cam_height = _label_cam_height
gt_cam_pitch = _label_cam_pitch
if 'openlane' in self.dataset_name:
intrinsics = cam_intrinsics
extrinsics = cam_extrinsics
else:
# should not be used
intrinsics = self.K
extrinsics = np.zeros((3,4))
extrinsics[2,3] = gt_cam_height
else:
gt_cam_height = self.cam_height
gt_cam_pitch = self.cam_pitch
# should not be used
intrinsics = self.K
extrinsics = np.zeros((3,4))
extrinsics[2,3] = gt_cam_height
img_name = _label_image_path
if 'openlane' in self.dataset_name:
pattern = "/segment-(.*)_with_camera_labels"
seg_result = re.search(pattern=pattern, string=img_name)
# print(seg_result.group(1))
seg_name = seg_result.group(1)
else:
seg_name = str(idx)
if self.use_memcache:
# use memcache to accelerate
img_bytes = self._client.get(str(img_name))
assert(img_bytes is not None)
img_mem_view = memoryview(img_bytes)
img_array = np.frombuffer(img_mem_view, np.uint8)
image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = (Image.fromarray(image))
# print(type(image))
# if isinstance(image, Image.Image):
# print(str(image.size))
# else:
# print(len(image))
# assert(1==0)
else:
# original way
with open(img_name, 'rb') as f:
image = (Image.open(f).convert('RGB'))
# image preprocess with crop and resize
image = F.crop(image, self.h_crop, 0, self.h_org-self.h_crop, self.w_org)
image = F.resize(image, size=(self.h_net, self.w_net), interpolation=InterpolationMode.BILINEAR)
gt_anchor = np.zeros([self.anchor_num, self.num_types, self.anchor_dim], dtype=np.float32)
gt_anchor[:, :, self.anchor_dim - self.num_category] = 1.0
gt_lanes = _label_laneline
gt_vis_inds = _gt_laneline_visibility
# gt_laneline_img = self._gt_laneline_im_all[idx]
gt_category_2d = _gt_laneline_category_org
gt_category_3d = _gt_laneline_category
for i in range(len(gt_lanes)):
# if ass_id >= 0:
if not self.new_match:
ass_id = _laneline_ass_id[i]
else:
ass_id = _laneline_ass_id[i][0]
x_off_values = gt_lanes[i][:, 0]
z_values = gt_lanes[i][:, 1]
visibility = gt_vis_inds[i]
# assign anchor tensor values
gt_anchor[ass_id, 0, 0: self.num_y_steps] = x_off_values
if not self.no_3d:
gt_anchor[ass_id, 0, self.num_y_steps:2*self.num_y_steps] = z_values
gt_anchor[ass_id, 0, 2*self.num_y_steps:3*self.num_y_steps] = visibility
# gt_anchor[ass_id, 0, -1] = 1.0
if 'openlane' not in self.dataset_name:
gt_anchor[ass_id, 0, self.anchor_dim - self.num_category] = 0.0
gt_anchor[ass_id, 0, -1] = 1.0
else:
gt_anchor[ass_id, 0, self.anchor_dim - self.num_category] = 0.0
gt_anchor[ass_id, 0, self.anchor_dim - self.num_category + gt_category_3d[i]] = 1.0
if self.data_aug:
img_rot, aug_mat = data_aug_rotate(image)
image = Image.fromarray(img_rot)
image = self.totensor(image).float()
image = self.normalize(image)
gt_anchor = gt_anchor.reshape([self.anchor_num, -1])
gt_anchor = torch.from_numpy(gt_anchor)
gt_cam_height = torch.tensor(gt_cam_height, dtype=torch.float32)
gt_cam_pitch = torch.tensor(gt_cam_pitch, dtype=torch.float32)
gt_category_2d = torch.from_numpy(gt_category_2d)
gt_category_3d = torch.tensor(gt_category_3d, dtype=torch.int)
intrinsics = torch.from_numpy(intrinsics)
extrinsics = torch.from_numpy(extrinsics)
# prepare binary segmentation label map
seg_label = np.zeros((self.h_net, self.w_net), dtype=np.int8)
gt_lanes = _label_laneline_org
gt_laneline_img = [0] * len(gt_lanes)
for i, lane in enumerate(gt_lanes):
# project lane3d to image
if self.no_3d:
x_2d = lane[:, 0]
y_2d = lane[:, 1]
H_g2im, P_g2im, H_crop, H_im2ipm = self.transform_mats_impl(cam_extrinsics, \
cam_intrinsics, _label_cam_pitch, _label_cam_height)
M = H_crop
# update transformation with image augmentation
if self.data_aug:
M = np.matmul(aug_mat, M)
x_2d, y_2d = homographic_transformation(M, x_2d, y_2d)
gt_laneline_img[i] = np.array([x_2d, y_2d]).T.tolist()
else:
H_g2im, P_g2im, H_crop, H_im2ipm = self.transform_mats_impl(cam_extrinsics, \
cam_intrinsics, _label_cam_pitch, _label_cam_height)
M = np.matmul(H_crop, P_g2im)
# update transformation with image augmentation
if self.data_aug:
M = np.matmul(aug_mat, M)
x_2d, y_2d = projective_transformation(M, lane[:, 0],
lane[:, 1], lane[:, 2])
gt_laneline_img[i] = np.array([x_2d, y_2d]).T.tolist()
for j in range(len(x_2d) - 1):
seg_label = cv2.line(seg_label,
(int(x_2d[j]), int(y_2d[j])), (int(x_2d[j+1]), int(y_2d[j+1])),
color=np.asscalar(np.array([1])))
seg_label = torch.from_numpy(seg_label.astype(np.float32))
seg_label.unsqueeze_(0)
if len(gt_lanes) > self.max_lanes:
print(img_name + " has over 20 lanes")
gt_laneline_img = gt_laneline_img[:self.max_lanes]
gt_laneline_img = self.transform_annotation(gt_laneline_img, gt_category_2d, img_wh=(self.w_net, self.h_net))
gt_laneline_img = torch.from_numpy(gt_laneline_img.astype(np.float32))
# gt_centerline_img = self.transform_annotation(gt_centerline_img, img_wh=(self.w_net, self.h_net))
if self.seg_bev:
gt_anchor_bev = np.copy(gt_anchor)
unormalize_lane_anchor(gt_anchor_bev, self)
seg_bev_map = np.zeros((self.ipm_h, self.ipm_w), dtype=np.int8)
seg_bev_map = self.draw_on_ipm_seg_bev(seg_bev_map, gt_anchor_bev, width=self.lane_width)
seg_bev_map = torch.from_numpy(seg_bev_map.astype(np.float32))
seg_bev_map.unsqueeze_(0)
if self.seg_bev:
if self.data_aug:
aug_mat = torch.from_numpy(aug_mat.astype(np.float32))
# print(type(aug_mat)) <class 'numpy.ndarray'>
return idx_json_file, image, seg_label, gt_anchor, gt_laneline_img, idx, gt_cam_height, gt_cam_pitch, intrinsics, extrinsics, aug_mat, seg_name, seg_bev_map
return idx_json_file, image, seg_label, gt_anchor, gt_laneline_img, idx, gt_cam_height, gt_cam_pitch, intrinsics, extrinsics, seg_name, seg_bev_map
else:
if self.data_aug:
aug_mat = torch.from_numpy(aug_mat.astype(np.float32))
return idx_json_file, image, seg_label, gt_anchor, gt_laneline_img, idx, gt_cam_height, gt_cam_pitch, intrinsics, extrinsics, aug_mat, seg_name
return idx_json_file, image, seg_label, gt_anchor, gt_laneline_img, idx, gt_cam_height, gt_cam_pitch, intrinsics, extrinsics, seg_name
# old getitem, workable
def __getitem__(self, idx):
"""
Args: idx (int): Index in list to load image
"""
return self.WIP__getitem__(idx)
def init_dataset_3D(self, dataset_base_dir, json_file_path):
"""
:param dataset_info_file:
:return: image paths, labels in unormalized net input coordinates
data processing:
ground truth labels map are scaled wrt network input sizes
"""
# load image path, and lane pts
label_image_path = []
gt_laneline_pts_all = []
gt_centerline_pts_all = []
gt_laneline_visibility_all = []
gt_centerline_visibility_all = []
gt_laneline_category_all = []
gt_cam_height_all = []
gt_cam_pitch_all = []
assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)
with open(json_file_path, 'r') as file:
for line in file:
info_dict = json.loads(line)
image_path = ops.join(dataset_base_dir, info_dict['raw_file'])
assert ops.exists(image_path), '{:s} not exist'.format(image_path)
label_image_path.append(image_path)
gt_lane_pts = info_dict['laneLines']
gt_lane_visibility = info_dict['laneLines_visibility']
for i, lane in enumerate(gt_lane_pts):
# A GT lane can be either 2D or 3D
# if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
lane = np.array(lane)
gt_lane_pts[i] = lane
gt_lane_visibility[i] = np.array(gt_lane_visibility[i])
gt_laneline_pts_all.append(gt_lane_pts)
gt_laneline_visibility_all.append(gt_lane_visibility)
if 'category' in info_dict:
gt_laneline_category = info_dict['category']
gt_laneline_category_all.append(np.array(gt_laneline_category, dtype=np.int32))
else:
gt_laneline_category_all.append(np.ones(len(gt_lane_pts), dtype=np.int32))
if not self.no_centerline:
gt_lane_pts = info_dict['centerLines']
gt_lane_visibility = info_dict['centerLines_visibility']
for i, lane in enumerate(gt_lane_pts):
# A GT lane can be either 2D or 3D
# if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
lane = np.array(lane)
gt_lane_pts[i] = lane
gt_lane_visibility[i] = np.array(gt_lane_visibility[i])
gt_centerline_pts_all.append(gt_lane_pts)
gt_centerline_visibility_all.append(gt_lane_visibility)
if not self.fix_cam:
gt_cam_height = info_dict['cam_height']
gt_cam_height_all.append(gt_cam_height)
gt_cam_pitch = info_dict['cam_pitch']
gt_cam_pitch_all.append(gt_cam_pitch)
label_image_path = np.array(label_image_path)
gt_cam_height_all = np.array(gt_cam_height_all)
gt_cam_pitch_all = np.array(gt_cam_pitch_all)
gt_laneline_pts_all_org = copy.deepcopy(gt_laneline_pts_all)
gt_laneline_category_all_org = copy.deepcopy(gt_laneline_category_all)
anchor_origins = None
anchor_angles = None
if not self.use_default_anchor:
# calculate 2D anchor location by projecting 3D anchor
# Non-perfect method: use fixed camera parameter to ensure fixed anchor on both 2D and 3D
mean_cam_height = np.mean(gt_cam_height_all)
mean_cam_pitch = np.mean(gt_cam_pitch_all)
print("mean_cam_height {}, mean_cam_pitch {}".format(mean_cam_height, mean_cam_pitch))
mean_H_g2im = homograpthy_g2im(mean_cam_pitch, mean_cam_height, self.K)
left_orig, right_orig, bottom_orig = [], [], []
left_angles, right_angles, bottom_angles = [], [], []
for aid, anchor_line in enumerate(self.anchor_grid_x):
end_points_x = np.array([anchor_line[0], anchor_line[-1]])
end_points_y = np.array([self.anchor_y_steps[0], self.anchor_y_steps[-1]])
end_points_u, end_points_v = homographic_transformation(mean_H_g2im, end_points_x, end_points_y)
u1, v1, u2, v2 = end_points_u[0], end_points_v[0], end_points_u[1], end_points_v[1]
angle_rad = np.arctan(((v1 - v2) * self.v_ratio) / ((u1 - u2) * self.u_ratio))
angle_rad = -angle_rad if angle_rad < 0 else np.pi - angle_rad
angle_deg = angle_rad * 180 / np.pi
u_bot = (u1 - u2) / (v1 - v2) * (self.h_org - v1) + u1
if u_bot < 0: # intersect on left edge
v_orig = (v1 - v2) / (u1 - u2) * (-u1) + v1
left_orig.append(v_orig / self.h_org)
left_angles.append(angle_deg)
elif u_bot > self.w_org: # intersect on right edge
v_orig = (v1 - v2) / (u1 - u2) * (self.w_org - u1) + v1
right_orig.append(v_orig / self.h_org)
right_angles.append(angle_deg)
else: # intersect on bottom edge
bottom_orig.append(u_bot / self.w_org)
bottom_angles.append(angle_deg)
anchor_origins = [np.array(left_orig), np.array(right_orig), np.array(bottom_orig)]
anchor_angles = [np.array(left_angles), np.array(right_angles), np.array(bottom_angles)]
# convert labeled laneline to anchor format
gt_laneline_ass_ids = []
gt_centerline_ass_ids = []
lane_x_off_all = []
lane_z_all = []
lane_y_off_all = [] # this is the offset of y when transformed back to 3D
visibility_all_flat = []
gt_laneline_im_all = []
gt_centerline_im_all = []
for idx in range(len(gt_laneline_pts_all)):
# if idx == 936:
# print(label_image_path[idx])
# fetch camera height and pitch
gt_cam_height = gt_cam_height_all[idx]
gt_cam_pitch = gt_cam_pitch_all[idx]
if not self.fix_cam:
P_g2im = projection_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_g2im = homograpthy_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_im2g = np.linalg.inv(H_g2im)
else:
P_g2im = self.P_g2im
H_im2g = self.H_im2g
P_g2gflat =
|
np.matmul(H_im2g, P_g2im)
|
numpy.matmul
|
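A brief sketch of the numpy.matmul completion above; the shapes are placeholders for the image-to-ground homography and ground-to-image projection composed in the loader:

import numpy as np

H_im2g = np.eye(3)             # placeholder 3x3 homography (image -> ground)
P_g2im = np.random.rand(3, 4)  # placeholder 3x4 projection (ground -> image)
# matmul composes the two transforms into a single 3x4 mapping
P_g2gflat = np.matmul(H_im2g, P_g2im)
assert P_g2gflat.shape == (3, 4)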
from pcpca import PCPCA, CPCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, silhouette_score
from sklearn.decomposition import PCA
from scipy import stats
from tqdm import tqdm
from scipy.stats import t as studentT
DATA_PATH = "../../../data/mouse_protein_expression/clean/Data_Cortex_Nuclear.csv"
N_COMPONENTS = 2
def mean_confidence_interval(data, confidence=0.95):
n = data.shape[0]
m, se = np.mean(data, axis=0), stats.sem(data, axis=0)
width = se * stats.t.ppf((1 + confidence) / 2.0, n - 1)
return width
if __name__ == "__main__":
# Read in data
data = pd.read_csv(DATA_PATH)
data = data.fillna(0)
# Get names of proteins
protein_names = data.columns.values[1:78]
# Background
Y_df = data[
(data.Behavior == "C/S")
& (data.Genotype == "Control")
& (data.Treatment == "Saline")
]
Y = Y_df[protein_names].values
Y -= Y.mean(0)
Y /= Y.std(0)
Y = Y.T
# Foreground
X_df = data[(data.Behavior == "S/C") & (data.Treatment == "Saline")]
X_df = pd.concat([X_df.iloc[:177, :], X_df.iloc[180:, :]], axis=0)
X = X_df[protein_names].values
X -= X.mean(0)
X /= X.std(0)
X = X.T
p = X.shape[0]
n, m = X.shape[1], Y.shape[1]
import matplotlib
font = {"size": 20}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
gamma_range_pcpca = list(np.linspace(0, 0.99, 5))
gamma_range_cpca = np.linspace(0, 10, 5)
sigma2_range = np.arange(0, 5.5, 0.5)
n_repeats = 50
plt.figure(figsize=(14, 5))
for plot_ii, noise_type in enumerate(["N", "t"]):
best_gammas_cpca = []
best_gammas_pcpca = []
best_cluster_scores_cpca = np.empty((n_repeats, len(sigma2_range)))
best_cluster_scores_pcpca = np.empty((n_repeats, len(sigma2_range)))
with tqdm(total=n_repeats * len(sigma2_range)) as pbar:
for repeat_ii in range(n_repeats):
for sigma2_ii, sigma2 in enumerate(sigma2_range):
# print("Sigma2 = {}".format(sigma2))
# Add noise
if noise_type == "N":
noise_X = np.random.normal(loc=0, scale=
|
np.sqrt(sigma2)
|
numpy.sqrt
|
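A short sketch of the numpy.sqrt completion above; np.random.normal takes a standard deviation, so the variance sigma2 is square-rooted first (sizes below are illustrative):

import numpy as np

sigma2 = 2.5
p, n = 77, 100  # illustrative dimensions for the noise block
noise_X = np.random.normal(loc=0, scale=np.sqrt(sigma2), size=(p, n))
assert abs(noise_X.std() - np.sqrt(sigma2)) < 0.5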
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import h5py
import math
import numpy as np
import platform
import random
from datetime import datetime as dt
from warnings import warn
# noinspection PyPep8Naming
class FauxSixK(h5py.Group):
"""
Creates a Faux '6K Compumotor' Group in a HDF5 file.
"""
_MAX_CONFIGS = 4
# noinspection PyProtectedMember
class _knobs(object):
"""
A class that contains all the controls for specifying the
'6K Compumotor' control group structure.
"""
def __init__(self, val):
super().__init__()
self._faux = val
@property
def n_configs(self):
"""Number of 6K configurations"""
return self._faux._n_configs
@n_configs.setter
def n_configs(self, val: int):
"""Set number of 6K configurations"""
if 1 <= val <= self._faux._MAX_CONFIGS and isinstance(val, int):
if val != self._faux._n_configs:
self._faux._n_configs = val
self._faux._n_probes = self._faux._n_configs
if val > 1:
self._faux._n_motionlists = 1
self._faux._update()
else:
warn("`val` not valid, no update performed")
@property
def n_motionlists(self):
"""
Number of motion lists used. Will always be one unless
:code:`n_configs == 1` and then :code:`n_motionlists >= 1`
"""
return self._faux._n_motionlists
@n_motionlists.setter
def n_motionlists(self, val):
"""Setter for n_motionlists"""
if val >= 1 and isinstance(val, int):
if val != self._faux._n_motionlists and self._faux._n_configs == 1:
self._faux._n_motionlists = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
@property
def sn_size(self):
"""Number of shot numbers in dataset"""
return self._faux._sn_size
@sn_size.setter
def sn_size(self, val):
"""Set the number of shot numbers in the dataset"""
if val >= 1 and isinstance(val, int):
if val != self._faux._sn_size:
self._faux._sn_size = val
self._faux._update()
else:
warn("`val` not valid, no update performed")
def reset(self):
"""Reset '6K Compumotor' group to defaults."""
self._faux._n_configs = 1
self._faux._n_probes = 1
self._faux._n_motionlists = 1
self._faux._sn_size = 100
self._faux._update()
def __init__(self, id, n_configs=1, n_motionlists=1, sn_size=100, **kwargs):
# ensure id is for a HDF5 group
if not isinstance(id, h5py.h5g.GroupID):
raise ValueError(f"{id} is not a GroupID")
# create control group
gid = h5py.h5g.create(id, b"6K Compumotor")
h5py.Group.__init__(self, gid)
# store number of configurations
self._n_configs = n_configs
self._n_probes = n_configs
self._n_motionlists = n_motionlists if n_configs == 1 else 1
# define number of shot numbers
self._sn_size = sn_size
# set root attributes
self._set_6K_attrs()
# build control device sub-groups, datasets, and attributes
self._update()
@property
def knobs(self):
"""Knobs for controlling structure of control device group"""
return self._knobs(self)
@property
def n_probes(self):
"""Number of probes drives used"""
return self._n_probes
@property
def config_names(self):
"""list of configuration names"""
return tuple(self._configs)
def _update(self):
"""
Updates control group structure (Groups, Datasets, and
Attributes)
"""
# clear group before rebuild
self.clear()
# re-initialize key lists
# self._config_names = []
self._probe_names = []
self._motionlist_names = []
# re-initialize key dicts
self._configs = {}
# add sub-groups
self._add_probe_groups()
self._add_motionlist_groups()
# add datasets
self._add_datasets()
def _set_6K_attrs(self):
"""Sets the '6K Compumotor' group attributes"""
self.attrs.update(
{
"Created date": np.bytes_("5/21/2004 4:09:05 PM"),
"Description": np.bytes_(
"Controls XY probe drives using the 6K "
"Compumotor motor controller."
),
"Device name": np.bytes_("6K Compumotor"),
"Module IP address": np.bytes_("192.168.7.6"),
"Module VI path": np.bytes_(
"C:\\ACQ II home\\Modules\\XY probe drive\\XY probe drive.vi"
),
"Type": np.bytes_("Motion"),
}
)
def _add_probe_groups(self):
"""Adds all probe groups"""
# - define probe names
# - define receptacle number
# - define configuration name
# - create probe groups and sub-groups
# - define probe group attributes
for i in range(self._n_configs):
# define probe name
pname = f"probe{i+1:02}"
self._probe_names.append(pname)
# define receptacle number
if self._n_configs == 1:
receptacle = random.randint(1, self._MAX_CONFIGS)
else:
receptacle = i + 1
# create probe group
probe_gname = f"Probe: XY[{receptacle}]: {pname}"
self.create_group(probe_gname)
self.create_group(f"{probe_gname}/Axes[0]")
self.create_group(f"{probe_gname}/Axes[1]")
# set probe group attributes
self[probe_gname].attrs.update(
{
"Calibration": np.bytes_("2004-06-04 0.375 inch calibration"),
"Level sy (cm)": np.float64(70.46),
"Port": np.uint8(27),
"Probe": np.bytes_(pname),
"Probe channels": np.bytes_(""),
"Probe type": np.bytes_("LaPD probe"),
"Receptacle":
|
np.int8(receptacle)
|
numpy.int8
|
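A minimal sketch of the numpy.int8 completion above; the faux group writes HDF5 attributes with fixed-width NumPy scalar types, and the receptacle number fits in a signed 8-bit integer (the value is illustrative):

import numpy as np

receptacle = 3
value = np.int8(receptacle)
assert value.dtype == np.int8 and int(value) == 3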
# ******************************************************************************
# Name: Calculate all BC4-based Adinkras
# Author: <NAME>
# Email: <EMAIL>
# Date: December 2016
# Version: 1.3A
#
# Description: The code calculates all unique 36,864 ordered BC4-based
# adinkras with four colors, four open-nodes and four closed nodes and then
# calculates the Vij holoraumy matrices and the Gadget values
#
# ******************************************************************************
# ******************************************************************************
# Library Imports
import sys, math, time
import numpy as np
import numpy.matlib
import itertools
from numpy import array
from numpy.linalg import inv
# ******************************************************************************
# Function Imports
import vij_holoraumy_calc
# ******************************************************************************
# Main() function.
def main():
print("# ***********************************************************************")
print("# Name: Caculate all BC4-based Adinkras")
print("# Author: <NAME> ")
print("# Email: <EMAIL>")
print("# Date: December 2016 ")
print("# Version: 1.#A ")
print("# ")
print("# Description: Calculates all unique 36,864 ordered BC4-based adinkras")
print("# with four colors, four open-nodes and four closed nodes. ")
print("# ")
print("# ***********************************************************************")
print(" ")
calc_all_adinkras(4)
# ******************************************************************************
# Calculate all possible Adinkra (L matrix tetrads) in BC4 space
def calc_all_adinkras(n):
""" Main tetrad list """
numonly_tetrads = []
tetrad_reftup = []
res = []
calc_check = []
pcal_check = []
invs_check = []
trnp_check = []
sign_pmat = gen_signm(n)
uperm_mat = gen_dpermut_mat(n)
# duplik_one = []
# duplik_extras = []
for x in sign_pmat:
t1 = np.asmatrix(x)
for y in uperm_mat:
t2 = np.asmatrix(y)
dprod = np.dot(t1,t2)
dprod2 = np.dot(t1,t2)
if np.array_equal(dprod, dprod2):
res.append(dprod)
# if duplik_one:
# if [mx for mx in duplik_one if np.array_equal(dprod, mx)]:
# duplik_extras.append(dprod)
# else:
# duplik_one.append(dprod)
# else:
# duplik_one.append(dprod)
sigflip = np.multiply(dprod, -1)
trnpmat = np.transpose(dprod)
if calc_check:
if [mt for mt in calc_check if np.array_equal(mt, sigflip)]:
pcal_check.append(dprod)
else:
calc_check.append(dprod)
else:
calc_check.append(dprod)
self_inverse = []
for i, im in enumerate(res):
invsmat = inv(im)
trnpmat = np.transpose(im)
# print(im)
for j, jm in enumerate(res):
if i != j:
temp = [i, j]
if np.array_equal(invsmat, jm):
# print("INVERSE")
# print("i, j:", i, j)
# temp = [i, j]
temp.sort()
if temp not in invs_check:
invs_check.append(temp)
else:
pass
if np.array_equal(trnpmat, jm):
# temp = [i, j]
temp.sort()
if temp not in trnp_check:
trnp_check.append(temp)
else:
pass
print("Finishing building 384 matrices")
# print("Checking for duplicates in 384")
# print("Number of unique matrices:", len(duplik_one))
# print("Number of duplicate matrices:", len(duplik_extras))
print("Sign flips removed")
print("Number of sign-flipped matrices:", len(calc_check))
print("Inverse check")
print("Number of inverse duplicate matrices:", len(invs_check))
print("Transpose check")
print("Number of Transpose duplicate matrices:", len(trnp_check))
test_list = []
for xmat in invs_check:
if [mat for mat in trnp_check if np.array_equal(mat, xmat)]:
test_list.append(xmat)
print("Checking if Inverse and Transpose sets are the same")
print("Number of Inverse - Transpose matches:", len(test_list))
""" Creating the 2 * I, 4x4 identity matrix """
idt_mat = 2 * np.matlib.identity(n, dtype=int)
""" Start finding Li - Lj pairs that satisfy 2.4b, 2.4c and 2.4d """
print("")
print("---Finding Matrix Pairs---\n")
for i, li in enumerate(res):
tetrad_reftup.append((i, li))
# Hold the Li and Lj matching matrices tuples
# temp_tup = []
# Holds only the Lj matrices that match Li
temp_m = []
""" Testing temps """
# Temporary lists for error checking/debugging
# temp_tst = []
print("Finding pairs for Li: ", i)
for j, lj in enumerate(res):
# sigflip_lj = np.multiply(lj, -1)
ri = np.transpose(li)
rj = np.transpose(lj)
if i == j:
if np.array_equal(ri,rj):
# tmat = np.dot(li,ri) + np.dot(li,ri)
tmat = 2 * np.dot(li,ri)
rtmat = 2 * np.dot(ri, li)
if np.array_equal(tmat, idt_mat) and np.array_equal(rtmat, idt_mat):
# print("EQ satisfied\n", tmat, idt_mat)
# print("EQ 2.4a satisfied for I = J ",i,j)
pass
else:
# pass
print("EQ 2.4a failed", i, j)
sys.exit("FAILURE")
elif i != j:
tmat = np.dot(li,rj) + np.dot(lj,ri)
rtmat = np.dot(ri,lj) + np.dot(rj,li)
if np.count_nonzero(rtmat) == 0 and np.count_nonzero(tmat) == 0:
# if np.count_nonzero(rtmat) == 0:
if np.array_equal(ri, inv(li)) and np.array_equal(rj, inv(lj)):
# print("EQ 2.4d satisfied", i, j)
# packing away all the 12 matching matrices
temp_m.append([j,lj])
# print("Matching I J pair: ",i,j)
# temp_tup.append((li,lj))
else:
pass
# print("EQ 2.4d failed", i ,j)
# else:
# """ Testing purposes """
# pass
# temp_tst.append([j,lj])
# print("EQ 2.4b, 2.4c failed", i, j)
# """ Check whether the jth, lj matrix is a sign flip of li """
# if np.array_equal(li,sigflip_lj):
# temp_l = [(i,li),(j,lj)]
# if sorted(temp_l,key=lambda item: item[0]) not in negs_filter:
# negs_filter.append(temp_l)
temp_i_tetrad = build_four_pillars([i, li], temp_m)
new_tets = [x for x in temp_i_tetrad if x not in numonly_tetrads]
numonly_tetrads.extend(new_tets)
# print("Break point for new i:", i)
print("Number of pair matrices:",len(temp_m))
print("Length of numonly_tetrads list:", len(numonly_tetrads))
# print("Number of sign-reduced matrices:", len(temp_tst))
print(" <<>> \n")
temp_m = []
# temp_tst = []
main_tetrad = []
# A tetrad is a collection of 4 matrices that all satisfy the pair conditions.
print("# ********************************")
print(" ")
print("Building tetrad matrix list")
print(" ")
print("Printing tetrads")
for tets in numonly_tetrads:
new_tet = []
for mat_num in tets:
if tetrad_reftup[mat_num][0] == mat_num:
temp_tup = tetrad_reftup[mat_num]
new_tet.append(temp_tup)
# print(new_tet)
main_tetrad.append(new_tet)
# mtetrad_size = sys.getsizeof(main_tetrad)
# print("Size of main_tetrads list: bytes / kilobytes:", mtetrad_size, mtetrad_size/1024)
print("Total number of unique tetrad permutations:", len(main_tetrad))
# pie_slicing(main_tetrad)
""" vij_holoraumy_calc proceeds to calculate all the corresponding Vij
matrices for 36864 unique Adinkra tetrads """
vij_holoraumy_calc.calculate_vij_matrices(main_tetrad)
# ******************************************************************************
# Calculate all matches per a 12 match group.
def build_four_pillars(zeropt_mat, doz_mats):
idt_mat = 2 * np.matlib.identity(4, dtype=int)
pt0_mat = zeropt_mat[1]
pt0_mnum = zeropt_mat[0]
tetnum_list = []
""" ark_12of4_pairs is a dict that stores
the index number of 12 matrices that match with the 1st one
as keys. For each 12 matrix index numbers it then saves
the index numbers of 4 other matrices out of the 12 that pairs
match satisfying equation
"""
ark_12of4_pairs = {}
for i, li in enumerate(doz_mats):
num_li = li[0]
matli = li[1]
if (num_li) not in ark_12of4_pairs:
ark_12of4_pairs['%i' % num_li] = []
# print("Finding pairs within the 12 matrices, i = ",num_li)
for j, lj in enumerate(doz_mats):
matlj = lj[1]
num_lj = lj[0]
ri = np.transpose(matli)
rj = np.transpose(matlj)
if i == j:
continue
# if np.array_equal(ri,rj):
# tmat = np.matmul(matli,ri) + np.matmul(matli,ri)
# if np.array_equal(tmat, idt_mat):
# # print("EQ 2.4a satisfied for I = J ",num_li, num_lj)
# pass
# else:
# print("FAILURE EQ 2.4a not satisfied\n", tmat, num_lj)
elif i != j:
tmat = np.dot(matli,rj) + np.dot(matlj,ri)
rtmat = np.dot(ri,matlj) + np.dot(rj,matli)
if np.count_nonzero(rtmat) == 0:
if np.array_equal(ri, inv(matli)):
# if np.array_equal(ri, inv(matli)) and np.array_equal(rj, inv(matlj)):
ark_12of4_pairs[str(num_li)].append(num_lj)
elif np.count_nonzero(rtmat) != 0:
pass
# print("Not matching I J pair: ",num_li,num_lj)
""" Build all the possible tetrad combinations """
for key, four_pairs in ark_12of4_pairs.items():
""" Hold 3 matrix numbers """
temp_tri = []
# print("Building tetrad combinations")
# print(pt0_mnum, key, four_pairs)
# Temp list for storing local tetrads for each of ix pairs.
local_tetrad = []
for oneof4 in four_pairs:
temp_tri.append((pt0_mnum))
temp_tri.append(int(key))
# temp_tri.append((int(key),doz_mats[int(key)-1][0]))
i_4temp = ark_12of4_pairs[str(oneof4)]
# print("Matching pairs for i:", oneof4, i_4temp)
s = set(four_pairs)
temp_tri.append(oneof4)
# ixpairs is the list of matrix indices (from the 12) that pair
# with both the current key matrix and with oneof4.
# This is necessary to construct a tetrad: the 1st member of the
# tetrad is always the 0 matrix, and the other 3 matrices come
# from the list of 12, provided that all 3 pair with each other
# as well.
ixpairs = [m for m in i_4temp if m in s]
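# Hypothetical example: if four_pairs == [2, 5, 7, 9] and the partners of
# oneof4 == 5 are i_4temp == [2, 9, 11], then ixpairs == [2, 9].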
# print("Pair matches for", key, "and ", oneof4, ixpairs)
# print("Matches for:",oneof4,"of the 4")
# print(key, oneof4, ixpairs)
lt = []
for m in ixpairs:
lt.extend(temp_tri)
lt.append(m)
lt.sort()
lt_perm_list = []
if lt not in local_tetrad:
local_tetrad.append(lt)
lt_perm_list = list(itertools.permutations([lt[0],lt[1],lt[2],lt[3]],4))
# print("Length of permutations for ",m," = ", len(lt_perm_list))
# if lt not in tetnum_list:
# tetnum_list.append(lt)
for ltx in lt_perm_list:
if ltx not in tetnum_list:
tetnum_list.append(ltx)
else:
pass
lt = []
""" Wipe the temp_tri for next matrices in the four_pairs """
temp_tri = []
# print("Number of unique tetrads:", len(tetnum_list))
# print(len(tetnum_list))
return tetnum_list
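# ------------------------------------------------------------------------------
# Minimal standalone sketch (not part of the original pipeline) of the pair
# condition tested in build_four_pillars: the transposes must "anticommute"
# with the partner matrix (L_i^T L_j + L_j^T L_i = 0) and each matrix must have
# its transpose equal to its inverse. Assumes numpy is imported as np, as in
# the rest of this script; np.linalg.inv is used in place of the bare inv().
def _is_adinkra_pair_sketch(la, lb):
    """Return True if la, lb satisfy the pairing conditions used above."""
    ra, rb = np.transpose(la), np.transpose(lb)
    anticommutes = np.count_nonzero(np.dot(ra, lb) + np.dot(rb, la)) == 0
    transpose_is_inverse = np.array_equal(ra, np.linalg.inv(la))
    return anticommutes and transpose_is_inverse
# Example (hypothetical matrices): a matrix paired with itself fails the
# anticommutation check, e.g.
# _is_adinkra_pair_sketch(np.eye(4, dtype=int), np.eye(4, dtype=int)) -> False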
# ******************************************************************************
# Find all patterns within the list of tetrads.
def pie_slicing(big_list_oftetrads):
self_kein = []
kein_flip = []
for ind, itet in enumerate(big_list_oftetrads):
# ivt = [n for n ]
ivt = [np.transpose(xm[1]) for xm in itet]
# for i in range(0, len(itet)):
# if np.array_equal(ivt[i], )
for jnd, jtet in enumerate(big_list_oftetrads):
if ind != jnd:
if np.array_equal(ivt[0], jtet[0][1]) and np.array_equal(ivt[1], jtet[1][1]):
if np.array_equal(ivt[2], jtet[2][1]) and
|
np.array_equal(ivt[3], jtet[3][1])
|
numpy.array_equal
|
from nose.tools import *
import analysis.core as core
import analysis.fom as fom
import numpy as np
import os
class TestClass:
@classmethod
def setup_class(cls):
cls.base_dir = './tests/fom_data/'
cls.test_analyzer = fom.Analyzer(cls.base_dir)
cls.cycles = [10, 20, 30]
cls.cpu = np.array([10.5, 20.5, 30.5])
cls.error1 = np.array([0.00030, 0.00020, 0.00010])
cls.error2 = np.array([0.00032, 0.00022, 0.00012])
cls.materror11 = np.array([ 0.00030, 0.00020, 0.00010])
cls.materror12 = np.array([ 0.00032, 0.00022, 0.00012])
cls.materror21 = np.array([ 0.00034, 0.00024, 0.00014])
cls.materror22 = np.array([ 0.00036, 0.00026, 0.00016])
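# The expected values in the tests below follow the figure-of-merit formula
# actually computed in test_fom_fom_values: FOM = 1 / (CPU time * error**2).
# For the first cycle, for instance, 1 / (10.5 * 0.00030**2) ~= 1.06e6.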
def test_fom_file_upload(self):
""" FOM Analyzer should upload all the correct files in a directory """
files = self.test_analyzer.get_filenames()
file_loc = os.path.abspath(self.base_dir) + '/'
eq_(len(files), 3)
ok_(all([file_loc + e in files for e in ['res_10.m', 'res_20.m', 'res_30.m']]))
@raises(AssertionError)
def test_fom_bad_location(self):
""" FOM Analyzer should return an error if a non-existent folder
is given """
base_dir = './wrong_file_name/'
new_analyzer = fom.Analyzer(base_dir)
def test_fom_good_locations(self):
""" FOM Analyzer should work for locations without slash"""
base_dir = './tests/fom_data'
new_analyzer = fom.Analyzer(base_dir)
def test_fom_fom_values(self):
ans = np.power(self.cpu * np.power(self.error1, 2), -1)
func = self.test_analyzer.get_data('TEST_VAL', 1)
ok_(all([e in func[:,1] for e in ans]))
ok_(all([c in func[:,0] for c in self.cycles]))
def test_fom_fom_values_grp(self):
ans = np.power(self.cpu * np.power(self.error2, 2), -1)
func = self.test_analyzer.get_data('TEST_VAL', 2)
ok_(all([e in func[:,1] for e in ans]))
ok_(all([c in func[:,0] for c in self.cycles]))
def test_fom_fom_values_cpu(self):
ans = np.power(self.cpu * np.power(self.error1, 2), -1)
func = self.test_analyzer.get_data('TEST_VAL', 1, cycle = False)
ok_(all([e in func[:,1] for e in ans]))
ok_(all([c in func[:,0] for c in self.cpu]))
def test_fom_fom_length(self):
eq_(np.shape(self.test_analyzer.get_data('TEST_VAL', 1)), (3,2))
def test_fom_err_values(self):
func = self.test_analyzer.get_data('TEST_VAL', 1, fom = False)
ok_(all([e in func[:,1] for e in self.error1]))
ok_(all([c in func[:,0] for c in self.cycles]))
def test_fom_err_size(self):
eq_(np.shape(self.test_analyzer.get_data('TEST_VAL', 1, fom = False)), (3,2))
def test_fom_fom_multigroup(self):
""" FOM data should return the correct values for multiple groups """
ans1 = np.power(self.cpu * np.power(self.error1, 2), -1)
ans2 = np.power(self.cpu * np.power(self.error2, 2), -1)
func = self.test_analyzer.get_data('TEST_VAL', [1,2], fom = True)
eq_(np.shape(func), (3,3))
ok_(all([c in func[:,0] for c in self.cycles]))
ok_(all([e in func[:,1] for e in ans1]))
ok_(all([e in func[:,2] for e in ans2]))
def test_fom_err_mat(self):
""" FOM data should return the correct error values"""
func = self.test_analyzer.get_data('TEST_MAT', [(1,1),(1,2),(2,1),(2,2)], fom = False)
eq_(np.shape(func), (3,5))
ok_(all([c in func[:,0] for c in self.cycles]))
ok_(all([e in func[:,1] for e in self.materror11]))
ok_(all([e in func[:,2] for e in self.materror12]))
ok_(all([e in func[:,3] for e in self.materror21]))
ok_(all([e in func[:,4] for e in self.materror22]))
def test_fom_err_mat_entry(self):
""" FOM data should work for single entries """
func = self.test_analyzer.get_data('TEST_MAT', (1,1), fom = False)
eq_(np.shape(func), (3,2))
ok_(all([c in func[:,0] for c in self.cycles]))
ok_(all([e in func[:,1] for e in self.materror11]))
def test_fom_err_mat_shape(self):
""" FOM data should return the correct shape of err"""
func = self.test_analyzer.get_data('TEST_MAT', [(1,1),(1,2),(2,1),(2,2)], fom = False)
eq_(np.shape(func), (3,5))
@raises(AssertionError)
def test_fom_err_mat_nonmatrix(self):
""" FOM data should throw an assertion error if entries of a non-matrix
quantity is requested """
func = self.test_analyzer.get_data('TEST_VAL', [(1,1),(1,2),(2,1),(2,2)], fom = False)
@raises(AssertionError)
def test_fom_err_mat_invalid_entries(self):
""" FOM data should throw an assertion error if invalid entries
are passed """
func = self.test_analyzer.get_data('TEST_MAT', [(3,1), (1,1),
(5,2)], fom = False)
def test_fom_fom_mat(self):
""" FOM data should return the correct FOM values"""
func = self.test_analyzer.get_data('TEST_MAT', [(1,1),(1,2),(2,1),(2,2)], fom = True)
ans11 = np.power(self.cpu * np.power(self.materror11, 2), -1)
ans21 = np.power(self.cpu * np.power(self.materror21, 2), -1)
ans12 = np.power(self.cpu *
|
np.power(self.materror12, 2)
|
numpy.power
|
import math
import random
from dataclasses import dataclass, field
from functools import lru_cache
import numpy as np
import utils
from .policy import random_policy
@dataclass
class MCNode:
state: tuple
parent: 'MCNode' = None
successors: dict = field(default_factory=dict)
Q: float = 0 # Expected reward
E: float = 0 # Total reward (evaluation)
N: int = 0 # Num. visits
player: int = 0
#@lru_cache(maxsize=None) # Memoizes node generation by state
@staticmethod
def from_state(state):
return MCNode(state=state, player=state[0])
def uct(self, c, is_max=False): # TODO: Maybe try math.ln(...)
"""Returns the Upper Confidence Bound for Trees (UCT) metric."""
if is_max:
return self.Q + c * (math.log(self.parent.N) / (1 + self.N))**0.5
else:
return self.Q - c * (math.log(self.parent.N) / (1 + self.N))**0.5
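# The UCT value above is the expected reward Q plus (for the maximising
# player) or minus (for the minimising player) the exploration bonus
# c * sqrt(ln(parent.N) / (1 + N)). Hypothetical numbers: with Q = 0.5,
# c = 1, parent.N = 100 and N = 9 the bonus is sqrt(ln(100) / 10) ~= 0.68.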
def __str__(self):
if self.N == 0:
return ""
board = self.state[1:]
sides = int(len(board)**0.5)
board =
|
np.array(board, dtype=int)
|
numpy.array
|
from modules.CycleGan import CycleGan
from utils.DataLoader import DataLoader
import numpy as np
import cv2
size=256
imageShape=(size,size,3)
batchSize=1
dl=DataLoader(path="dataset",batchSize=batchSize,imageSize=size*2)
cgan=CycleGan(imageShape[0], imageShape[1], imageShape[2],batchSize=batchSize)
modelSavePath='cgan_saved-163 '+str(size)
cgan.loadModel(modelSavePath)
dataset = dl.getGenerater()
i=0
data=[]
for d in dataset:
i+=1
if i==105:
data=d
break
datasetX =
|
np.array(data[0])
|
numpy.array
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import methfun as mf
import methdata as md
from scipy.interpolate import UnivariateSpline
# to register datetimes in matplotlib
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
mf.matplotlib_update_settings() # make prettier plots
# to avoid displaying plots: change backend
import matplotlib
# matplotlib.use('Agg')
matplotlib.use('Qt5Agg')
# os.environ['PATH'] = os.environ['PATH'] + ':/'
# length of time series
N = 2**14
tdf0 = pd.read_csv(os.path.join(md.outdir_data, 'results_tdf.csv'),index_col=0)
sdf0 = pd.read_csv(os.path.join(md.outdir_data, 'scalars_sdf.csv'),
header=[0,1], index_col = 0)
# fwat = sdf0['flux_ec', 'H2O'].values
# q005fwat = np.quantile(fwat, 0.05)
# q01fwat = np.quantile(fwat, 0.25)
# q05fwat = np.quantile(fwat, 0.5)
# q095fwat = np.quantile(fwat,0.95)
##################################### CONDITION FOR RETAINING RUNS:: ###########
cond1 = (tdf0['ustar'] > 0.2) \
& (tdf0['is_stationary'] == True) \
& (tdf0['exists'] == True) \
& (tdf0['windir'] > 230) \
& (tdf0['windir'] < 270) \
& (tdf0['length'] >= N)\
& (tdf0['h2o_is_local'] == True) \
& (sdf0['flux_ec', 'H2O'] > 0)
###################################### FOR PLOTTING ONLY:: #####################
cond_noz0 = (tdf0['ustar'] > 0.2) \
& (tdf0['is_stationary'] == True) \
& (tdf0['exists'] == True) \
& (tdf0['length'] >= N) \
& (tdf0['h2o_is_local'] == True) \
& (sdf0['flux_ec', 'H2O'] > 0)
############### OTHER CONDITIONS, JUST TO COUNT HOW MANY RUNS ARE EXCLUDED:: ###
cond_turb_intensity = (tdf0['ustar'] > 0.2)
cond_wind_sector = (tdf0['windir'] > 230) & (tdf0['windir'] < 270)
cond_stationary = (tdf0['is_stationary'] == True)
cond_water_vapor = (tdf0['h2o_is_local'] == True) & (sdf0['flux_ec', 'H2O'] > 0)
nruns_turb_int = np.size(cond_turb_intensity[cond_turb_intensity > 0])
nruns_wind_sector = np.size(cond_wind_sector[cond_wind_sector > 0])
nruns_stationary = np.size(cond_stationary[cond_stationary > 0])
nruns_water_vapor = np.size(cond_water_vapor[cond_water_vapor > 0])
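# Note: np.size(cond[cond > 0]) just counts the True entries of each boolean
# mask; int(cond.sum()) or np.count_nonzero(cond) would give the same counts.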
# only to plot all wind directions::
tdfnoz0 = tdf0[cond_noz0].copy()
sdfnoz0 = sdf0[cond_noz0].copy()
# for the rest of the analysis::
tdf = tdf0[cond1].copy()
sdf = sdf0[cond1].copy()
rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf.csv'), index_col=0)
# rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf_0.5.csv'), index_col=0)
# rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf_0.3.csv'), index_col=0)
nruns = np.shape(rdf)[0]
rdf['datetime'] = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
sdf['datetime'] = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
tdf['datetime'] = pd.to_datetime(tdf['csv_name'], format='%Y%m%d_%H%M')
tdf0['datetime'] = pd.to_datetime(tdf0['csv_name'], format='%Y%m%d_%H%M')
sdf0['datetime'] = pd.to_datetime(tdf0['csv_name'], format='%Y%m%d_%H%M')
# datetimes = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M', errors='ignore')
print('total number of runs (after processing the raw data) = ', nruns)
print('total number of runs in this wind sector = {}'.format(nruns_wind_sector))
print('total number of runs passing stationarity checks = {}'.format(nruns_stationary))
print('total number of runs passing water vapor checks = {}'.format(nruns_water_vapor))
print('total number of runs passing turbulence intensity checks = {}'.format(nruns_turb_int))
# plt.figure()
# plt.plot(rdf['wc_wa_M30'], rdf['wc_en_M30'], 'or')
# plt.plot(rdf['wc_wa_M30'], rdf['wc_me_M30'], 'og')
# plt.plot(rdf['wc_wa_M30'], rdf['wc_wa_M30'], 'k')
# plt.plot(rdf['wc_wa_M30'], rdf['wc_ba_M30'], 'oc')
# plt.plot(rdf['wc_wa_M30'], rdf['wc_bar_M30'], 'oy')
# plt.show()
# plt.plot(rdf['wc_wa_M30'], rdf['wc_en_M30'], 'or')
#
# plt.figure()
# plt.scatter(rdf['sr_tewa'].values, rdf['sr_teme'].values, alpha = 0.6, s = 18, color = 'g', marker = '^', label = 'CH4')
# plt.scatter(rdf['sr_tewa'].values, rdf['sr_teba'].values, alpha = 0.6, s = 18, color = 'c', marker = '^', label = 'CH4')
# plt.scatter(rdf['sr_tewa'].values, rdf['sr_teen'].values, alpha = 0.6, s = 18, color = 'r', marker = '^', label = 'CH4')
# plt.scatter(rdf['srco2_tewa'].values, -rdf['srco2_teme'].values, alpha = 0.6, s = 18, color = 'y', marker = '^', label = 'CO2')
# plt.plot(rdf['sr_tewa'].values, rdf['sr_tewa'].values, 'k')
# plt.show()
plt.figure()
plt.xlabel('Wind Direction [degrees]')
plt.ylabel(r'Roughness length $z_0$ [m]')
plt.plot(tdf0['windir'], tdf0['z0'], 'o', label = 'All runs', alpha = 0.6)
plt.plot(tdfnoz0['windir'], tdfnoz0['z0'], 'o', label = 'All wind sector', alpha = 0.6)
plt.plot(tdf['windir'], tdf['z0'], 'o', label = 'filtered', alpha = 0.6)
plt.plot([230, 230], [np.min(tdfnoz0['z0'].values), np.max(tdfnoz0['z0'].values)], '--k')
plt.plot([270, 270], [np.min(tdfnoz0['z0'].values), np.max(tdfnoz0['z0'].values)], '--k')
# plt.plot(tdfnoz0['windir'], tdfnoz0['z0'], 'o', label = 'included')
plt.legend()
# plt.yscale()
plt.savefig( os.path.join(md.outdir_plot, 'SI_z0_wind_direction.png'))
plt.close()
plt.figure()
plt.xlabel('Wind Direction [degrees]')
plt.ylabel(r'Transport efficiency $e_T$')
plt.plot(tdfnoz0['windir'], sdfnoz0['eT', 'H2O'], 'ob', label = r'$H_2O$', alpha = 0.4)
plt.plot(tdfnoz0['windir'], sdfnoz0['eT', 'CH4'], 'og', label = r'$CH_4$', alpha = 0.4)
plt.plot(tdfnoz0['windir'], sdfnoz0['eT', 'CO2'], 'oc', label = r'$CO_2$', alpha = 0.4)
plt.plot(tdf['windir'], sdf['eT', 'H2O'], 'ob', alpha = 0.6)
plt.plot(tdf['windir'], sdf['eT', 'CH4'], 'og', alpha = 0.6)
plt.plot(tdf['windir'], sdf['eT', 'CO2'], 'oc', alpha = 0.6)
plt.plot([230, 230], [np.min(sdfnoz0['eT', 'CO2'].values), np.max(sdfnoz0['eT', 'CH4'].values)], '--k')
plt.plot([270, 270], [np.min(sdfnoz0['eT', 'CO2'].values), np.max(sdfnoz0['eT', 'CH4'].values)], '--k')
# plt.plot(tdfnoz0['windir'], tdfnoz0['z0'], 's', label = 'without z0 filter')
# plt.plot(tdf['windir'], sdf['z0'], 'o', label = 'with z0 filter')
# plt.plot(tdfnoz0['windir'], tdfnoz0['z0'], 'o', label = 'included')
plt.legend()
plt.savefig( os.path.join(md.outdir_plot, 'SI_eT_wind_direction.png'))
plt.close()
plt.figure()
plt.xlabel('Wind Direction [degrees]')
plt.ylabel(r'Scalar Skewness $M_{30}$')
plt.plot(tdfnoz0['windir'].values, sdfnoz0['M30', 'H2O'].values, 'ob', alpha = 0.4)
plt.plot(tdfnoz0['windir'].values, sdfnoz0['M30', 'CH4'].values, 'og', alpha = 0.4)
plt.plot(tdf['windir'].values, np.abs(sdf['M30', 'H2O'].values), 'ob', label = r'$H_2O$', alpha = 0.6)
plt.plot(tdf['windir'].values, np.abs(sdf['M30', 'CH4'].values), 'og', label = r'$CH_4$', alpha = 0.6)
plt.plot([230, 230], [np.min(sdfnoz0['M30', 'H2O'].values), np.max(sdfnoz0['M30', 'CH4'].values)], '--k')
plt.plot([270, 270], [np.min(sdfnoz0['M30', 'H2O'].values), np.max(sdfnoz0['M30', 'CH4'].values)], '--k')
# plt.plot(tdf['windir'].values, sdf['flux_ec', 'H1O'].values, 'o')
plt.legend()
plt.yscale('log')
plt.savefig( os.path.join(md.outdir_plot, 'SI_Skew_wind_direction.png'))
plt.close()
# PRINT TRANSPORT EFFICIENCIES
print('-----------------------------------------------------------------------')
print('Transport Efficiencies eT:')
print('methane: mean = {}, stdv = {}'.format(np.mean(rdf['wc_me_eT']), np.std(rdf['wc_me_eT'])))
print('water : mean = {}, stdv = {}'.format(np.mean(rdf['wc_wa_eT']), np.std(rdf['wc_wa_eT'])))
print('carbon : mean = {}, stdv = {}'.format(np.mean(rdf['wc_cd_eT']), np.std(rdf['wc_cd_eT'])))
print('hotspot : mean = {}, stdv = {}'.format(np.mean(rdf['wc_en_eT']), np.std(rdf['wc_en_eT'])))
print('backgr. : mean = {}, stdv = {}'.format(np.mean(rdf['wc_ba_eT']), np.std(rdf['wc_ba_eT'])))
print('-----------------------------------------------------------------------')
fig, axes = plt.subplots(1, 2, figsize = (12, 6))
axes[0].set_ylim([0, 1])
axes[1].set_ylim([0, 1])
# axes[0].set_xlim([0, 1])
# axes[1].set_xlim([0, 1])
axes[0].set_xlabel(r'Transport efficiency $e_T$ ')
axes[0].set_ylabel(r'Transport efficiency $e_T$ ')
axes[1].set_xlabel(r'$H_2O$ Transport efficiency $e_T$ ')
axes[1].set_ylabel(r'$CH_4$ Transport efficiency $e_T$ ')
# axes[0].plot(sdf['eT', 'H2O'], sdf['eT', 'H2O'], 'k')
# axes[1].plot(sdf['eT', 'H2O'], sdf['eT', 'H2O'], 'k')
# axes[0].plot(rdf['wc_wa_eT'], rdf['wc_wa_eT'], 'k')
# axes[0].plot(rdf['wc_wa_eT'], rdf['wc_me_eT'], 'og', label = r'$CH_4$', alpha = 0.7, markersize = 6)
# axes[0].plot(rdf['wc_wa_eT'], rdf['wc_cd_eT'], 'sb', label = r'$CO_2$', alpha = 0.7, markersize = 6)
#
# axes[1].plot(rdf['wc_wa_eT'], rdf['wc_wa_eT'], 'k')
# axes[1].plot(rdf['wc_wa_eT'], rdf['wc_me_eT'], 'og', label = r'$CH_4$ total', alpha = 0.8, markersize = 6)
# axes[1].plot(rdf['wc_we_eT'], rdf['wc_en_eT'], '^r', label = r'$CH_4$ hotspot', alpha = 0.7, markersize = 6)
# axes[1].plot(rdf['wc_wb_eT'], rdf['wc_ba_eT'], 'sc', label = r'$CH_4$ background', alpha = 0.7, markersize = 6)
mrkrsize = 12
axes[0].plot(rdf['wc_wa_eT'], rdf['wc_wa_eT'], color = 'k')
axes[0].scatter(rdf['wc_wa_eT'], rdf['wc_me_eT'], c = 'green', marker='o', label = r'$CH_4$', alpha = 0.7, s = mrkrsize)
axes[0].scatter(rdf['wc_wa_eT'], rdf['wc_cd_eT'], c = 'blue', marker='s', label = r'$CO_2$', alpha = 0.7, s = mrkrsize)
axes[1].plot(rdf['wc_wa_eT'], rdf['wc_wa_eT'], color = 'k')
axes[1].scatter(rdf['wc_wa_eT'], rdf['wc_me_eT'], c = 'green', marker='o', label = r'$CH_4$ total', alpha = 0.8, s = mrkrsize)
axes[1].scatter(rdf['wc_we_eT'], rdf['wc_en_eT'], c = 'red', marker='^', label = r'$CH_4$ hotspot', alpha = 0.7, s = mrkrsize)
axes[1].scatter(rdf['wc_wb_eT'], rdf['wc_ba_eT'], c = 'orange', marker='s', label = r'$CH_4$ background', alpha = 0.7, s = mrkrsize)
axes[0].legend(loc="lower right")
axes[1].legend(loc="lower left")
axes[0].annotate("a)", xy=(0.05, 0.9), xycoords="axes fraction")
axes[1].annotate("b)", xy=(0.05, 0.9), xycoords="axes fraction")
plt.tight_layout()
plt.savefig( os.path.join(md.outdir_plot, 'scatter_eT.png'))
plt.close()
# plt.figure()
# plt.plot(rdf['wc_me_eT'], rdf['wc_en_eT'], 'or', label = r'$CH_4$ total', alpha = 0.8, markersize = 5)
# plt.plot(rdf['wc_me_eT'], rdf['wc_ba_eT'], 'oc', label = r'$CH_4$ total', alpha = 0.8, markersize = 5)
# # plt.plot(rdf['wc_wa_eT'], rdf['wc_we_eT'], 'or', label = r'$CH_4$ total', alpha = 0.8, markersize = 5)
# # plt.plot(rdf['wc_wa_eT'], rdf['wc_wb_eT'], 'oc', label = r'$CH_4$ total', alpha = 0.8, markersize = 5)
# # plt.plot(rdf['wc_we_eT'], rdf['wc_en_eT'], 'or', label = r'$CH_4$ ebullition', alpha = 0.5, markersize = 5)
# # plt.plot(rdf['wc_wa_eT'], rdf['wc_me_eT'], 'ok', label = r'$CH_4$ total', alpha = 0.8, markersize = 5)
# # plt.plot(rdf['wc_wb_eT'], rdf['wc_ba_eT'], 'oy', label = r'$CH_4$ background', alpha = 0.5, markersize = 5)
# plt.plot(sdf['eT', 'H2O'], sdf['eT', 'H2O'], 'k')
# plt.savefig( os.path.join(md.outdir_plot, 'scatter_eT.png'))
# plt.close()
#
# plt.figure()
# plt.plot(rdf['wc_me_M30)'], rdf['wc_me_M30'], 'k')
# plt.plot(rdf['wc_me_M30)'], rdf['wc_en_M30'], 'or', label = r'$CH_4$ ener', alpha = 0.8, markersize = 5)
# plt.plot(rdf['wc_me_M30)'], rdf['wc_ba_M30'], 'oc', label = r'$CH_4$ back', alpha = 0.8, markersize = 5)
# plt.show()
fig, axes = plt.subplots(1, 2, figsize = (12, 6))
axes[0].set_xlabel(r'Stability parameter $\zeta$')
axes[1].set_xlabel(r'Stability parameter $\zeta$')
axes[0].set_ylabel(r'Transport efficiency $e_T$ ')
axes[0].set_ylim([0, 1])
axes[1].set_ylim([0, 1])
# axes[1].set_xlabel(r'$H_2O$ Transport efficiency $e_T$ ')
axes[1].set_ylabel(r'Transport efficiency $e_T$ ')
# axes[0].plot(sdf['eT', 'H2O'], sdf['eT', 'H2O'], 'k')
# axes[1].plot(sdf['eT', 'H2O'], sdf['eT', 'H2O'], 'k')
axes[0].plot(tdf['stab'], rdf['wc_me_eT'], 'og', label = 'CH4', alpha = 0.6, markersize = 5)
axes[0].plot(tdf['stab'], rdf['wc_wa_eT'], 'ob', label = 'H2O', alpha = 0.6, markersize = 5)
# axes[0].plot(tdf['stab'], rdf['wco2_me_eT'], 'oc', label = 'CO2', alpha = 0.6, markersize = 5)
axes[0].plot(tdf['stab'], rdf['wc_cd_eT'], 'oc', label = 'CO2', alpha = 0.6, markersize = 5)
axes[1].plot(tdf['stab'], rdf['wc_en_eT'], 'or', label = 'CH4 hotspot', alpha = 0.5, markersize = 5)
axes[1].plot(tdf['stab'], rdf['wc_me_eT'], 'ok', label = 'CH4 total', alpha = 0.8, markersize = 5)
axes[1].plot(tdf['stab'], rdf['wc_ba_eT'], 'oy', label = 'CH4 background', alpha = 0.5, markersize = 5)
axes[0].annotate("a)", xy=(0.04, 0.90), xycoords="axes fraction")
axes[1].annotate("b)", xy=(0.04, 0.90), xycoords="axes fraction")
axes[0].legend()
axes[1].legend()
plt.savefig( os.path.join(md.outdir_plot, 'stability_eT.png'))
plt.close()
# check EC vs WAVELET FILTERED FLUXES
# small difference in the pre-processing fluxes
# because the entire time series is used there, not only the first N = 2**14 points
fig, axes = plt.subplots(2, 1)
fig.suptitle('Effect of wavelet filtering on total fluxes')
# axes[0].plot(rdf['wc_wa_flux_ec'], rdf['sr_fwa_ec'], 's', label = 'H2O EC srfun')
# axes[0].plot(rdf['sr_fwa_ec'], rdf['sr_fwa_ec'], 'k', label = 'H2O EC srfun')
axes[0].plot(sdf['flux_ec', 'H2O'], rdf['sr_fwa_ec'], 's', label = 'H2O EC srfun')
axes[0].plot(sdf['flux_ec', 'H2O'], rdf['wc_wa_flux_ec'], 'o', label = 'H2O EC wcfun') # unfiltered
axes[0].plot(sdf['flux_ec', 'H2O'], rdf['wc_wa_flux_wt'], '.', label = 'H2O WA Filtered wcfun')
axes[0].plot(sdf['flux_ec', 'H2O'], sdf['flux_ec', 'H2O'], 'k')
axes[0].legend()
axes[0].set_ylabel('EC flux')
axes[0].set_xlabel('Wavelet flux')
axes[1].plot(sdf['flux_ec', 'CH4'], rdf['sr_fme_ec'], 's', label = 'CH4 EC srfun')
axes[1].plot(sdf['flux_ec', 'CH4'], rdf['wc_me_flux_ec'], 'o', label = 'CH4 EC wcfun') # unfiltered
axes[1].plot(sdf['flux_ec', 'CH4'], rdf['wc_me_flux_wt'], '.', label = 'CH4 WA Filtered wcfun') # wavelet filtered
axes[1].plot(sdf['flux_ec', 'CH4'], sdf['flux_ec', 'CH4'], 'k')
axes[1].set_ylabel('Wavelet flux')
axes[1].set_xlabel('EC flux')
axes[1].legend()
axes[0].annotate("a)", xy=(0.95, 0.05), xycoords="axes fraction")
axes[1].annotate("b)", xy=(0.95, 0.05), xycoords="axes fraction")
# plt.plot(rdf['sr_me_flux_ec'])
plt.xscale('log')
plt.yscale('log')
plt.savefig( os.path.join(md.outdir_plot, 'SI_filtering_effect_on_fluxes.png'))
plt.close()
########################################################################################################################
########################################################################################################################
############ PLOT PARTITION RESULTS:
# fig, axes = plt.subplots(1, 3, figsize = (15, 5))
# axes[0].plot( rdf['wc_en_frac_flux'].values, rdf['wf_en_frac_flux'].values, 'o', label = 'wf')
# axes[0].plot( rdf['wc_en_frac_flux'].values, rdf['w2_en_frac_flux'].values, 'o', label = 'w2')
# axes[0].plot( rdf['wc_en_frac_flux'].values, rdf['wc_en_frac_flux'].values, '--k')
# axes[0].set_xlabel('energetic flux fraction [wc]')
# axes[0].set_ylabel('energetic flux fraction')
# axes[1].plot( rdf['wc_en_frac_var'].values, rdf['wf_en_frac_var'].values, 'o', label = 'wf')
# axes[1].plot( rdf['wc_en_frac_var'].values, rdf['w2_en_frac_var'].values, 'o', label = 'w2')
# axes[1].plot( rdf['wc_en_frac_var'].values, rdf['wc_en_frac_var'].values, '--k')
# axes[1].set_xlabel('energetic variance fraction [wc]')
# axes[1].set_ylabel('energetic variance fraction')
# axes[2].plot( rdf['wc_en_frac_time'].values, rdf['wf_en_frac_time'].values, 'o', label = 'wf')
# axes[2].plot( rdf['wc_en_frac_time'].values, rdf['w2_en_frac_time'].values, 'o', label = 'w2')
# axes[2].plot( rdf['wc_en_frac_time'].values, rdf['wc_en_frac_time'].values, '--k')
# axes[2].set_xlabel('energetic time fraction [wc]')
# axes[2].set_ylabel('energetic time fraction')
#
# axes[0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
# axes[1].annotate("b)", xy=(0.1, 0.9), xycoords="axes fraction")
# axes[2].annotate("c)", xy=(0.1, 0.9), xycoords="axes fraction")
# axes[0].legend(loc='lower right')
# axes[1].legend(loc='lower right')
# axes[2].legend(loc='lower right')
# plt.tight_layout()
# plt.savefig( os.path.join(md.outdir_plot, 'SI_fractions_of_ener_flux_time.png'))
# plt.close()
# plt.figure()
# plt.plot( rdf['wc_wa_eT'].values, rdf['wc_me_eT'].values, 'o', label='CH4 wc')
# plt.plot( rdf['wc_wb_eT'].values, rdf['wc_ba_eT'].values, 'o', label='CH4 qr')
# plt.plot( rdf['wc_wa_eT'].values, rdf['wc_wa_eT'].values, '--k')
# plt.xlabel(r'$eT_{H_2O}$')
# plt.ylabel(r'$eT_{CH_4}$')
# plt.legend()
# plt.savefig(os.path.join(md.outdir_plot,
# 'partition_dir_vs_indir_flux_quadrant.png'))
# plt.close()
# plt.figure()
# plt.plot(np.abs(tdf['stab'].values), rdf['wc_wa_eT'].values, 'ob', label='H2O wc')
# plt.plot(np.abs(tdf['stab'].values), rdf['wc_me_eT'].values, 'og', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), rdf['wc_ba_eT'].values/rdf['wc_wb_eT'].values, 'or', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), 1*np.ones(np.shape(tdf)[0]), 'k', label='CH4 wc')
# # plt.plot(tdf['Re_star'].values, rdf['wc_me_eT'].values, 'og', label='CH4 wc')
# # plt.plot(tdf['Re_star'].values, rdf['wc_wb_eT'].values, 'o', label='CH4 qr')
# # plt.plot( rdf['wc_wa_eT'].values, tdf['stab'].values, '--k')
# # plt.xlabel(r'$eT_{H_2O}$')
# plt.xlabel(r'$|\zeta|$')
# plt.ylabel(r'$eT_{CH_4}$')
# plt.xscale('log')
# plt.legend()
# plt.savefig(os.path.join(md.outdir_plot,
# 'partition_dir_vs_indir_flux_quadrant.png'))
# plt.close()
#
# plt.figure()
# plt.plot(rdf['wc_wa_flux_ec'].values, rdf['wc_wb_eT'].values, 'ob', label='H2O wc')
# plt.plot(rdf['wc_wa_flux_ec'].values, rdf['wc_ba_eT'].values, 'og', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), rdf['wc_en_eT'].values, 'or', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), rdf['wc_we_eT'].values, 'oy', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), rdf['wc_ba_eT'].values/rdf['wc_wb_eT'].values, 'or', label='CH4 wc')
# # plt.plot(np.abs(tdf['stab'].values), 1*np.ones(np.shape(tdf)[0]), 'k', label='CH4 wc')
# # plt.plot(tdf['Re_star'].values, rdf['wc_me_eT'].values, 'og', label='CH4 wc')
# # plt.plot(tdf['Re_star'].values, rdf['wc_wb_eT'].values, 'o', label='CH4 qr')
# # plt.plot( rdf['wc_wa_eT'].values, tdf['stab'].values, '--k')
# # plt.xlabel(r'$eT_{H_2O}$')
# plt.xlabel(r'$F_{H2O}$')
# plt.ylabel(r'$eT_{CH_4}$')
# # plt.xscale('log')
# plt.legend()
# plt.savefig(os.path.join(md.outdir_plot,
# 'partition_dir_vs_indir_flux_quadrant.png'))
# check they are all above the limit
print(np.min(rdf['wc_wa_eT'].values))
print(np.min(rdf['wc_me_eT'].values))
print(np.min(rdf['wc_ba_eT'].values))
print(np.min(rdf['wc_en_eT'].values))
fig, axes = plt.subplots(2, 3, figsize = (15, 10))
axes[0,0].plot(rdf['wc_wa_eT'].values, rdf['wc_me_eT'].values, 'ob', alpha = 0.6, markersize = 5)
axes[0,0].plot(rdf['wc_wa_eT'].values, rdf['wc_wa_eT'].values, 'k')
axes[0,0].set_ylim([0.4, 1])
axes[0,0].set_xlabel(r'$|\zeta|$')
axes[0,0].set_ylabel(r'$e_T$')
axes[0,0].set_title('Total flux')
axes[0,0].set_xlabel(r'$e_T$ $H_2O$')
axes[0,0].set_ylabel(r'$e_T$ $CH_4$')
axes[0, 0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[0,1].plot(rdf['wc_wb_eT'].values, rdf['wc_ba_eT'].values, 'ob', alpha = 0.6, markersize = 5)
axes[0,1].plot(rdf['wc_wb_eT'].values, rdf['wc_wb_eT'].values, 'k')
axes[0,1].set_ylim([0.4, 1])
axes[0,1].set_title('Background')
axes[0,1].set_xlabel(r'$e_T$ $H_2O$')
axes[0,1].set_ylabel(r'$e_T$ $CH_4$')
axes[0, 1].annotate("b)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[0, 2].plot(rdf['wc_we_eT'].values, rdf['wc_en_eT'].values, 'ob', alpha = 0.6, markersize = 5)
axes[0, 2].plot(rdf['wc_we_eT'].values, rdf['wc_we_eT'].values, 'k')
axes[0, 2].set_ylim([0.4, 1])
axes[0, 2].set_xlabel(r'$e_T$ $H_2O$')
axes[0, 2].set_ylabel(r'$e_T$ $CH_4$')
axes[0, 2].set_title('Hotspot')
axes[0, 2].annotate("c)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1, 0].plot(np.abs(tdf['stab'].values), rdf['wc_wa_eT'].values, 'ob', label=r'$H_2O$', alpha = 0.6, markersize = 5)
axes[1, 0].plot(np.abs(tdf['stab'].values), rdf['wc_me_eT'].values, 'sg', label=r'$CH_4$', alpha = 0.6, markersize = 5)
axes[1, 0].set_xscale('log')
axes[1, 0].set_ylim([0.4, 1])
axes[1, 0].set_xlabel(r'$|\zeta|$')
axes[1, 0].set_ylabel(r'$e_T$')
axes[1, 0].annotate("d)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1, 1].plot(np.abs(tdf['stab'].values), rdf['wc_wb_eT'].values, 'ob', label=r'$H_2O$', alpha = 0.6, markersize = 5)
axes[1, 1].plot(np.abs(tdf['stab'].values), rdf['wc_ba_eT'].values, 'sg', label=r'$CH_4$', alpha = 0.6, markersize = 5)
axes[1, 1].set_xscale('log')
axes[1, 1].set_ylim([0.4, 1])
axes[1, 1].set_xlabel(r'$|\zeta|$')
axes[1, 1].annotate("e)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1, 2].plot(np.abs(tdf['stab'].values), rdf['wc_we_eT'].values, 'ob', label=r'$H_2O$', alpha = 0.6, markersize = 5)
axes[1, 2].plot(np.abs(tdf['stab'].values), rdf['wc_en_eT'].values, 'sg', label=r'$CH_4$', alpha = 0.6, markersize = 5)
axes[1, 2].set_xscale('log')
axes[1, 2].set_ylim([0.4, 1])
axes[1, 2].set_xlabel(r'$|\zeta|$')
axes[1, 2].annotate("f)", xy=(0.1, 0.9), xycoords="axes fraction")
plt.legend(loc = 'lower right')
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot,
'partition_dir_vs_indir_flux_quadrant.png'), dpi = 300)
plt.close()
# fig, axes = plt.subplots(1, 3, figsize = (15, 5))
# axes[0].plot(np.abs(tdf['Re_star'].values), rdf['wc_wa_eT'].values, 'ob', label='H2O wc')
# axes[0].plot(np.abs(tdf['Re_star'].values), rdf['wc_me_eT'].values, 'og', label='CH4 wc')
# axes[0].set_xscale('log')
# axes[0].set_ylim([0.5, 1])
# axes[0].set_xlabel(r'$Re_*$')
# axes[0].set_ylabel(r'$e_T$')
# axes[0].set_title('Total flux')
#
#
# axes[1].plot(np.abs(tdf['Re_star'].values), rdf['wc_wb_eT'].values, 'ob', label='H2O wc')
# axes[1].plot(np.abs(tdf['Re_star'].values), rdf['wc_ba_eT'].values, 'og', label='CH4 wc')
# axes[1].set_xscale('log')
# axes[1].set_ylim([0.5, 1])
# axes[1].set_xlabel(r'$Re_*$')
# axes[1].set_title('Background')
#
# axes[2].plot(np.abs(tdf['Re_star'].values), rdf['wc_we_eT'].values, 'ob', label='H2O wc')
# axes[2].plot(np.abs(tdf['Re_star'].values), rdf['wc_en_eT'].values, 'og', label='CH4 wc')
# axes[2].set_xscale('log')
# axes[2].set_ylim([0.5, 1])
# axes[2].set_xlabel(r'$Re_*$')
# axes[2].set_title('Ebullition')
# plt.legend()
# plt.tight_layout()
# plt.savefig(os.path.join(md.outdir_plot,
# 'partition_dir_vs_indir_flux_quadrant_Restar.png'))
# plt.close()
#
# def plot_moments(partition = 'wc'):
# fig, axes = plt.subplots(ncols, nrows, figsize = (10, 15))
#
# for j in range(ncols):
# for i in range(nrows):
# mymoment = mymoments[ (j-1)*nrows + i ]
# print(mymoment)
# print(mymoment in ['M40', 'M30'])
#
# axes[j,i].scatter(rdf['wa_{}'.format(mymoment)],
# rdf['me_{}'.format(mymoment)], label = 'CH4')
# if mymoment == 'M40':
# # if mymoment in ['M40', 'M30']:
# axes[j, i].scatter(np.abs(rdf['wa_{}'.format(mymoment)]),
# np.abs(rdf['{}_ba_{}'.format(partition, mymoment)]),
# label=partition)
# axes[j,i].set_xscale('log')
# axes[j,i].set_yscale('log')
# axes[j, i].set_title(r'$|{}|$'.format(mymoment))
# else:
# axes[j,i].scatter(rdf['wa_{}'.format(mymoment)],
# rdf['{}_ba_{}'.format(partition, mymoment)],
# label = partition)
# axes[j,i].set_title(r'$ {} $'.format(mymoment))
# axes[j,i].plot(rdf['wa_{}'.format(mymoment)],
# rdf['wa_{}'.format(mymoment)], 'k')
# axes[j,i].set_xlabel('H2O')
# axes[j,i].set_ylabel('CH4')
# axes[0,0].legend()
# plt.tight_layout()
# plt.savefig(os.path.join(md.outdir_plot, 'partition_moments_{}.png'.format(partition)))
# # plt.show()
# plt.close()
def plot_moments_2(partition = 'wc'):
mymoments = ['M30', 'M40', 'M21', 'M12', 'M13', 'Rcw']
letters = np.array([["a)", "b)"],["c)", "d)"], ["e)", "f)"]])
nrows = 2
ncols = 3
fig, axes = plt.subplots(ncols, nrows, figsize = (10, 15))
for j in range(ncols):
for i in range(nrows):
mymoment = mymoments[ (j-1)*nrows + i ]
print(mymoment)
print(mymoment in ['M40', 'M30'])
axes[j,i].plot(rdf['{}_wa_{}'.format(partition, mymoment)],
rdf['{}_me_{}'.format(partition, mymoment)],
# '^b', label = partition, alpha = 0.6, markersize = 5, label = 'CH4')
'^b', label = partition, alpha = 0.6, markersize = 5)
if mymoment in ['M40', 'M30']:
axes[j, i].plot(np.abs(rdf['{}_bar_{}'.format(partition, mymoment)]),
np.abs(rdf['{}_ba_{}'.format(partition, mymoment)]),
'or' , label=partition, alpha = 0.6, markersize = 5)
axes[j,i].set_xscale('log')
axes[j,i].set_yscale('log')
axes[j, i].set_title(r'$|{}|$'.format(mymoment))
else:
axes[j,i].plot(rdf['{}_bar_{}'.format(partition, mymoment)],
rdf['{}_ba_{}'.format(partition, mymoment)],
'or', label = partition, alpha = 0.6, markersize = 5)
axes[j,i].set_title(r'$ {} $'.format(mymoment))
axes[j, i].annotate(letters[j, i], xy=(0.05, 0.91), xycoords="axes fraction")
axes[j,i].plot(rdf['{}_wa_{}'.format(partition, mymoment)],
rdf['{}_wa_{}'.format(partition, mymoment)], 'k')
axes[j,i].set_xlabel('H2O')
axes[j,i].set_ylabel('CH4')
# axes[0,0].legend()
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'partition_moments_2_{}.png'.format(partition)), dpi = 300)
# plt.show()
# plt.close()
# plot moments for the three partition methods
plot_moments_2(partition='wc')
# plot_moments_2(partition='wf')
# plot_moments_2(partition='w2')
def plot_moments_3(partition = 'wc'):
mymoments = ['M30', 'M21', 'M12', 'M13']
letters = np.array([["a)", "b)"],["c)", "d)"]])
# mylabels = np.array([['30', '$M_{21}$'],['$M_{12}$', '$M_{13}$']])
nrows = 2
ncols = 2
fig, axes = plt.subplots(ncols, nrows, figsize = (8, 8))
for j in range(ncols):
for i in range(nrows):
mymoment = mymoments[ (j-1)*nrows + i ]
# mylabel = mylabels[ (j-1)*nrows + i ]
print(mymoment)
mylabel = r'M_{{{}}}'.format(mymoment[1:])
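# e.g. mymoment == 'M30' gives mylabel == 'M_{30}'; the doubled braces in the
# format string escape the literal LaTeX braces.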
print(mylabel)
print(mymoment in ['M40', 'M30'])
axes[j,i].plot(rdf['{}_wa_{}'.format(partition, mymoment)],
rdf['{}_me_{}'.format(partition, mymoment)],
# '^b', label = partition, alpha = 0.6, markersize = 5, label = 'CH4')
'^b', label = partition, alpha = 0.6, markersize = 4)
if mymoment in ['M40', 'M30']:
axes[j, i].plot(np.abs(rdf['{}_bar_{}'.format(partition, mymoment)]),
np.abs(rdf['{}_ba_{}'.format(partition, mymoment)]),
'or' , label=partition, alpha = 0.6, markersize = 4)
axes[j,i].set_xscale('log')
axes[j,i].set_yscale('log')
axes[j, i].set_title(r'$|{}|$'.format(mylabel))
else:
axes[j,i].plot(rdf['{}_bar_{}'.format(partition, mymoment)],
rdf['{}_ba_{}'.format(partition, mymoment)],
'or', label = partition, alpha = 0.6, markersize = 4)
axes[j,i].set_title(r'$ {} $'.format(mylabel))
axes[j, i].annotate(letters[j, i], xy=(0.05, 0.86), xycoords="axes fraction")
axes[j,i].plot(rdf['{}_wa_{}'.format(partition, mymoment)],
rdf['{}_wa_{}'.format(partition, mymoment)], 'k')
axes[j,i].set_xlabel(r'$H_2O$')
axes[j,i].set_ylabel(r'$CH_4$')
# axes[0,0].legend()
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'partition_moments_3_{}.png'.format(partition)), dpi = 300)
# plt.show()
# plt.close()
plot_moments_3(partition='wc')
# plot_moments_3(partition='wd')
plot_densities = False
if plot_densities:
pdf = pd.read_pickle( os.path.join(md.outdir_data, 'results_pdf.csv'))
plt.figure()
mindensity = 1e-5
ndecades1 = 3
ndecades2 = 6
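# ndecades1 / ndecades2 are only plotting offsets: below, each pdf is shifted
# vertically by a fixed power of ten so the curves do not overlap on the log
# axis; the densities themselves are unchanged.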
for i in range(nruns):
print(i)
x_me = pdf['x_me'].iloc[i]
x_wa = pdf['x_wa'].iloc[i]
x_en = pdf['x_en'].iloc[i]
x_ba = pdf['x_ba'].iloc[i]
p_me = pdf['p_me'].iloc[i]
p_wa = pdf['p_wa'].iloc[i]
p_en = pdf['p_en'].iloc[i]
p_ba = pdf['p_ba'].iloc[i]
x_me[p_me < mindensity] = np.nan
x_wa[p_wa < mindensity] = np.nan
x_en[p_en < mindensity] = np.nan
x_ba[p_ba < mindensity] = np.nan
p_me[p_me < mindensity] = np.nan
p_wa[p_wa < mindensity] = np.nan
p_en[p_en < mindensity] = np.nan
p_ba[p_ba < mindensity] = np.nan
if i == 0:
plt.plot(x_en, 10**( ndecades2 + np.log10(p_en)), '.r',
label='CH4 ener')
plt.plot(x_me, 10**( ndecades1 + np.log10(p_me)), '.g',
label='CH4 ')
plt.plot(x_wa, p_wa, '.b',
label='H2O')
plt.plot(x_ba, 10**( -ndecades1 + np.log10(p_ba)), '.c',
label='CH4 back')
else:
plt.plot(x_en, 10 ** (ndecades2 + np.log10(p_en)), '.r')
plt.plot(x_me, 10 ** (ndecades1 + np.log10(p_me)), '.g')
plt.plot(x_wa, p_wa, '.b')
plt.plot(x_ba, 10 ** (-ndecades1 + np.log10(p_ba)), '.c')
plt.plot([0,0], [1e-8, 1e8], '--k')
# plt.plot(XXXME[i,:], PDFME[i,:], '-og')
plt.yscale('log')
plt.legend()
# plt.show()
plt.savefig(os.path.join(md.outdir_plot, 'partition_pdfs.png'))
plt.close()
# qc_min = np.min(rdf['qc_stdv_c2'].values)
# wc_min = np.min(rdf['wc_stdv_c'].values)
# # qf_min = np.min(rdf['qf_stdv_cw'].values)
# wf_min = np.min(rdf['wf_stdv_cw'].values)
# w2_min = np.min(rdf['w2_stdv_c2'].values)
# # print(qc_min)
# print(wc_min)
# # print(qf_min)
# print(wf_min)
# print(w2_min)
#
# list(rdf.keys())
# plt.figure(figsize = (10, 10))
# plt.plot( rdf['qr_R_me_wa'].values, rdf['wc_stdv_c'].values, 'ob', label = 'c [wavelet scalar]')
# plt.plot( rdf['qr_R_me_wa'].values, rdf['wf_stdv_cw'].values, 'or', label = 'f [wavelet flux]')
# plt.plot( rdf['qr_R_me_wa'].values, rdf['w2_stdv_c2'].values, 'oc', label = 'c2 [wavelet energy]')
# lim_wc = 0.2
# lim_wf = 0.8
# lim_w2 = 3.0
# plt.plot( rdf['qr_R_me_wa'].values, lim_wc*np.ones(nruns), '--k')
# plt.plot( rdf['qr_R_me_wa'].values, lim_wf*np.ones(nruns), '--k')
# plt.plot( rdf['qr_R_me_wa'].values, lim_w2*np.ones(nruns), '--k')
# plt.xlabel(r'correlation $R_{cr}$ between scalars $CH_4$ and $H_2O$')
# plt.ylabel(r'Standard deviation of wavelet coeffient differences')
# plt.text(0.6, lim_wc * 1.05, r'{}'.format(lim_wc), fontsize = 15)
# plt.text(0.6, lim_wf * 1.05, r'{}'.format(lim_wf), fontsize = 15)
# plt.text(0.6, lim_w2 * 1.05, r'{}'.format(lim_w2), fontsize = 15)
# corrval = 0.98
# plt.plot([corrval, corrval], [0, 35], '--k')
# plt.text(0.93, 40, 'correlation \n {}'.format(corrval), fontsize=15)
# plt.xscale('log')
# plt.yscale('log')
# plt.legend(title='Partition scheme')
# plt.savefig(os.path.join(md.outdir_plot, 'partition_stdvs_98.png'))
# plt.close()
fig, axes = plt.subplots(1, 2, figsize = (13, 6))
axes[0].plot( rdf['wc_Rcr'].values, rdf['wc_stdv_c'].values, 'o',
label = 'c [wavelet scalar]', alpha = 0.7, markersize = 8)
lim_wc = 0.3
lim_wf = 0.8
lim_w2 = 3.0
# axes[0].plot( rdf['qr_R_me_wa'].values, lim_wc*np.ones(nruns), '--k')
# axes[0].plot( rdf['qr_R_me_wa'].values, rdf['wc_my_stdv']*np.ones(nruns), '--k')
# xmin, xmax, ymin, ymax = plt.axis()
axes[0].set_xlabel(r'correlation $R_{cr}$ between $CH_4$ and $H_2O$')
axes[0].set_ylabel(r'Standard deviation of $| \Delta WT^{(m)}[i] |$')
# axes[0].set_ylabel(r'Standard deviation of $\lvert \Delta WT^{(m)}[i]\rvert$')
axes[0].set_ylim([0, 0.2 + np.max(rdf['wc_stdv_c'].values)])
xmin, xmax = axes[0].get_xlim()
# axes[1].plot( x, rdf['wc_my_stdv']*np.ones(nruns), '--k')
axes[0].plot( [xmin, xmax], [rdf['wc_my_stdv'], rdf['wc_my_stdv']], '--k')
axes[0].set_xlim([xmin , xmax])
axes[0].annotate("a)", xy=(0.90, 0.90), xycoords="axes fraction")
y = rdf['wc_stdv_c'].values
# x = rdf['me_M30'].values-rdf['wa_M30'].values
x = (rdf['wc_me_M30'].values-rdf['wc_wa_M30'].values)
z = (rdf['wc_ba_M30'].values-rdf['wc_bar_M30'].values)
# z = np.polyfit(x, y, 1)
# xv = np.linspace(np.min(x), np.max(x), 100)
# yv = z[0]*xv + z[1]
axes[1].plot(x, y, 'o', label = 'c [wavelet scalar]', alpha = 0.7, markersize = 8)
# axes[1].plot(z, y, 'or', label = 'c [wavelet scalar]', alpha = 0.6, markersize = 8)
# axes[1].plot(-x, y, 'or', label = 'c [wavelet scalar]', alpha = 0.8, markersize = 8)
# axes[1].plot( x, lim_wc*np.ones(nruns), '--k')
xmin, xmax = axes[1].get_xlim()
axes[1].plot( [xmin, xmax + 1], [rdf['wc_my_stdv'], rdf['wc_my_stdv']], '--k')
axes[1].set_xlim([xmin , xmax + 1])
# plt.plot(xv, yv, '--k')
# plt.plot( [0., 0.], [ymin, ymax], '--k')
axes[1].plot( [0., 0.], [0, np.max(rdf['wc_stdv_c'].values)+0.2], '--k')
axes[1].set_ylim([0, np.max(rdf['wc_stdv_c'].values)+0.2])
axes[1].set_xlabel(r'$M_{30, CH_4} - M_{30, H_2O}$')
axes[1].set_xscale('symlog')
# axes[1].set_ylabel(r'Standard deviation of wavelet coefficient differences')
axes[1].set_ylabel(r'Standard deviation of $ | \Delta WT^{(m)}[i] | $')
axes[1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
plt.savefig(os.path.join(md.outdir_plot, 'partition_stdvs.png'), dpi = 300)
plt.close()
# fig, axes = plt.subplots(1, 2, figsize=(16, 8))
# # plt.plot(rdf['ts_b'], rdf['ts_s'], 'o')
# # plt.plot(rdf['ts_b'], rdf['sr_fme_ec']*rdf['wc_en_frac_flux'], 'o')
# # plt.plot(rdf['ts_b'], rdf['wc_en_frac_flux'], 'o')
# axes[0].plot(1/rdf['ts_l'], rdf['wc_en_frac_flux'], 'o', alpha = 0.8, markersize = 8)
# # plt.plot(rdf['ts_b'], 1/20*rdf['ts_b'], 'k')
# # plt.xscale('log')
# # plt.yscale('log')
# axes[0].set_ylabel('Fraction of Ebullition Flux')
# axes[0].set_xlabel('Mean time between ebullition events [s]')
# axes[0].annotate("a)", xy=(0.9, 0.90), xycoords="axes fraction")
#
# axes[1].plot(rdf['wc_en_frac_time'], rdf['wc_en_frac_flux'], 'o', alpha = 0.8, markersize = 8)
# axes[1].set_ylabel('Fraction of Ebullition Flux')
# axes[1].set_xlabel('Fraction of active area')
# axes[1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
#
# plt.savefig(os.path.join(md.outdir_plot, 'ebull_time_scale.png'), dpi = 300)
# plt.close()
# def phim(stabv):
# ''' stability correction function for momentum
# From Katul et al, PRL, 2011'''
# # if stab > 1:
# # print('turb_quant WARNING:: very stable - using constant phim')
# # return 5.47 # check
# n = np.size(stabv)
# myphim = np.zeros(n)
# for i in range(n):
# if stabv[i] > 0: # stable
# myphim[i] = 1 + 4.7*stabv[i]
# else:
# myphim[i] = (1-15*stabv[i])**(-1/4)
# return myphim
# shear_ts = rdf['sr_ustar']/0.4/md.z_ref*phim(rdf['sr_stab'].values)
# ener_ts = rdf['sr_ustar']/0.4/md.z_ref*phim(rdf['sr_stab'].values)
# fig, axes = plt.subplots(1, 2, figsize=(16, 8))
# xxd, yyd = mf.mylogslope(rdf['ts_shear_ts'], 1/rdf['ts_l'].values, slope=-2)
# axes[0].plot(rdf['ts_shear_ts'], 1/rdf['ts_l'], 'o', label = 'runs', alpha = 0.8, markersize = 8)
# # axes[0].plot(xxd, yyd, 'k', label = r'slope $-1$')
# axes[0].set_xlabel(r'Shear time scale [$s$]')
# axes[0].set_ylabel(r'Frequency of ebullition events [$s^{-1}$]')
#
# axes[1].plot(rdf['ts_diss_ts'], 1/rdf['ts_l'], 'o', label = 'runs', alpha = 0.8, markersize = 8)
# axes[1].set_xlabel(r'Dissipation time scale [$s$]')
# axes[1].set_ylabel(r'Frequency of ebullition events [$s^{-1}$]')
# # axes[1].set_xscale('log')
# # axes[1].set_yscale('log')
# # axes[1].legend()
# plt.savefig(os.path.join(md.outdir_plot, 'shear_ts_vs_ebull_time_scale.png'))
# plt.close()
# fig, axes = plt.subplots(1, 2, figsize=(16, 8))
# # xxd, yyd = mf.mylogslope(rdf['ts_shear_ts'], rdf['ts_l'].values, slope=1)
# axes[0].plot(rdf['ts_shear_ts'], rdf['ts_l'], 'o', label = 'runs', alpha = 0.8, markersize = 8)
# # axes[0].plot(xxd, yyd, 'k', label = r'slope $1$')
# axes[0].set_xlabel(r'Shear time scale [$s$]')
# # axes[0].set_ylabel(r'Frequency of ebullition events [$s^{-1}$]')
# axes[0].set_ylabel(r'Frequency of ebullition events [$s^{-1}$]')
# axes[0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
# # axes[0].set_xscale('log')
# # axes[0].set_yscale('log')
#
# # xxd, yyd = mf.mylogslope(rdf['ts_diss_ts'], rdf['ts_l'].values, slope=-1)
# axes[1].plot(rdf['ts_diss_ts'], rdf['ts_l'], 'o', label = 'runs', alpha = 0.8, markersize = 8)
# # axes[1].plot(xxd, yyd, 'k', label = r'slope $1$')
# axes[1].set_xlabel(r'Dissipation time scale [$s$]')
# axes[1].set_ylabel(r'Frequency of ebullition events [$s^{-1}$]')
# axes[1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
# # axes[1].set_xscale('log')
# # axes[1].set_yscale('log')
# # axes[1].legend()
#
# # axes[2].plot(rdf['ts_Tw'], rdf['ts_l'], 'o', label = 'runs', alpha = 0.8, markersize = 8)
# # axes[2].set_xlabel(r'Dissipation time scale [$s$]')
# # axes[2].set_ylabel(r'$T_w$ [$s$]')
# # axes[1].set_xscale('log')
# # axes[1].set_yscale('log')
# # axes[1].legend()
# plt.savefig(os.path.join(md.outdir_plot, 'shear_ts_vs_ebull_time_scale.png'), dpi = 300)
# plt.close()
# plt.figure(figsize=(8, 8))
# xxd, yyd = mf.mylogslope(rdf['ts_diss_ts'], rdf['sr_alpha'].values, slope=-1)
# plt.plot(rdf['ts_diss_ts'], rdf['sr_alpha'], 'o', label = 'runs')
# plt.plot(xxd, yyd, 'k', label = r'slope $-1$')
# plt.xscale('log')
# plt.yscale('log')
# plt.xlabel('Shear time scale [s]')
# plt.ylabel('Mean time between ebullition events [s]')
# plt.legend()
# plt.show()
# plt.savefig(os.path.join(md.outdir_plot, 'shear_ts_vs_ebull_time_scale.png'))
# plt.close()
# plt.figure()
# plt.plot(1/rdf['ts_l'], rdf['sr_fme_ec']*rdf['wc_en_frac_flux'], 'o')
# # plt.plot()
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
# plt.figure()
# plt.plot(rdf['sr_fme_ec'], rdf['sr_ustar'], 'o')
# plt.show()
# plt.figure()
# # plt.plot(rdf['ts_l'], rdf['wc_en_frac_time'], 'o')
# plt.plot(1/rdf['ts_l'], rdf['wc_en_frac_time'], 'o')
# # plt.plot()
# # plt.xscale('log')
# # plt.yscale('log')
# plt.show()
# plot
# myvars = []
fig, axes = plt.subplots(1, 2, figsize=(10,6))
print(np.max(rdf['sr_fme_sr']))
axes[0].plot(rdf['sr_fme_ec'], rdf['sr_fme_sr'], 'ok', alpha = 0.3, markersize = 6)
axes[0].plot(rdf['sr_fme_ec']*rdf['wc_en_frac_flux'], rdf['sr_fen_sr'], 'or', alpha = 0.6, markersize = 6)
axes[0].plot(rdf['sr_fen_sr'], rdf['sr_fen_sr'], 'k')
axes[0].set_xscale('log')
axes[0].set_yscale('log')
axes[0].set_title(r'$CH_4$ hotspot')
axes[0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[0].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0].set_ylim([8E-5*40, 12E-2*50])
axes[0].set_xlim([8E-5*40, 12E-2*50])
axes[0].plot([1E-4*40, 8E-2*50], [1E-4*40, 8E-2*50], 'k')
axes[1].plot(rdf['sr_fme_ec'], rdf['sr_fme_sr'], 'ok', alpha = 0.3, markersize = 6)
axes[1].plot(rdf['sr_fme_ec']*rdf['wc_ba_frac_flux'], rdf['sr_fba_sr'], 'oc', alpha = 0.6, markersize = 6)
axes[1].plot(rdf['sr_fba_sr'], rdf['sr_fba_sr'], 'k')
# axes[1].plot(rdf['sr_fen'], rdf['sr_fen'], 'k')
axes[1].annotate("b)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1].set_xscale('log')
axes[1].set_yscale('log')
axes[1].set_title(r'$CH_4$ background')
axes[1].set_ylim([8E-5*40, 12E-2*50])
axes[1].set_xlim([8E-5*40, 12E-2*50])
axes[1].plot([1E-4*40, 8E-2*50], [1E-4*40, 8E-2*50], 'k')
# axes[1].set_ylim([8E-5, 12E-2])
# axes[1].set_xlim([8E-5, 12E-2])
axes[1].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
# axes[1].axes.get_yaxis().set_visible(False)
# axes[1].set_ylabel('SR flux')
# axes[1].plot([1E-4, 8E-2], [1E-4, 8E-2], 'k')
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'sr_fluxes.png'), dpi = 300)
plt.close()
fig, axes = plt.subplots(2, 2, figsize=(10,10))
axes[0,0].plot(rdf['sr_fme_ec'], rdf['sr_fme_sr'], 'og', label = 'SR')
axes[0,0].plot(rdf['sr_fme_ec'], rdf['sr_fen_sr'] + rdf['sr_fba_sr'] , '.k', label = 'ISR')
# axes[0,0].plot(rdf['sr_fme_ec'], rdf['sr_fme_ec'], 'k')
axes[0,0].set_xscale('log')
axes[0,0].set_yscale('log')
# axes[0,0].set_xlabel('Eddy covariance flux')
# axes[0,0].set_ylabel('Surface renewal flux')
axes[0, 0].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0, 0].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0,0].legend(loc='lower right')
axes[0,0].set_title(r'$CH_4$')
axes[0, 0].set_ylim([1E-4*40, 12E-2*50])
axes[0, 0].set_xlim([1E-4*40, 12E-2*50])
axes[0, 0].plot( [1E-4*40, 8E-2*50], [1E-4*40, 8E-2*50], 'k')
axes[0,1].plot(rdf['sr_fwa_ec'], rdf['sr_fwa_sr'], 'ob')
axes[0,1].plot(rdf['sr_fwa_ec'], rdf['sr_fwa_ec'], 'k')
axes[0,1].set_xscale('log')
axes[0,1].set_yscale('log')
axes[0,1].set_title(r'$H_2O$')
axes[1,0].plot(rdf['sr_fme_ec']*rdf['wc_en_frac_flux'], rdf['sr_fen_sr'], 'or')
# axes[0,1].set_xlabel('Eddy covariance flux')
# axes[0,1].set_ylabel('Surface renewal flux')
axes[0, 1].set_xlabel(r'EC flux [$m mol\, m^{-2}\, s^{-1}$]')
axes[0, 1].set_ylabel(r'SR flux [$m mol\, m^{-2}\, s^{-1}$]')
# axes[1,0].plot(rdf['sr_fen'], rdf['sr_fen'], 'k')
axes[1,0].set_xscale('log')
axes[1,0].set_yscale('log')
axes[1,0].set_title(r'$CH_4$ hotspot')
# axes[1,0].set_ylim([1E-4, 12E-2])
# axes[1,0].set_xlim([1E-4, 12E-2])
# axes[1, 0].plot([1E-4, 8E-2], [1E-4, 8E-2], 'k')
axes[1, 0].set_ylim([1E-4*40, 12E-2*50])
axes[1, 0].set_xlim([1E-4*40, 12E-2*50])
axes[1, 0].plot( [1E-4*40, 8E-2*50], [1E-4*40, 8E-2*50], 'k')
axes[1, 0].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[1, 0].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[1,1].plot(rdf['sr_fme_ec']*rdf['wc_ba_frac_flux'], rdf['sr_fba_sr'], 'oc')
# axes[1,1].plot(rdf['sr_fba'], rdf['sr_fba'], 'k')
axes[1,1].set_xscale('log')
axes[1,1].set_yscale('log')
axes[1,1].set_title(r'$CH_4$ background')
# axes[1,1].set_ylim([1E-4, 12E-2])
# axes[1,1].set_xlim([1E-4, 12E-2])
# axes[1, 1].plot([1E-4, 8E-2], [1E-4, 8E-2], 'k')
axes[1, 1].set_ylim([1E-4*40, 12E-2*50])
axes[1, 1].set_xlim([1E-4*40, 12E-2*50])
axes[1, 1].plot( [1E-4*40, 8E-2*50], [1E-4*40, 8E-2*50], 'k')
# axes[1,1].set_xlabel('Eddy covariance flux')
# axes[1,1].set_ylabel('Surface renewal flux')
axes[1, 1].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[1, 1].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0, 0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[0, 1].annotate("b)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1, 0].annotate("c)", xy=(0.1, 0.9), xycoords="axes fraction")
axes[1, 1].annotate("d)", xy=(0.1, 0.9), xycoords="axes fraction")
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'SI_sr_fluxes_4.png'))
plt.close()
fig, axes = plt.subplots(2, 2, figsize=(10,10))
xx, yy = mf.mylogslope(rdf['sr_Re_star'],rdf['sr_da_me'], slope = -1/4 )
axes[0,0].plot(rdf['sr_Re_star'], rdf['sr_da_me'], 'og')
axes[0,0].plot(xx, yy, 'k')
axes[0,0].set_xscale('log')
axes[0,0].set_yscale('log')
axes[0,0].set_xlabel(r'$Re_{*}$')
axes[0,0].set_ylabel(r'$Da$')
axes[0,0].set_title(r'$CH_4$')
xx, yy = mf.mylogslope(rdf['sr_Re_star'],rdf['sr_da_wa'], slope = -1/4 )
axes[0,1].plot(rdf['sr_Re_star'], rdf['sr_da_wa'], 'ob')
axes[0,1].plot(xx, yy, 'k')
axes[0,1].set_xscale('log')
axes[0,1].set_yscale('log')
axes[0,1].set_xlabel('$Re_{*}$')
axes[0,1].set_ylabel(r'$Da$')
axes[0,1].set_title(r'$H_2O$')
xx, yy = mf.mylogslope(rdf['sr_Re_star'],rdf['sr_da_en'], slope = -1/4 )
axes[1,0].plot(rdf['sr_Re_star'], rdf['sr_da_en'], 'or')
axes[1,0].plot(xx, yy, 'k')
axes[1,0].set_xscale('log')
axes[1,0].set_yscale('log')
axes[1,0].set_xlabel('$Re_{*}$')
axes[1,0].set_ylabel(r'$Da$')
axes[1,0].set_title(r'$CH_4$ ebullition')
xx, yy = mf.mylogslope(rdf['sr_Re_star'],rdf['sr_da_ba'], slope = -1/4 )
axes[1,1].plot(rdf['sr_Re_star'], rdf['sr_da_ba'], 'oc')
axes[1,1].plot(xx, yy, 'k')
axes[1,1].set_xscale('log')
axes[1,1].set_yscale('log')
axes[1,1].set_xlabel('$Re_{*}$')
axes[1,1].set_ylabel(r'$Da$')
axes[1,1].set_title(r'$CH_4$ background')
axes[0, 0].annotate("a)", xy=(0.9, 0.1), xycoords="axes fraction")
axes[0, 1].annotate("b)", xy=(0.9, 0.1), xycoords="axes fraction")
axes[1, 0].annotate("c)", xy=(0.9, 0.1), xycoords="axes fraction")
axes[1, 1].annotate("d)", xy=(0.9, 0.1), xycoords="axes fraction")
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'SI_sr_DaRe.png'))
plt.close()
# fig, axes = plt.subplots(2, 2, figsize=(10,10))
# xx, yy = mf.mylogslope(rdf['sr_Re_stab'],rdf['sr_da_me'], slope = -1/4 )
# axes[0,0].plot(rdf['sr_Re_stab'], rdf['sr_da_me'], 'og')
# axes[0,0].plot(xx, yy, 'k')
# axes[0,0].set_xscale('log')
# axes[0,0].set_yscale('log')
# axes[0,0].set_xlabel(r'$Re_{*}$')
# axes[0,0].set_ylabel('Da')
# axes[0,0].set_title(r'$CH4$')
#
# xx, yy = mf.mylogslope(rdf['sr_Re_stab'],rdf['sr_da_wa'], slope = -1/4 )
# axes[0,1].plot(rdf['sr_Re_stab'], rdf['sr_da_wa'], 'ob')
# axes[0,1].plot(xx, yy, 'k')
# axes[0,1].set_xscale('log')
# axes[0,1].set_yscale('log')
# axes[0,1].set_xlabel('$Re_{*}$')
# axes[0,1].set_ylabel('Da')
# axes[0,1].set_title(r'$H2O$')
#
# xx, yy = mf.mylogslope(rdf['sr_Re_stab'],rdf['sr_da_en'], slope = -1/4 )
# axes[1,0].plot(rdf['sr_Re_stab'], rdf['sr_da_en'], 'or')
# axes[1,0].plot(xx, yy, 'k')
# axes[1,0].set_xscale('log')
# axes[1,0].set_yscale('log')
# axes[1,0].set_xlabel('$Re_{*}$')
# axes[1,0].set_ylabel('Da')
# axes[1,0].set_title(r'$CH4$ Ebullition')
#
# xx, yy = mf.mylogslope(rdf['sr_Re_stab'],rdf['sr_da_ba'], slope = -1/4 )
# axes[1,1].plot(rdf['sr_Re_stab'], rdf['sr_da_ba'], 'oc')
# axes[1,1].plot(xx, yy, 'k')
# axes[1,1].set_xscale('log')
# axes[1,1].set_yscale('log')
# axes[1,1].set_xlabel('$Re_{*}$')
# axes[1,1].set_ylabel('Da')
# axes[1,1].set_title(r'$CH4$ Background')
# plt.tight_layout()
# plt.savefig(os.path.join(md.outdir_plot, 'sr_DaReStab.png'), dpi = 300)
# plt.close()
# write as a function of actual date
# plt.figure()
# # plt.plot(df['wc_en_frac_flux'], rdf['wc_me_flux_ec'], 'o')
# plt.title('Energetic fraction with different methods')
# plt.plot(rdf['wc_me_flux_ec'], 'g', label = 'total CH4')
# plt.plot(rdf['wc_en_frac_flux']*rdf['wc_me_flux_ec'], 'r', label = 'ener frac wc')
# plt.plot(rdf['w2_en_frac_flux']*rdf['w2_me_flux_ec'], 'orange', label = 'ener frac w2')
# plt.plot(rdf['wf_en_frac_flux']*rdf['wf_me_flux_ec'], 'blue', label = 'ener frac wf')
# plt.legend()
# plt.savefig(os.path.join(md.outdir_plot, 'ener_flux_fractions_allmethods.png'))
# plt.close()
# pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
# rdf['datetime'] = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
# datetimes = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M', errors='ignore')
datetimes = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
# plt.plot(rdf['datetime'])
fig, axes = plt.subplots(2, 1, figsize = (12, 8))
axes[0].plot(datetimes, rdf['wc_me_flux_ec'], 'sk', alpha = 0.6, markersize = 6, label = 'Total flux')
axes[0].plot(datetimes, rdf['wc_en_frac_flux']*rdf['wc_me_flux_ec'], 'or', alpha = 0.6, markersize = 6, label = 'hotspot flux')
axes[0].plot(datetimes, rdf['wc_ba_frac_flux']*rdf['wc_me_flux_ec'], '^c', alpha = 0.6, markersize = 6, label = 'background flux')
axes[0].set_yscale('log')
axes[0].legend(ncol = 3, bbox_to_anchor=(1., 1.2))
axes[0].set_ylabel(r'$CH_4$ Flux [$\mu mol\, m^{-2}\, s^{-1}$]', fontsize = 16)
axes[0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[0].axes.get_xaxis().set_visible(False)
axes[1].plot(datetimes, rdf['wc_en_frac_time'], '^r', alpha = 0.6, markersize = 6, label = 'Area')
axes[1].plot(datetimes, rdf['wc_en_frac_var'], 'sg', alpha = 0.6, markersize = 6, label = 'Variance')
axes[1].plot(datetimes, rdf['wc_en_frac_flux'], 'ok', alpha = 0.6, markersize = 6, label = 'Flux')
axes[1].set_ylabel('Hotspot fraction', fontsize = 16)
axes[1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[1].get_xticks(rotation=70)
# axes[1].set_xticklabels(datetimes.dt.date, rotation = 0, fontsize = 16)
axes[1].legend()
# plt.xlabel('Date')
# axes[1].legend(ncol = 3)
axes[1].legend(ncol = 3, bbox_to_anchor=(1., 1.2))
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'ener_flux_frac_fractions.png'), dpi = 300)
plt.close()
# fig, axes = plt.subplots(ncols = 2, nrows = 1 ,figsize=(12, 6))
fig, axes = plt.subplots(ncols = 2, nrows = 2 ,figsize=(12, 12))
xdata = rdf['sr_ustar']
xv, yv = mf.mylogslope(xdata, rdf['sr_teba'], slope = 0 )
axes[0, 0].scatter(xdata, rdf['sr_teme'].values, alpha = 0.6, s = 18, color = 'g', marker = '^', label = r'$CH_4$')
axes[0, 0].scatter(xdata, rdf['sr_teen'].values, alpha = 0.6, s = 18, color = 'r', marker = 'o', label = r'$CH_4$ H')
axes[0, 0].scatter(xdata, rdf['sr_teba'].values, alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ B')
axes[0, 0].scatter(xdata, rdf['sr_tewa'].values, alpha = 0.6, s = 20, color = 'b', marker = 's', label = r'$H_2O$')
axes[0, 0].annotate("a)", xy=(0.03, 0.90), xycoords="axes fraction")
axes[0, 0].set_xscale('log')
axes[0, 0].set_yscale('log')
axes[0, 0].set_ylabel('Transport efficiency')
axes[0, 0].set_xlabel(r'$u_* [m/s]$')
axes[0, 0].legend()
axes[0, 0].set_xticks([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[0, 0].set_xticklabels([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[0, 0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.6])
axes[0, 0].set_yticklabels([0.1, 0.2, 0.3, 0.4, 0.6])
xv, yv = mf.mylogslope(xdata, rdf['sr_gtv_ba'], slope = 3/4 )
axes[0, 1].scatter(xdata, rdf['sr_gtv_wa'].values, alpha = 0.6, s = 20, color = 'b', marker = 's', label = r'$H_2O$')
axes[0, 1].scatter(xdata, rdf['sr_gtv_me'].values, alpha = 0.6, s = 18, color = 'g', marker = '^', label = r'$CH_4$')
axes[0, 1].scatter(xdata, rdf['sr_gtv_ba'].values, alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ B')
axes[0, 1].scatter(xdata, rdf['sr_gtv_en'].values, alpha = 0.6, s = 18, color = 'r', marker = 'o', label = r'$CH_4$ H')
axes[0, 1].annotate("b)", xy=(0.04, 0.90), xycoords="axes fraction")
axes[0, 1].plot(xv, yv, 'k', linewidth = 2)
axes[0, 1].set_xscale('log')
axes[0, 1].set_yscale('log')
axes[0, 1].set_ylabel('Gas transfer velocity [m/s]')
axes[0, 1].set_xlabel(r'$u_* [m/s]$')
axes[0, 1].set_xticks([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[0, 1].set_xticklabels([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[0, 1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.6])
axes[0, 1].set_yticklabels([0.1, 0.2, 0.3, 0.4, 0.6])
# plt.tight_layout()
# plt.savefig(os.path.join(md.outdir_plot, 'teff_and_gtv.png'), dpi=300)
# plt.close()
# fig, axes = plt.subplots(ncols = 2, nrows = 1 ,figsize=(12, 6))
xdata = rdf['sr_ustar']
# xv, yv = mf.mylogslope(xdata, rdf['sr_teba_sr'], slope = -1/4 )
# axes[0].scatter(xdata, rdf['sr_teme_isr'].values, alpha = 0.6, s = 18, color = 'g', marker = 'o', label = 'CH4 ISR')
axes[1, 0].scatter(xdata, rdf['sr_tewa_sr'].values, alpha = 0.6, s = 18, color = 'b', marker = 's', label = r'$H_2O$')
axes[1, 0].scatter(xdata, rdf['sr_teme_isr'].values, alpha = 0.6, s = 18, color = 'g', marker = '^', label = r'$CH4$')
axes[1, 0].scatter(xdata, rdf['sr_teba_sr'].values, alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ B')
axes[1, 0].scatter(xdata, rdf['sr_teen_sr'].values, alpha = 0.6, s = 18, color = 'r', marker = 'o', label = r'$CH_4$ H')
axes[1, 0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 0].set_xscale('log')
axes[1, 0].set_yscale('log')
axes[1, 0].set_ylabel('Transport efficiency')
axes[1, 0].set_xlabel(r'$u_*\, [m/s]$')
axes[1, 0].legend()
axes[1, 0].set_xticks([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[1, 0].set_xticklabels([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[1, 0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.6])
axes[1, 0].set_yticklabels([0.1, 0.2, 0.3, 0.4, 0.6])
xv, yv = mf.mylogslope(xdata, rdf['sr_gtv_ba_sr'], slope = 3/4 )
axes[1, 1].scatter(xdata, rdf['sr_gtv_wa_sr'].values, alpha = 0.6, s = 18, color = 'b', marker = 's', label = r'$H_2O$')
axes[1, 1].scatter(xdata, rdf['sr_gtv_me_isr'].values, alpha = 0.6, s = 18, color = 'g', marker = '^', label = r'$CH4$')
axes[1, 1].scatter(xdata, rdf['sr_gtv_ba_sr'].values, alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ B')
axes[1, 1].scatter(xdata, rdf['sr_gtv_en_sr'].values, alpha = 0.6, s = 18, color = 'r', marker = 'o', label = r'$CH_4$ H')
axes[1, 1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 1].plot(xv, yv, 'k', linewidth = 2)
axes[1, 1].set_xscale('log')
axes[1, 1].set_yscale('log')
axes[1, 1].set_ylabel('Gas transfer velocity [m/s]')
axes[1, 1].set_xlabel(r'$u_*\, [m/s]$')
axes[1, 1].set_xticks([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[1, 1].set_xticklabels([0.2, 0.3, 0.4, 0.6, 0.8], minor = True)
axes[1, 1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.6])
axes[1, 1].set_yticklabels([0.1, 0.2, 0.3, 0.4, 0.6])
axes[0, 0].set_ylim([0.03, 1.2])
axes[1, 0].set_ylim([0.03, 1.2])
axes[0, 1].set_ylim([0.03, 1.2])
axes[1, 1].set_ylim([0.03, 1.2])
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'SI_teff_and_gtv_sr_joined.png'))
plt.close()
# check two different computation of gas transfer velocity
# plt.figure()
# plt.scatter(rdf['sr_gtv_wa'].values, rdf['sr_gtv_me'].values,
# alpha = 0.6, s = 18, color = 'g', marker = '^', label = 'CH4')
# plt.plot(rdf['sr_gtv_wa'].values, rdf['wc_enfa']/np.sqrt(rdf['wc_enva']), '.k')
# plt.plot(rdf['sr_gtv_wa'].values, rdf['wc_bafa']/np.sqrt(rdf['wc_bava']), 'oc')
# plt.scatter(rdf['sr_gtv_wa'].values, rdf['sr_gtv_en'].values,
# alpha = 0.6, s = 18, color = 'r', marker = '*', label = 'CH4')
# plt.scatter(rdf['sr_gtv_wa'].values, rdf['sr_gtv_en_sr'].values,
# alpha = 0.6, s = 18, color = 'r', marker = 's', label = 'CH4')
# plt.plot(rdf['sr_gtv_wa'].values, rdf['sr_gtv_wa'].values, 'k')
# plt.show()
plt.figure(figsize=(6,6))
plt.scatter(rdf['sr_gtv_wa'].values, rdf['sr_gtv_wa_sr'].values,
alpha = 0.6, s = 18, color = 'b', marker = 'o', label = r'$H_2O$')
plt.scatter(rdf['sr_gtv_me'].values, rdf['sr_gtv_me_sr'].values,
alpha = 0.6, s = 18, color = 'g', marker = '*', label = r'$CH_4$')
plt.scatter(rdf['sr_gtv_ba'].values, rdf['sr_gtv_ba_sr'].values,
alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ B')
plt.scatter(rdf['sr_gtv_en'].values, rdf['sr_gtv_en_sr'].values,
alpha = 0.6, s = 18, color = 'r', marker = '^', label = r'$CH_4$ H')
plt.plot(rdf['sr_gtv_wa'].values, rdf['sr_gtv_wa'].values, 'k')
plt.xlabel('EC gas transfer velocity [m/s]')
plt.ylabel('SR gas transfer velocity [m/s]')
plt.legend()
plt.savefig( os.path.join(md.outdir_plot, 'gtv_sr_vs_ecwa.png'))
plt.close()
plt.figure(figsize=(6,6))
plt.scatter(rdf['sr_gtv_wa'].values, rdf['sr_gtv_wa_sr'].values,
alpha = 0.6, s = 18, color = 'b', marker = 'o', label = r'$H_2O$')
plt.scatter(rdf['sr_gtv_me'].values, rdf['sr_gtv_me_sr'].values,
alpha = 0.6, s = 18, color = 'g', marker = '*', label = r'$CH_4$')
plt.scatter(rdf['sr_gtv_cd'].values, rdf['sr_gtv_cd_isr'].values,
alpha = 0.6, s = 18, color = 'y', marker = '^', label = r'$CO_2$')
# plt.scatter(rdf['sr_gtv_ba'].values, rdf['sr_gtv_ba_sr'].values,
# alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CH_4$ back')
# plt.scatter(rdf['sr_gtv_en'].values, rdf['sr_gtv_en_sr'].values,
# alpha = 0.6, s = 18, color = 'r', marker = '^', label = r'$CH_4$ eb.')
plt.scatter(rdf['sr_gtv_cd_ba'].values, rdf['sr_gtv_cd_ba_sr'].values,
alpha = 0.6, s = 18, color = 'c', marker = 's', label = r'$CO_2$ B')
plt.scatter(rdf['sr_gtv_cd_en'].values, rdf['sr_gtv_cd_en_sr'].values,
alpha = 0.6, s = 18, color = 'r', marker = '^', label = r'$CO_2$ H')
plt.plot(rdf['sr_gtv_wa'].values, rdf['sr_gtv_wa'].values, 'k')
plt.plot(-rdf['sr_gtv_wa'].values, -rdf['sr_gtv_wa'].values, 'k')
plt.xlabel('EC gas transfer velocity [m/s]')
plt.ylabel('SR gas transfer velocity [m/s]')
plt.legend()
plt.savefig( os.path.join(md.outdir_plot, 'gtv_CO2.png'))
plt.close()
# plt.figure()
# plt.plot(rdf['sr_teme'].values, rdf['sr_teme_sr'].values, 'o')
# plt.plot(rdf['sr_teme'].values, rdf['sr_teme'].values, 'k')
# plt.show()
#
#
# plt.figure()
# plt.plot(rdf['sr_teba'].values, rdf['sr_teba_sr'].values, 'o')
# plt.plot(rdf['sr_teba'].values, rdf['sr_teba'].values, 'k')
# plt.show()
#
# # plt.figure(figsize=(8, 6.5))
# fig, axes = plt.subplots(ncols = 2, nrows = 1 ,figsize=(14, 7))
# xdata = rdf['sr_ustar']
# # axes[0].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apime_std'], color='g',marker = '*', alpha = 0.6, s = 20, label = 'CH4')
# # axes[0].scatter(xdata, rdf['sr_fwa_ec']/rdf['sr_stdv_w']/rdf['sr_apiwa_std'], color='b', marker = 's', alpha = 0.6, s = 20, label = 'H2O')
# # axes[0].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apien_std']*rdf['wc_en_frac_flux']/np.sqrt(rdf['sr_alpha']),
# # alpha = 0.6, s = 20, color = 'r',marker='^', label = 'CH4 Ebullition')
# # axes[0].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apiba_std']*rdf['wc_ba_frac_flux']/np.sqrt(1-rdf['sr_alpha']),
# # alpha = 0.6, s = 20, color = 'c',marker = 'o', label = 'CH4 Background')
# # axes[0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
#
# xv, yv = mf.mylogslope(xdata,rdf['sr_fme_ec'].values/rdf['sr_std_me'].values/rdf['sr_stdv_w'].values, slope = -3/4/2 )
#
# axes[0].scatter(xdata, rdf['sr_fme_ec'].values/rdf['sr_stdv_w'].values/rdf['sr_std_me'].values, color='g',marker = '*', alpha = 0.6, s = 20, label = 'CH4')
# axes[0].scatter(xdata, rdf['sr_fwa_ec'].values/rdf['sr_stdv_w'].values/rdf['sr_std_wa'].values, color='b', marker = 's', alpha = 0.6, s = 20, label = 'H2O')
# axes[0].scatter(xdata, 1/rdf['sr_alpha'].values*rdf['sr_fme_ec'].values/rdf['sr_stdv_w'].values/rdf['sr_std_en'].values*rdf['wc_en_frac_flux'].values,
# alpha = 0.6, s = 20, color = 'r',marker='^', label = 'CH4 Ebullition')
# axes[0].scatter(xdata, 1/(1-rdf['sr_alpha'].values)*rdf['sr_fme_ec'].values/rdf['sr_stdv_w'].values/rdf['sr_std_ba'].values*rdf['wc_ba_frac_flux'].values,
# alpha = 0.6, s = 20, color = 'c',marker = 'o', label = 'CH4 Background')
# axes[0].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
# axes[0].plot(xv, yv, 'k', linewidth = 2)
#
#
# # plot cubic spline interpolation
# # xdataso = np.sort(xdata)
# # xdataso_order = np.argsort(xdata)
# # fme = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apime_std'])[xdataso_order])
# # fwa = UnivariateSpline(xdataso, (rdf['sr_fwa_ec']/rdf['sr_stdv_w']/rdf['sr_apiwa_std'])[xdataso_order])
# # fen = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apien_std']*rdf['wc_en_frac_flux']/np.sqrt(rdf['sr_alpha']))[xdataso_order])
# # fba = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_stdv_w']/rdf['sr_apiba_std']*rdf['wc_ba_frac_flux']/np.sqrt(1-rdf['sr_alpha']))[xdataso_order])
# # # fme.set_smoothing_factor(0.5)
# # xdataso = np.sort(xdata)
# # axes[0].plot(xdataso, fme(xdataso), 'g', linewidth = 2.6)
# # axes[0].plot(xdataso, fwa(xdataso), 'b', linewidth = 2.6)
# # axes[0].plot(xdataso, fen(xdataso), 'r', linewidth = 2.6)
# # axes[0].plot(xdataso, fba(xdataso), 'c', linewidth = 2.6)
#
# # axes[0].plot(xdataso, fme(xdataso), 'g', linewidth = 2.6)
# # axes[0].plot(xdataso, fme(xdataso), '--k', linewidth = 1.6)
# # axes[0].plot(xdataso, fwa(xdataso), 'b', linewidth = 2.6)
# # axes[0].plot(xdataso, fwa(xdataso), '--k', linewidth = 1.6)
# # axes[0].plot(xdataso, fen(xdataso), 'r', linewidth = 2.6)
# # axes[0].plot(xdataso, fen(xdataso), '--k', linewidth = 1.6)
# # axes[0].plot(xdataso, fba(xdataso), 'c', linewidth = 2.6)
# # axes[0].plot(xdataso, fba(xdataso), '--k', linewidth = 1.6)
# axes[0].set_xscale('log')
# # plt.yscale('log')
# axes[0].set_ylabel('Transport Efficiency [-]')
# axes[0].set_xlabel(r'$u_*$')
# # plt.tight_layout()
# axes[0].legend()
# # plt.savefig(os.path.join(md.outdir_plot, 'transport_efficiency.png'), dpi=300)
# # plt.close()
# # plt.figure(figsize=(8, 6.5))
# # xdata = rdf['sr_Re_star']
# xv, yv = mf.mylogslope(xdata,rdf['sr_fme_ec'].values/rdf['sr_std_me'].values, slope = 3/4 )
# # axes[1].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_apime_std'], color='g',marker = '*', s = 20, alpha = 0.6, label = 'CH4')
# # axes[1].scatter(xdata, rdf['sr_fwa_ec']/rdf['sr_apiwa_std'], color='b', marker = 's',s = 20, alpha = 0.6, label = 'H2O')
# # axes[1].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_apien_std']*rdf['wc_en_frac_flux']/np.sqrt(rdf['sr_alpha']), s = 20, alpha = 0.6, color = 'r',marker='^', label = 'CH4 EB')
# # axes[1].scatter(xdata, rdf['sr_fme_ec']/rdf['sr_apiba_std']*rdf['wc_ba_frac_flux']/np.sqrt(1-rdf['sr_alpha']), s = 20, alpha = 0.6, color = 'c',marker = 'o', label = 'CH4 BA')
# # axes[1].plot(xv, yv, 'k', linewidth = 2)
# # axes[1].annotate("b)", xy=(0.1, 0.9), xycoords="axes fraction")
# # axes[1].annotate(r"3/4", xy=(0.9, 0.6), xycoords="axes fraction")
#
#
# axes[1].scatter(xdata, rdf['sr_fme_ec'].values/rdf['sr_std_me'].values, color='g',marker = '*', alpha = 0.6, s = 20, label = 'CH4')
# axes[1].scatter(xdata, rdf['sr_fwa_ec'].values/rdf['sr_std_wa'].values, color='b', marker = 's', alpha = 0.6, s = 20, label = 'H2O')
# axes[1].scatter(xdata, 1/rdf['sr_alpha'].values*rdf['sr_fme_ec'].values/rdf['sr_std_en'].values*rdf['wc_en_frac_flux'].values,
# alpha = 0.6, s = 20, color = 'r',marker='^', label = 'CH4 Ebullition')
# axes[1].scatter(xdata, 1/(1-rdf['sr_alpha'].values)*rdf['sr_fme_ec'].values/rdf['sr_std_ba'].values*rdf['wc_ba_frac_flux'].values,
# alpha = 0.6, s = 20, color = 'c',marker = 'o', label = 'CH4 Background')
# axes[1].annotate("a)", xy=(0.1, 0.9), xycoords="axes fraction")
# axes[1].plot(xv, yv, 'k', linewidth = 2)
#
# # plot cubic spline interpolation
# # xdataso = np.sort(xdata)
# # xdataso_order = np.argsort(xdata)
# # fme = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_apime_std'])[xdataso_order])
# # fwa = UnivariateSpline(xdataso, (rdf['sr_fwa_ec']/rdf['sr_apiwa_std'])[xdataso_order])
# # fen = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_apien_std']*rdf['wc_en_frac_flux']/np.sqrt(rdf['sr_alpha']))[xdataso_order])
# # fba = UnivariateSpline(xdataso, (rdf['sr_fme_ec']/rdf['sr_apiba_std']*rdf['wc_ba_frac_flux']/np.sqrt(1-rdf['sr_alpha']))[xdataso_order])
# # # fme.set_smoothing_factor(0.5)
# # xdataso = np.sort(xdata)
#
# # axes[1].plot(xdataso, fme(xdataso), 'g', linewidth = 2.6)
# # axes[1].plot(xdataso, fme(xdataso), '--k', linewidth = 1.6)
# # axes[1].plot(xdataso, fwa(xdataso), 'b', linewidth = 2.6)
# # axes[1].plot(xdataso, fwa(xdataso), '--k', linewidth = 1.6)
# # axes[1].plot(xdataso, fen(xdataso), 'r', linewidth = 2.6)
# # axes[1].plot(xdataso, fen(xdataso), '--k', linewidth = 1.6)
# # axes[1].plot(xdataso, fba(xdataso), 'c', linewidth = 2.6)
# # axes[1].plot(xdataso, fba(xdataso), '--k', linewidth = 1.6)
# axes[1].set_xscale('log')
# axes[1].set_yscale('log')
# axes[1].set_ylabel('Gas Transfer Velocity [m/s]')
# axes[1].set_xlabel(r'$u_*$')
# plt.tight_layout()
# # plt.legend()
# plt.savefig(os.path.join(md.outdir_plot, 'gas_transfer_velocity_and_treff.png'), dpi=300)
# plt.close()
# plt.figure()
# xv, yv = mf.mylogslope(tdf['Re_star'], rdf['sr_fme_ec'].values/rdf['sr_apime_std'].values*tdf['ustar'].values**(-1), slope=-1/4)
# plt.plot(tdf['Re_star'], rdf['sr_fme_ec'].values/rdf['sr_apime_std'].values*tdf['ustar'].values**(-1), 'o')
# plt.plot(xv, yv, 'k')
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
# plt.figure()
# # xv, yv = mf.mylogslope(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, slope=1/4)
# xv, yv = mf.mylogslope(tdf['ustar'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, slope=3/4)
# # plt.plot(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, 'o')
# plt.plot(tdf['ustar'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, 'o')
# plt.plot(xv, yv, 'k', linewidth = 2)
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
#
#
# plt.figure()
# # xv, yv = mf.mylogslope(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, slope=1/4)
# xv, yv = mf.mylogslope(tdf['ustar'], rdf['sr_fwa_ec'].values/rdf['sr_apiwa_std'].values, slope=3/4)
# # plt.plot(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, 'o')
# plt.plot(tdf['ustar'], rdf['sr_fwa_ec'].values/rdf['sr_apiwa_std'].values, 'o')
# plt.plot(xv, yv, 'k', linewidth = 2)
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
# #
# #
# plt.figure()
# # xv, yv = mf.mylogslope(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, slope=1/4)
# xv, yv = mf.mylogslope(tdf['ustar']**(3/4)*rdf['sr_apiwa_std'].values, rdf['sr_fwa_ec'].values, slope=1)
# # plt.plot(tdf['epsilon'], rdf['sr_fme'].values/rdf['sr_apime_std'].values, 'o')
# plt.plot(tdf['ustar']**(3/4)*rdf['sr_apiwa_std'].values, rdf['sr_fwa_ec'].values, 'o')
# plt.plot(xv, yv, 'k', linewidth = 2)
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
# ksr = 1
# fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
# axes[0,0].set_title(r'$CH_4$')
# axes[0,0].plot(rdf['fluxes_me_ecf'], ksr*rdf['fluxes_me_srf'], 'og', alpha = 0.6, markersize = 6)
# axes[0,0].plot(rdf['fluxes_me_ecf'], rdf['fluxes_me_ecf'], 'k')
# axes[0,0].set_ylabel('Surface Renewal flux')
# axes[0,0].set_xlabel('Eddy covariance flux')
# axes[0,0].set_xscale('log')
# axes[0,0].set_yscale('log')
# axes[0,0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
#
# axes[0,1].set_title(r'$H_2O$')
# axes[0,1].plot(rdf['fluxes_wa_ecf'], ksr*rdf['fluxes_wa_srf'], 'ob', alpha = 0.6, markersize = 6)
# axes[0,1].plot(rdf['fluxes_wa_ecf'], rdf['fluxes_wa_ecf'], 'k')
# axes[0,1].set_xscale('log')
# axes[0,1].set_yscale('log')
#
# axes[0,1].set_ylabel('Surface Renewal flux')
# axes[0,1].set_xlabel('Eddy covariance flux')
# axes[0,1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
#
# axes[1, 0].set_title(r'Sensible heat ($T$)')
# axes[1,0].plot(rdf['fluxes_T_ecf'], ksr*rdf['fluxes_T_srf']*np.sign(rdf['fluxes_T_ecf']), 'or', alpha = 0.6, markersize = 6)
# # axes[1,0].plot(rdf['fluxes_T_ecf'], ksr*rdf['fluxes_T_srf'], 'or', alpha = 0.6, markersize = 6)
# axes[1,0].plot(rdf['fluxes_T_ecf'], rdf['fluxes_T_ecf'], 'k')
# # axes[1,0].set_xscale('log')
# # axes[1,0].set_yscale('log')
#
# axes[1,0].set_ylabel('Surface Renewal flux')
# axes[1,0].set_xlabel('Eddy covariance flux')
# axes[1,0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
#
# axes[1, 1].set_title(r'Momentum ($u$)')
# axes[1,1].plot(rdf['fluxes_u_ecf'],
# ksr*rdf['fluxes_u_srf']*np.sign(rdf['fluxes_u_ecf']), 'ok', alpha = 0.6, markersize = 6)
# # axes[1,1].plot(rdf['fluxes_cd_ecf'], ksr*rdf['fluxes_cd_srf']*np.sign(rdf['fluxes_cd_ecf']), 'ok', alpha = 0.6, markersize = 6)
# axes[1,1].plot(rdf['fluxes_u_ecf'], rdf['fluxes_u_ecf'], 'k')
# # axes[1,1].plot(rdf['fluxes_cd_ecf'], rdf['fluxes_cd_ecf'], 'k')
# axes[1,1].set_ylabel('Surface Renewal flux')
# axes[1,1].set_xlabel('Eddy covariance flux')
# axes[1,1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
# plt.tight_layout()
# # plt.show()
# plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes.png'), dpi=300)
# plt.close()
# ksr = 1
# fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
# axes[0,0].set_title(r'$CH_4$')
# axes[0,0].plot(rdf['fluxes_me_ecf'], rdf['fluxes_me_srf'], 'og', alpha = 0.6, markersize = 6)
# axes[0,0].plot(rdf['fluxes_me_ecf'], rdf['fluxes_me_ecf'], 'k')
# axes[0,0].set_ylabel('Surface Renewal flux')
# axes[0,0].set_xlabel('Eddy covariance flux')
# axes[0,0].set_xscale('log')
# axes[0,0].set_yscale('log')
# axes[0,0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[0,1].set_title(r'$H_2O$')
# axes[0,1].plot(rdf['fluxes_wa_ecf'], rdf['fluxes_wa_srf'], 'ob', alpha = 0.6, markersize = 6)
# axes[0,1].plot(rdf['fluxes_wa_ecf'], rdf['fluxes_wa_ecf'], 'k')
# axes[0,1].set_xscale('log')
# axes[0,1].set_yscale('log')
# axes[0,1].set_ylabel('Surface Renewal flux')
# axes[0,1].set_xlabel('Eddy covariance flux')
# axes[0,1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[1, 0].set_title(r'Sensible heat ($T$)')
# axes[1,0].plot(rdf['fluxes_T_ecf'], rdf['fluxes_T_srf'], 'or', alpha = 0.6, markersize = 6)
# axes[1,0].plot(rdf['fluxes_T_ecf'], rdf['fluxes_T_ecf'], 'k')
# axes[1,0].set_ylabel('Surface Renewal flux')
# axes[1,0].set_xlabel('Eddy covariance flux')
# axes[1,0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[1, 1].set_title(r'Momentum ($u$)')
# axes[1,1].plot(rdf['fluxes_u_ecf'], rdf['fluxes_u_srf'], 'ok', alpha = 0.6, markersize = 6)
# axes[1,1].plot(rdf['fluxes_u_ecf'], rdf['fluxes_u_ecf'], 'k')
# axes[1,1].set_ylabel('Surface Renewal flux')
# axes[1,1].set_xlabel('Eddy covariance flux')
# axes[1,1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
# plt.tight_layout()
# # plt.show()
# plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes.png'), dpi=300)
# plt.close()
ksr = 1
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
axes[0,0].set_title(r'$CH_4$')
axes[0,0].plot(rdf['sr_fme_ec'], rdf['sr_fme_sr'], 'og', alpha = 0.6, markersize = 6)
axes[0,0].plot(rdf['sr_fme_ec'], rdf['sr_fme_ec'], 'k')
# axes[0,0].set_ylabel('Surface Renewal flux')
# axes[0,0].set_xlabel('Eddy covariance flux')
axes[0, 0].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0, 0].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0,0].set_xscale('log')
axes[0,0].set_yscale('log')
axes[0,0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[0,1].set_title(r'$H_2O$')
axes[0,1].plot(rdf['sr_fwa_ec'], rdf['sr_fwa_sr'], 'ob', alpha = 0.6, markersize = 6)
axes[0,1].plot(rdf['sr_fwa_ec'], rdf['sr_fwa_ec'], 'k')
axes[0,1].set_xscale('log')
axes[0,1].set_yscale('log')
# axes[0,1].set_ylabel('Surface Renewal flux')
# axes[0,1].set_xlabel('Eddy covariance flux')
axes[0, 1].set_xlabel(r'EC flux [$mmol\, m^{-2}\, s^{-1}$]')
axes[0, 1].set_ylabel(r'SR flux [$mmol\, m^{-2}\, s^{-1}$]')
axes[0,1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 0].set_title(r'Sensible heat ($T$)')
axes[1,0].plot(rdf['sr_fTT_ec'], rdf['sr_fTT_sr'], 'or', alpha = 0.6, markersize = 6)
axes[1,0].plot(rdf['sr_fTT_ec'], rdf['sr_fTT_ec'], 'k')
# axes[1,0].set_ylabel('Surface Renewal flux')
# axes[1,0].set_xlabel('Eddy covariance flux')
axes[1, 0].set_xlabel(r'EC flux [$K\, m\, s^{-1}$]')
axes[1, 0].set_ylabel(r'SR flux [$K\, m\, s^{-1}$]')
axes[1,0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 1].set_title(r'Momentum ($u$)')
axes[1,1].plot(rdf['sr_fuu_ec'], rdf['sr_fuu_sr'], 'ok', alpha = 0.6, markersize = 6)
axes[1,1].plot(rdf['sr_fuu_ec'], rdf['sr_fuu_ec'], 'k')
# axes[1,1].set_ylabel('Surface Renewal flux')
# axes[1,1].set_xlabel('Eddy covariance flux')
# axes[1, 1].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
# axes[1, 1].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[1, 1].set_xlabel(r'EC flux [$ m^{2}\, s^{-2}$]')
axes[1, 1].set_ylabel(r'SR flux [$ m^{2}\, s^{-2}$]')
axes[1,1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
plt.tight_layout()
# plt.show()
plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes.png'), dpi=300)
plt.close()
# daytime = rdf['datetime'].dt.hour.values + rdf['datetime'].dt.minute.values/60
#
# rdf_spikes = rdf[rdf['fluxes_me_srf']> 0.005].copy()
# daytime_spikes = rdf_spikes['datetime'].dt.hour.values + rdf_spikes['datetime'].dt.minute.values/60
# fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
# axes[0,0].set_xlim(0.0, 23.50)
# axes[0,0].set_title(r'$CH_4$')
# axes[0,0].plot(daytime, rdf['fluxes_me_srf'], '^g', alpha = 0.6, markersize = 6)
# axes[0,0].plot(daytime, rdf['fluxes_me_ecf'], '*k', alpha = 0.6, markersize = 6)
# axes[0,0].set_ylabel('Flux [$\mu mol\, m\, s^{-1}$]')
# axes[0,0].set_xlabel('Time of the day [hour]')
# axes[0,0].set_yscale('log')
# axes[0,0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[0,1].set_xlim(0, 23.30)
# axes[0,1].set_title(r'$H_2O$')
# axes[0,1].plot(daytime, rdf['fluxes_wa_srf'], '^b', alpha = 0.6, markersize = 6)
# axes[0,1].plot(daytime, rdf['fluxes_wa_ecf'], '*k', alpha = 0.6, markersize = 6)
# axes[0,1].set_ylabel('Flux [$\mu mol\, m\, s^{-1}$]')
# axes[0,1].set_xlabel('Time of the day [hour]')
# axes[0,1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[1, 0].set_title(r'Sensible heat ($T$)')
# axes[1,0].plot(daytime, rdf['fluxes_T_srf'], '^y', alpha = 0.6, markersize = 6)
# axes[1,0].plot(daytime, rdf['fluxes_T_ecf'], '*k', alpha = 0.6, markersize = 6)
# axes[1,0].set_ylabel('Flux [$K\, m\, s^{-1}$]')
# axes[1,0].set_xlabel('Time of the day [hour]')
# axes[1,0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
# axes[1, 1].set_title(r'$CO_2$')
# axes[1,1].plot(daytime, rdf['fluxes_cd_srf'], '^c', alpha = 0.6, markersize = 6)
# axes[1,1].plot(daytime, rdf['fluxes_cd_ecf'], '*k', alpha = 0.6, markersize = 6)
# axes[1,1].set_ylabel('Flux [$\mu mol\, m\, s^{-1}$]')
# axes[1,1].set_xlabel('Time of the day [hour]')
# axes[1,1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
# plt.tight_layout()
# fig.savefig(os.path.join(md.outdir_plot, 'ec_sr_daily_fluxes.png'), dpi=300)
# axes[1,1].plot(daytime_spikes, rdf_spikes['fluxes_cd_srf'], 'or', alpha = 0.6, markersize = 8)
# axes[0,0].plot(daytime_spikes, rdf_spikes['fluxes_me_srf'], 'or', alpha = 0.6, markersize = 8)
# fig.savefig(os.path.join(md.outdir_plot, 'ec_sr_daily_fluxes_marked.png'), dpi=300)
# plt.close()
daytime = rdf['datetime'].dt.hour.values + rdf['datetime'].dt.minute.values/60
# rdf_spikes = rdf[rdf['sr_fme_isr']> 0.005].copy()
rdf_spikes = rdf[rdf['sr_fme_isr']> 0.2].copy()
daytime_spikes = rdf_spikes['datetime'].dt.hour.values + rdf_spikes['datetime'].dt.minute.values/60
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
axes[0,0].set_xlim(0.0, 23.50)
axes[0,0].set_title(r'$CH_4$')
axes[0,0].plot(daytime, rdf['sr_fme_isr'], '^g', alpha = 0.6, markersize = 6)
axes[0,0].plot(daytime, rdf['sr_fme_ec'], '*k', alpha = 0.6, markersize = 6)
axes[0,0].set_ylabel(r'Flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0,0].set_xlabel('Time of the day [hour]')
axes[0,0].set_yscale('log')
axes[0,0].annotate("a)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[0,1].set_xlim(0, 23.30)
axes[0,1].set_title(r'$H_2O$')
axes[0,1].plot(daytime, rdf['sr_fwa_sr'], '^b', alpha = 0.6, markersize = 6)
axes[0,1].plot(daytime, rdf['sr_fwa_ec'], '*k', alpha = 0.6, markersize = 6)
axes[0,1].set_ylabel(r'Flux [$mmol\, m^{-2}\, s^{-1}$]')
axes[0,1].set_xlabel('Time of the day [hour]')
axes[0,1].annotate("b)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 0].set_title(r'Sensible heat ($T$)')
axes[1,0].plot(daytime, rdf['sr_fTT_sr'], '^y', alpha = 0.6, markersize = 6)
axes[1,0].plot(daytime, rdf['sr_fTT_ec'], '*k', alpha = 0.6, markersize = 6)
axes[1,0].set_ylabel(r'Flux [$K\, m\, s^{-1}$]')
axes[1,0].set_xlabel('Time of the day [hour]')
axes[1,0].annotate("c)", xy=(0.05, 0.90), xycoords="axes fraction")
axes[1, 1].set_title(r'$CO_2$')
axes[1,1].plot(daytime, rdf['sr_fcd_isr'], '^c', alpha = 0.6, markersize = 6)
axes[1,1].plot(daytime, rdf['sr_fcd_ec'], '*k', alpha = 0.6, markersize = 6)
axes[1,1].set_ylabel(r'Flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[1,1].set_xlabel('Time of the day [hour]')
axes[1,1].annotate("d)", xy=(0.05, 0.90), xycoords="axes fraction")
plt.tight_layout()
fig.savefig(os.path.join(md.outdir_plot, 'ec_sr_daily_fluxes.png'), dpi=300)
axes[1,1].plot(daytime_spikes, rdf_spikes['sr_fcd_isr'], 'or', alpha = 0.6, markersize = 8)
axes[0,0].plot(daytime_spikes, rdf_spikes['sr_fme_isr'], 'or', alpha = 0.6, markersize = 8)
fig.savefig(os.path.join(md.outdir_plot, 'ec_sr_daily_fluxes_marked.png'), dpi=300)
plt.close()
# plt.figure()
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_sr'],
# 'ok', alpha = 0.6, markersize = 6)
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'k')
# # plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'og')
# # plt.plot(rdf['sr_fcd_ec'], rdf['sr_fba_cd_sr'] + rdf['sr_fen_cd_sr'] , 'og')
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fba_cd_sr']+rdf['sr_fen_cd_sr'] , 'og')
# # plt.plot(rdf['fluxes_cd_ecf'],rdf['fluxes_cd_ecf'], 'k')
# # plt.plot(rdf['fluxes_cd_ecf'], rdf['srco2_fba_sr'], 'og')
# plt.ylabel('SR')
# plt.xlabel('EC')
# plt.show()
# plt.figure(figsize = (8, 8))
# # plt.title('$Co_2$')
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_isr'],
# 'ok', alpha = 0.6, markersize = 6)
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'k', label = r'$CO_2$ total flux')
# # plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'og')
# # plt.plot(rdf['sr_fcd_ec'], rdf['sr_fba_cd_sr'] + rdf['sr_fen_cd_sr'] , 'og')
# plt.plot(rdf['wc_cd_flux_ec']*rdf['wc_bd_frac_flux'], rdf['sr_fba_cd_sr'], 'oc', label = r'$CO_2$ B flux')
# plt.plot(rdf['wc_cd_flux_ec']*rdf['wc_ed_frac_flux'], rdf['sr_fen_cd_sr'], 'or', label = r'$CO_2$ H flux')
# # plt.plot(rdf['fluxes_cd_ecf'],rdf['fluxes_cd_ecf'], 'k')
# # plt.plot(rdf['fluxes_cd_ecf'], rdf['srco2_fba_sr'], 'og')
# plt.ylabel(r'SR flux [$\mu mol\, m\, s^{-1}$]')
# plt.xlabel(r'EC flux [$\mu mol\, m\, s^{-1}$]')
# plt.legend()
# plt.tight_layout()
# plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes_CO2.png'), dpi=300)
# plt.close()
fig, axes = plt.subplots(1, 2, figsize = (13, 7.3))
axes[0].plot(rdf['sr_fcd_ec'], rdf['sr_fcd_isr'],'ok', label = r'$CO_2$ total flux', alpha = 0.6, markersize = 6)
axes[0].plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'k', markersize=5, alpha = 0.7)
axes[0].plot(rdf['wd_me_flux_ec']*rdf['wd_ba_frac_flux'], rdf['sr_fba_cd_sr'], 'oc', label = r'$CO_2$ B flux', markersize=5, alpha = 0.7)
axes[0].plot(rdf['wd_me_flux_ec']*rdf['wd_en_frac_flux'], rdf['sr_fen_cd_sr'], 'or', label = r'$CO_2$ H flux', markersize=5, alpha = 0.7)
axes[0].axvline(x=0.0, color='k', linestyle='--')
axes[0].axhline(y=0.0, color='k', linestyle='--')
axes[0].set_ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0].set_xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
axes[0].legend()
# plt.figure()
axes[1].plot(rdf['wd_me_fa'], 'k',label = r'$CO_2$ total')
axes[1].plot(rdf['wd_ba_fa'],'c',label = r'$CO_2$ B')
axes[1].plot(rdf['wd_en_fa'], 'r', label = r'$CO_2$ H')
axes[1].axhline(y=0.0, color='k', linestyle='--')
# axes[1].legend()
axes[1].set_xlabel('Run')
axes[1].set_ylabel(r'Average flux [$\mu mol\, m^{-2}\, s^{-1}$]')
# plt.show()
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes_CO2.png'), dpi=300)
plt.close()
# fig, axes = plt.subplots(111, figsize = (6, 6))
fig = plt.figure(figsize = (7.3, 7.3))
plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_isr'],'ok', label = r'$CO_2$ total flux', alpha = 0.6, markersize = 6)
plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'k', markersize=5, alpha = 0.7)
plt.plot(rdf['wd_me_flux_ec']*rdf['wd_ba_frac_flux'], rdf['sr_fba_cd_sr'], 'oc', label = r'$CO_2$ B flux', markersize=5, alpha = 0.7)
plt.plot(rdf['wd_me_flux_ec']*rdf['wd_en_frac_flux'], rdf['sr_fen_cd_sr'], 'or', label = r'$CO_2$ H flux', markersize=5, alpha = 0.7)
plt.axvline(x=0.0, color='k', linestyle='--')
plt.axhline(y=0.0, color='k', linestyle='--')
plt.ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
plt.xlabel(r'EC flux [$\mu mol\, m^{-2}\, s^{-1}$]')
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes_CO2_single.png'), dpi=300)
plt.close()
# datetimes = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
plt.figure(figsize = (8, 8))
# plt.title('$Co_2$')
plt.plot(rdf['sr_fcd_ec'],
'ok', alpha = 0.6, markersize = 6)
plt.plot(rdf['wd_me_flux_ec']*rdf['wd_ba_frac_flux'], 'oc', label = r'$CO_2$ B flux', markersize=5, alpha = 0.7)
plt.plot(rdf['wd_me_flux_ec']*rdf['wd_en_frac_flux'], 'or', label = r'$CO_2$ H flux', markersize=5, alpha = 0.7)
# plt.plot(rdf['sr_fba_cd_sr'], '^b', label = r'$CO_2$ B flux', markersize=5, alpha = 0.7)
# plt.plot(rdf['sr_fen_cd_sr'], '^r', label = r'$CO_2$ H flux', markersize=5, alpha = 0.7)
# plt.plot(rdf['fluxes_cd_ecf'],rdf['fluxes_cd_ecf'], 'k')
# plt.plot(rdf['fluxes_cd_ecf'], rdf['srco2_fba_sr'], 'og')
plt.axvline(x=0.0, color='k', linestyle='--')
plt.axhline(y=0.0, color='k', linestyle='--')
plt.ylabel(r'SR flux [$\mu mol\, m^{-2}\, s^{-1}$]')
plt.xlabel(r'Run')
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(md.outdir_plot, 'ec_day_all_fluxes_CO2.png'), dpi=300)
plt.close()
# plt.figure()
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_sr'], 'ok', alpha = 0.6, markersize = 6)
# plt.plot(rdf['sr_fcd_ec'], rdf['sr_fcd_ec'], 'k')
# plt.ylabel(r'$CO_2$ Surface Renewal flux')
# plt.xlabel(r'$CO_2$ Eddy covariance flux')
# plt.savefig(os.path.join(md.outdir_plot, 'ec_sr_all_fluxes_CO2.png'), dpi=300)
# plt.close()
plt.figure()
plt.plot(rdf['wd_en_M30'], rdf['wc_en_M30'], 'or')
plt.plot(rdf['wd_en_M30'], rdf['wd_en_M30'], 'k')
plt.plot(rdf['wd_me_M30'], rdf['wc_me_M30'], 'ok')
plt.plot(rdf['wd_ba_M30'], rdf['wc_ba_M30'], 'og')
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlabel(r'$M_{30}$ $CH_4$')
plt.ylabel(r'$M_{30}$ $CO_2$')
plt.savefig(os.path.join(md.outdir_plot, 'M30_CH4_CO2.png'), dpi=300)
plt.close()
plt.figure()
plt.plot(rdf['wc_bar_M30'], rdf['wc_bar_M30'], 'k')
plt.plot(rdf['wc_bar_M30'], rdf['wc_ba_M30'], 'or')
plt.plot(rdf['wc_wa_M30'], rdf['wc_me_M30'], 'ob')
plt.xscale('symlog')
plt.yscale('symlog')
plt.show()
# rdf['time'] = rdf['datetime'].dt.hour
#
#
# plt.figure()
# # plt.plot(rdf['time'].values, rdf['sr_fme_ec'].values, 'o')
# # plt.plot(tdf['ustar'].values, rdf['wc_en_frac_time'].values, 'o')
# plt.plot(tdf['Tbar'].values, rdf['sr_fme_ec'].values*rdf['wc_en_frac_flux'].values, 'o')
# plt.plot(tdf['Tbar'].values, rdf['sr_fme_ec'].values, 'o')
# plt.yscale('log')
# # plt.plot(tdf['Tbar'].values, rdf['wc_en_frac_flux'], 'o')
# # plt.plot(rdf['time'].values, tdf['Tbar'], 'o')
# plt.show()
# plt.figure()
# plt.plot(rdf['wco2_me_M30'])
# plt.show()
# plt.figure()
# # plt.plot(rdf['time'].values, rdf['sr_fme_ec'].values, 'o')
# # plt.plot(tdf['ustar'].values, rdf['wc_en_frac_time'].values, 'o')
# plt.plot(tdf['stab'].values, rdf['sr_fme_ec'].values*rdf['wc_en_frac_flux'].values, 'o')
# plt.plot(tdf['stab'].values, rdf['sr_fme_ec'].values, 'o')
# plt.yscale('log')
# # plt.plot(tdf['Tbar'].values, rdf['wc_en_frac_flux'], 'o')
# # plt.plot(rdf['time'].values, tdf['Tbar'], 'o')
# plt.show()
# import pickle
# plt.figure()
# plt.plot(rdf['sr_ustar'], rdf['wc_en_frac_flux'], 'o')
# # plt.plot(rdf['sr_ustar'], rdf['wc_ba_frac_time'], 'o')
# # plt.plot(tdf['Re0'], rdf['wc_ba_frac_flux'], 'o')
# plt.xscale('log')
# plt.yscale('log')
# plt.plot()
# read environmental data: Pressure and Water table:#
#___________________________________________________#
dfpr = pd.read_csv(os.path.join('..', 'methane_data', 'SMEARII_p.csv'))
dfpr['datetime'] = pd.to_datetime(dfpr.iloc[:,:6])
dfpr.rename(columns = {'HYY_META.Pamb0':'patm'}, inplace = True)
# dfpr.set_index(dfpr['datetime'], inplace=True)
def pslope(y):
# compute slope of the time series
y = np.array(y)
# x = 60*np.arange(np.size(y)) # time in seconds
# slope= np.polyfit(x, y, 1)[0]
    slope = (y[-1] - y[0])/30/60  # end-to-end change over the 30-min window, converted to per-second units
return slope
# y = [1, 2, 3, 4, -5]
# print(pslope(y))
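# Hedged sanity check (not from the original script): a linear ramp sampled once
# per minute over a 30-minute window has an end-to-end change of 30 units,
# so pslope should return 30 / (30 * 60) = 1/60 per second.
assert abs(pslope(np.arange(31)) - 1.0/60.0) < 1e-12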
dfpr30 = dfpr.resample('30min', on = 'datetime').agg({'patm':[np.mean, np.std, pslope]})
dfpr30.columns = dfpr30.columns.droplevel(level=0)
dfpr30.rename(columns = {'mean':'patm_mean','std':'patm_std', 'pslope':'patm_slope'}, inplace = True)
# dfpr30['datetime'] = dfpr30.index
# .resample('1H', how={'radiation': np.sum, 'tamb': np.mean})
dfwt = pd.read_csv(os.path.join('..', 'methane_data', 'Siikaneva_wt_level.csv'))
dfwt['datetime'] = pd.to_datetime(dfwt.iloc[:,:6])
dfwt.rename(columns = {'SII1_META.WTD':'wtd'}, inplace = True)
dfwt.drop(columns=['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second'], inplace = True)
rdfenv = rdf.merge(dfwt, on='datetime').merge(dfpr30, on='datetime')
plotwatertable = False
if plotwatertable:
fig, axes = plt.subplots(2, 1)
axes[0].plot(dfwt['datetime'], dfwt['wtd'], '*')
axes[0].plot(rdfenv['datetime'], rdfenv['wtd'], 'o')
axes[0].set_ylabel(r'water table level [cm]')
axes[1].plot(dfpr30.index, dfpr30['patm_mean'], '*')
axes[1].plot(rdfenv['datetime'], rdfenv['patm_mean'], 'o')
axes[1].set_xlabel('date')
axes[1].set_ylabel(r'$p_{atm}$ hPa')
plt.show()
# plt.plot(rdf['datetime'])
rdfenv['Ubar'] = tdf['Ubar'].values
rdfenv['Tbar'] = tdf['Tbar'].values
# envvar = 'patm_mean'
# envvar = 'patm_slope'
# envvar = 'sr_ustar'
# envvar = 'ts_shear_ts'
# envvar = 'ts_diss_ts'
envvar1 = 'wtd'
envvar2 = 'patm_slope'
# envvar = 'Ubar'
fig, axes = plt.subplots(1, 2, figsize = (12, 6))
ebflux = rdfenv['wc_en_frac_flux']*rdf['wc_me_flux_ec']
# axes[0].set_xlabel(r'$\frac{\Delta p_{atm}}{\Delta t}$ [$hPa \quad s^{-1}$]', fontsize = 16)
axes[0].plot(rdfenv[envvar1], rdfenv['wc_me_flux_ec'], 'sk', alpha = 0.6, markersize = 6, label = 'Total flux')
axes[0].plot(rdfenv[envvar1],ebflux , 'or', alpha = 0.6, markersize = 6, label = 'Hotspot flux')
axes[0].plot( [np.min(rdfenv[envvar1]), np.max(rdfenv[envvar1])],
[np.median(rdfenv['wc_me_flux_ec']), np.median(rdfenv['wc_me_flux_ec'])], 'k')
axes[0].plot( [np.min(rdfenv[envvar1]), np.max(rdfenv[envvar1])],
[np.median(ebflux), np.median(ebflux)], 'r')
# axes[0].plot(rdfenv[envvar], rdfenv['wc_ba_frac_flux']*rdf['wc_me_flux_ec'], '^c', alpha = 0.6, markersize = 6, label = 'Background flux')
axes[0].set_xlabel(r'Relative water table depth [cm]', fontsize = 16)
axes[0].set_yscale('log')
# axes[0].legend(ncol = 3, bbox_to_anchor=(1., 1.2))
axes[0].set_ylabel(r'$CH_4$ Flux [$\mu mol\, m^{-2}\, s^{-1}$]', fontsize = 16)
axes[0].annotate("a)", xy=(0.04, 0.90), xycoords="axes fraction")
axes[1].plot( [0, 0], [np.min(ebflux), np.max(rdfenv['wc_me_flux_ec'])], '--k')
axes[1].plot(rdfenv[envvar2], rdfenv['wc_me_flux_ec'], 'sk', alpha = 0.6, markersize = 6, label = 'Total flux')
axes[1].plot(rdfenv[envvar2],ebflux , 'or', alpha = 0.6, markersize = 6, label = 'Hotspot flux')
axes[1].plot( [np.min(rdfenv[envvar2]), np.max(rdfenv[envvar2])],
[np.median(rdfenv['wc_me_flux_ec']),
|
np.median(rdfenv['wc_me_flux_ec'])
|
numpy.median
|
"""
Define related utility functions for Fourier–Bessel (2D), Spherical Fourier–Bessel (3D) and
prolate spheroidal wave function (PSWF) objects.
"""
import logging
import numpy as np
from numpy import diff, exp, log, pi
from numpy.polynomial.legendre import leggauss
from scipy.special import jn, jv, sph_harm
from aspire.utils import grid_2d, grid_3d
logger = logging.getLogger(__name__)
def check_besselj_zeros(nu, z):
"""
Sanity-check a sequence of estimated zeros of the Bessel function with order `nu`.
:param nu: The real number order of the Bessel function.
:param z: (Array-like) A sequence of postulated zeros.
:return result: True or False.
"""
# Compute first and second order differences of the sequence of zeros
dz = np.diff(z)
ddz = np.diff(dz)
# Check criteria for acceptable zeros
result = True
# Real roots
result = result and all(np.isreal(z))
# All roots should be > 0, check first of increasing sequence
result = result and z[0] > 0
# Spacing between zeros is greater than 3
result = result and all(dz > 3)
# Second order differences should be zero or just barely increasing to
# within 16x machine precision.
if nu >= 0.5:
result = result and all(ddz < 16 * np.spacing(z[1:-1]))
# For nu < 0.5 the spacing will be slightly decreasing, so flip the sign
else:
result = result and all(ddz > -16 * np.spacing(z[1:-1]))
return result
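def _example_check_besselj_zeros():
    # Hedged usage sketch, not part of the original module: for an integer order,
    # scipy's tabulated zeros should satisfy all of the criteria above, so the
    # check is expected to return True. The helper name is illustrative only.
    from scipy.special import jn_zeros
    return check_besselj_zeros(1, jn_zeros(1, 10))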
def besselj_newton(nu, z0, max_iter=10):
"""
Uses the Newton-Raphson method to compute the zero(s) of the
Bessel function with order `nu` with initial guess(es) `z0`.
:param nu: The real number order of the Bessel function.
:param z0: (Array-like) The initial guess(es) for the root-finding algorithm.
:param max_iter: Maximum number of iterations for Newton-Raphson
(default: 10).
:return z: (Array-like) The estimated root(s).
"""
z = z0
# Factor worse than machine precision
c = 8
for i in range(max_iter):
# Calculate values and derivatives at z
f = jv(nu, z)
fp = jv(nu - 1, z) - nu * f / z
# Update zeros
dz = -f / fp
z = z + dz
# Check for convergence
if all(np.abs(dz) < c * np.spacing(z)):
break
# If we're not converging yet, start relaxing convergence criterion
if i >= 6:
c *= 2
return z
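def _example_besselj_newton():
    # Hedged usage sketch, not part of the original module: refine a rough guess
    # for the first zero of J_0; the result should be close to 2.404826.
    return besselj_newton(0, np.array([2.4]))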
def sph_bessel(ell, r):
"""
Compute spherical Bessel function values.
:param ell: The order of the spherical Bessel function.
:param r: The coordinates where the function is to be evaluated.
:return: The value of j_ell at r.
"""
scalar = np.isscalar(r)
len_r = 1 if scalar else len(r)
j = np.zeros(len_r)
j[r == 0] = 1 if ell == 0 else 0
r_mask = r != 0
j[r_mask] = np.sqrt(pi / (2 * r[r_mask])) * jv(ell + 0.5, r[r_mask])
if scalar:
j = j.item()
return j
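def _example_sph_bessel():
    # Hedged numerical check, not part of the original module: away from r = 0
    # the values should match scipy's spherical Bessel function of the first kind.
    from scipy.special import spherical_jn
    r = np.linspace(0.1, 10.0, 50)
    return np.allclose(sph_bessel(2, r), spherical_jn(2, r))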
def norm_assoc_legendre(j, m, x):
"""
Evaluate the normalized associated Legendre polynomial
    :param j: The order of the associated Legendre polynomial, must satisfy |m| <= j.
    :param m: The degree of the associated Legendre polynomial, must satisfy |m| <= j.
:param x: An array of values between -1 and +1 on which to evaluate.
:return: The normalized associated Legendre polynomial evaluated at corresponding x.
"""
# For negative m, flip sign and use the symmetry identity.
# In the rest, we assume that m is non-negative.
if m < 0:
m = -m
        # Apply the symmetry identity once: P_j^{-m} = (-1)^m P_j^{m} (normalized form).
        px = (-1) ** m * norm_assoc_legendre(j, m, x)
        return px
# Initialize the recurrence at (m, m) and (m, m+1).
p0 = (
(-1) ** m
* np.sqrt(
(2 * m + 1)
/ 2
* np.prod(np.arange(2 * m - 1, 0, -2) / np.arange(2 * m, 0, -2))
)
* (1 - x * x) ** (m / 2)
)
p1 = x * np.sqrt(2 * m + 3) * p0
# If these are the desired indices, return these initial values.
if j == m:
px = p0
elif j == m + 1:
px = p1
else:
# Fixing m, work our way up from (m, m+1) to (m, j).
for n in range(m + 1, j):
px = np.sqrt((2 * n + 3) / ((n + 1 + m) * (n + 1 - m))) * (
np.sqrt(2 * n + 1) * x * p1
- np.sqrt((n + m) * (n - m) / (2 * n - 1)) * p0
)
p0 = p1
p1 = px
return px
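def _example_norm_assoc_legendre():
    # Hedged numerical check, not part of the original module: with this
    # normalization each polynomial should have unit L2 norm on [-1, 1],
    # which a Gauss-Legendre rule can verify exactly for fixed (j, m).
    x, w = leggauss(64)
    p = norm_assoc_legendre(5, 3, x)
    return np.isclose(np.sum(w * p * p), 1.0)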
def real_sph_harmonic(j, m, theta, phi):
"""
Evaluate a real spherical harmonic
    :param j: The order of the spherical harmonic. These must satisfy |m| <= j.
    :param m: The degree of the spherical harmonic. These must satisfy |m| <= j.
    :param theta: The spherical coordinates of the points at which we want to evaluate the real spherical harmonic.
        `theta` is the polar (colatitude) angle, between 0 and pi
:param phi: The spherical coordinates of the points at which we want to evaluate the real spherical harmonic.
`phi` is the longitude, between 0 and 2*pi
:return: The real spherical harmonics evaluated at the points (theta, phi).
"""
abs_m = abs(m)
y = sph_harm(abs_m, j, phi, theta)
if m < 0:
y = np.sqrt(2) * np.imag(y)
elif m > 0:
y = np.sqrt(2) * np.real(y)
else:
y = np.real(y)
return y
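def _example_real_sph_harmonic():
    # Hedged sanity check, not part of the original module: the (j=0, m=0)
    # harmonic is the constant 1 / (2 * sqrt(pi)) everywhere on the sphere.
    theta = np.array([0.3, 1.2, 2.5])
    phi = np.array([0.0, 2.0, 5.0])
    return np.allclose(real_sph_harmonic(0, 0, theta, phi), 1.0 / (2.0 * np.sqrt(pi)))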
def besselj_zeros(nu, k):
"""
Finds the first `k` zeros of the Bessel function of order `nu`, i.e. J_nu.
Adapted from "zerobess.m" by <NAME> <<EMAIL>>
:param nu: The real number order of the Bessel function (must be positive and <1e7).
:param k: The number of zeros to return (must be >= 3).
:return z: A 1D NumPy array of the first `k` zeros.
"""
assert k >= 3, "k must be >= 3"
assert 0 <= nu <= 1e7, "nu must be between 0 and 1e7"
z = np.zeros(k)
# Guess first zeros using powers of nu
c0 = np.array(
[
[0.1701, -0.6563, 1.0355, 1.8558],
[0.1608, -1.0189, 3.1348, 3.2447],
[-0.2005, -1.2542, 5.7249, 4.3817],
]
)
z0 = nu + c0 @ ((nu + 1) ** np.array([[-1, -2 / 3, -1 / 3, 1 / 3]]).T)
# refine guesses
z[:3] = besselj_newton(nu, z0).squeeze()
n = 3
j = 2
err_tol = 5e-3
# Estimate further zeros iteratively using spacing of last three zeros so far
while n < k:
j = min(j, k - n)
# Use last 3 zeros to predict spacing for next j zeros
r = diff(z[n - 3 : n]) - pi
if (r[0] * r[1]) > 0 and (r[0] / r[1]) > 1:
p = log(r[0] / r[1]) / log(1 - 1 / (n - 1))
t = np.array(np.arange(1, j + 1), ndmin=2).T / (n - 1)
dz = pi + r[1] * exp(p * log(1 + t))
else:
dz = pi * np.ones((j, 1))
# Guess and refine
z0 = z[n - 1] + np.cumsum(dz)
z[n : n + j] = besselj_newton(nu, z0)
# Check to see that the sequence of zeros makes sense
assert check_besselj_zeros(
nu, z[n - 2 : n + j]
), "Unable to properly estimate Bessel function zeros."
# Check how far off we are
err = (z[n : n + j] - z0) / np.diff(z[n - 1 : n + j])
n = n + j
if max(abs(err)) < err_tol:
# Predictions were close enough, double number of zeros
j *= 2
else:
# Some predictions were off, set to double the number of good predictions
j = 2 * (np.where(abs(err) >= err_tol)[0][0] + 1)
return z
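def _example_besselj_zeros():
    # Hedged usage sketch, not part of the original module: for an integer order
    # the estimated zeros should agree closely with scipy's jn_zeros.
    from scipy.special import jn_zeros
    return np.allclose(besselj_zeros(3, 20), jn_zeros(3, 20))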
def all_besselj_zeros(ell, r):
"""
Compute the zeros of the order `ell` Bessel function which are less than `r`.
:param ell: The real number order of the Bessel function.
:param r: The upper bound for zeros returned.
:return n, r0: The number of zeros and the zeros themselves
as a NumPy array.
"""
k = 4
# get the first 4 zeros
r0 = besselj_zeros(ell, k)
while all(r0 < r):
# increase the number of zeros sought
# until one of the zeros is greater than `r`
k *= 2
r0 = besselj_zeros(ell, k)
r0 = r0[r0 < r]
# return the number of zeros and the zeros themselves
return len(r0), r0
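def _example_all_besselj_zeros():
    # Hedged usage sketch, not part of the original module: J_0 has six zeros
    # below 20, the first of which is approximately 2.405.
    n, r0 = all_besselj_zeros(0, 20)
    return n, r0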
def unique_coords_nd(N, ndim, shifted=False, normalized=True, dtype=np.float32):
"""
Generate unique polar coordinates from 2D or 3D rectangular coordinates.
:param N: length size of a square or cube.
:param ndim: number of dimension, 2 or 3.
:param shifted: shifted half pixel or not for odd N.
:param normalized: normalize the grid or not.
:return: The unique polar coordinates in 2D or 3D
"""
assert ndim in (
2,
3,
), "Only two- or three-dimensional basis functions are supported."
assert N > 0, "Number of grid points should be greater than 0."
if ndim == 2:
grid = grid_2d(
N, shifted=shifted, normalized=normalized, indexing="yx", dtype=dtype
)
mask = grid["r"] <= 1
# Minor differences in r/theta/phi values are unimportant for the purpose
# of this function, so round off before proceeding
r = grid["r"][mask].round(5)
phi = grid["phi"][mask].round(5)
r_unique, r_idx = np.unique(r, return_inverse=True)
ang_unique, ang_idx = np.unique(phi, return_inverse=True)
else:
grid = grid_3d(
N, shifted=shifted, normalized=normalized, indexing="zyx", dtype=dtype
)
mask = grid["r"] <= 1
# Minor differences in r/theta/phi values are unimportant for the purpose of this function,
# so we round off before proceeding.
r = grid["r"][mask].round(5)
theta = grid["theta"][mask].round(5)
phi = grid["phi"][mask].round(5)
r_unique, r_idx = np.unique(r, return_inverse=True)
ang_unique, ang_idx = np.unique(
np.vstack([theta, phi]), axis=1, return_inverse=True
)
return {
"r_unique": r_unique,
"ang_unique": ang_unique,
"r_idx": r_idx,
"ang_idx": ang_idx,
"mask": mask,
}
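def _example_unique_coords_nd():
    # Hedged usage sketch, not part of the original module (assumes the aspire
    # grid utilities imported above are available): the unique radii and their
    # inverse indices reconstruct the rounded radius of every point in the mask.
    coords = unique_coords_nd(8, 2)
    r_full = coords["r_unique"][coords["r_idx"]]
    return r_full.shape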
def lgwt(ndeg, a, b, dtype=np.float32):
"""
Compute Legendre-Gauss quadrature
Generates the Legendre-Gauss nodes and weights on an interval
[a, b] with truncation order of ndeg for computing definite integrals
using Legendre-Gauss quadrature.
    Suppose you have a continuous function f(x) defined on [a, b] that you can
    evaluate at any x in [a, b]. Simply evaluate it at all of the nodes in the
    x vector to obtain a vector f, then compute the definite integral as
    np.sum(f * w).
    This is a wrapper for numpy.polynomial.legendre.leggauss, whose nodes and
    weights are defined on (-1, 1) and are rescaled here to the interval [a, b].
:param ndeg: truncation order, that is, the number of nodes.
:param a, b: The endpoints of the interval over which the quadrature is defined.
:return x, w: The quadrature nodes and weights.
"""
x, w = leggauss(ndeg)
scale_factor = (b - a) / 2
shift = (a + b) / 2
x = scale_factor * x + shift
w = scale_factor * w
return x.astype(dtype), w.astype(dtype)
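def _example_lgwt():
    # Hedged usage sketch, not part of the original module: a 10-node rule on
    # [0, 1] integrates x^2 essentially exactly (the true value is 1/3).
    x, w = lgwt(10, 0.0, 1.0, dtype=np.float64)
    return np.isclose(np.sum(w * x**2), 1.0 / 3.0)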
def d_decay_approx_fun(a, b, c, d):
return
|
np.square(c)
|
numpy.square
|
import unittest
import pycqed as pq
import numpy as np
import matplotlib.pyplot as plt
import os
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_flipping_analysis(unittest.TestCase):
@classmethod
def tearDownClass(self):
plt.close("all")
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
def test_flipping_analysis(self):
# this test is based on an experiment with a known
# added detuning in the amplitude. The test tests that the analysis
# works for a range of known scale factors.
# 20% detuning only works for coarse
self._check_scaling("20170726_164507", 0.8, 1)
self._check_scaling("20170726_164536", 0.9, 1)
self._check_scaling("20170726_164550", 0.9, 1)
self._check_scaling("20170726_164605", 0.95, 2)
self._check_scaling("20170726_164619", 0.95, 2)
self._check_scaling("20170726_164635", 0.99, 2)
self._check_scaling("20170726_164649", 0.99, 2)
self._check_scaling("20170726_164704", 1, 2)
self._check_scaling("20170726_164718", 1, 2)
self._check_scaling("20170726_164733", 1.01, 2)
self._check_scaling("20170726_164747", 1.01, 2)
self._check_scaling("20170726_164802", 1.05, 1)
self._check_scaling("20170726_164816", 1.05, 1)
self._check_scaling("20170726_164831", 1.1, 1)
self._check_scaling("20170726_164845", 1.1, 1)
# 20% detuning only works for coarse
self._check_scaling("20170726_164901", 1.2, 1)
# Test running it once with showing the initial fit
ma.FlippingAnalysis(t_start="20170726_164901", options_dict={"plot_init": True})
def _check_scaling(self, timestamp, known_detuning, places):
a = ma.FlippingAnalysis(t_start=timestamp)
s = a.get_scale_factor()
self.assertAlmostEqual(s * known_detuning, 1, places=places)
print("Scale factor {:.4f} known detuning {:.4f}".format(s, known_detuning))
class Test_Idling_Error_Rate_Analyisis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
@unittest.skip("TODO: fix this test")
def test_error_rates_vary_N2(self):
a = ma.Idling_Error_Rate_Analyisis(
t_start="20180210_181633",
options_dict={"close_figs": True, "vary_N2": True},
)
expected_dict = {
"A": 0.41685563870942149,
"N1": 1064.7100611208791,
"N2": 3644.550952436859,
"offset": 0.52121402524448934,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit +"].best_values[key], value, decimal=2
)
expected_dict = {
"A": -0.13013585779457398,
"N1": 1138.3895116903586,
"N2": 601415.64642756886,
"offset": 0.14572799876310505,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 0"].best_values[key], value, decimal=2
)
expected_dict = {
"A": 0.74324542246644376,
"N1": 939.61974247762646,
"N2": 3566698.2870284803,
"offset": 0.18301612896797623,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 1"].best_values[key], value, decimal=2
)
def test_error_rates_fixed_N2(self):
a = ma.Idling_Error_Rate_Analyisis(
t_start="20180210_181633",
options_dict={"close_figs": True, "vary_N2": False},
)
expected_dict = {
"A": 0.43481425072120633,
"N1": 1034.9644095297574,
"N2": 1e21,
"offset": 0.50671519356947314,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit +"].best_values[key], value, decimal=2
)
expected_dict = {
"A": -0.13013614484482647,
"N1": 1138.3896694924019,
"N2": 1e21,
"offset": 0.1457282565842071,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 0"].best_values[key], value, decimal=2
)
expected_dict = {
"A": 0.7432454022744126,
"N1": 939.61870748568992,
"N2": 1e21,
"offset": 0.18301632862249007,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 1"].best_values[key], value, decimal=2
)
class Test_Conditional_Oscillation_Analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
# [2020-08-05 Victor] Experiment code and analysis was upgraded
# new tests are needed, including the case of measuring phase on the
# parked qubit
@unittest.skip("FIXME: test dataset has wrong channel convention")
def test_condition_oscillation_extracted_pars(self):
a = ma.Conditional_Oscillation_Analysis(
t_start="20181126_131143", cal_points="gef"
)
qoi = a.proc_data_dict["quantities_of_interest"]
print(qoi)
extracted = np.array(
[
qoi["phi_cond"].nominal_value,
qoi["phi_cond"].std_dev,
qoi["phi_0"].nominal_value,
qoi["phi_0"].std_dev,
qoi["phi_1"].nominal_value,
qoi["phi_1"].std_dev,
qoi["osc_amp_0"].nominal_value,
qoi["osc_amp_0"].std_dev,
qoi["osc_amp_1"].nominal_value,
qoi["osc_amp_1"].std_dev,
qoi["offs_diff"].nominal_value,
qoi["offs_diff"].std_dev,
qoi["osc_offs_0"].nominal_value,
qoi["osc_offs_0"].std_dev,
qoi["osc_offs_1"].nominal_value,
qoi["osc_offs_1"].std_dev,
]
)
expected = np.array(
[
7.139e01,
1.077e00,
8.753e01,
5.926e-01,
1.614e01,
8.990e-01,
4.859e-01,
5.026e-03,
4.792e-01,
7.518e-03,
1.225e-02,
6.395e-03,
4.869e-01,
3.554e-03,
4.992e-01,
5.316e-03,
]
)
|
np.testing.assert_almost_equal(extracted, expected, decimal=2)
|
numpy.testing.assert_almost_equal
|
'''
UCCSD with spatial integrals
'''
import time
import tempfile
import numpy
import numpy as np
import h5py
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.cc import rccsd
from pyscf.lib import linalg_helper
from pyscf.cc import uintermediates as imd  # absolute import so the module also works under Python 3
from pyscf.cc.addons import spatial2spin, spin2spatial
#einsum = np.einsum
einsum = lib.einsum
# This is unrestricted (U)CCSD, i.e. spin-orbital form.
def kernel(cc, eris, t1=None, t2=None, max_cycle=50, tol=1e-8, tolnormt=1e-6,
verbose=logger.INFO):
"""Exactly the same as pyscf.cc.ccsd.kernel, which calls a
*local* energy() function."""
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(cc.stdout, verbose)
r1, r2 = cc.init_amps(eris)[1:]
if t1 is None:
t1 = r1
if t2 is None:
t2 = r2
r1 = r2 = None
cput1 = cput0 = (time.clock(), time.time())
eold = 0
eccsd = 0
if cc.diis:
adiis = lib.diis.DIIS(cc, cc.diis_file)
adiis.space = cc.diis_space
conv = False
for istep in range(max_cycle):
t1new, t2new = cc.update_amps(t1, t2, eris)
vec = cc.amplitudes_to_vector(t1new, t2new)
normt = np.linalg.norm(vec - cc.amplitudes_to_vector(t1, t2))
t1, t2 = t1new, t2new
t1new = t2new = None
if cc.diis:
if (istep > cc.diis_start_cycle and
abs(eccsd-eold) < cc.diis_start_energy_diff):
vec = adiis.update(vec)
t1, t2 = cc.vector_to_amplitudes(vec)
log.debug1('DIIS for step %d', istep)
vec = None
eold, eccsd = eccsd, energy(cc, t1, t2, eris)
log.info('istep = %d E(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
istep, eccsd, eccsd - eold, normt)
cput1 = log.timer('CCSD iter', *cput1)
if abs(eccsd-eold) < tol and normt < tolnormt:
conv = True
break
log.timer('CCSD', *cput0)
return conv, eccsd, t1, t2
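def _example_uccsd_water():
    # Hedged usage sketch, not part of the original module: the standard pyscf
    # front end drives this kind of UCCSD solver. The geometry and basis below
    # are illustrative only, and pyscf must be installed.
    from pyscf import gto, scf
    from pyscf import cc as pyscf_cc
    mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0.3 0', basis='sto-3g', verbose=0)
    mf = scf.UHF(mol).run()
    mycc = pyscf_cc.UCCSD(mf)
    ecc, t1, t2 = mycc.kernel()
    return ecc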
def update_amps(cc, t1, t2, eris):
time0 = time.clock(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
u1a = np.zeros_like(t1a)
u1b = np.zeros_like(t1b)
u2aa = np.zeros_like(t2aa)
u2ab = np.zeros_like(t2ab)
u2bb = np.zeros_like(t2bb)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
Fooa = fooa - np.diag(np.diag(fooa))
Foob = foob - np.diag(np.diag(foob))
Fvva = fvva - np.diag(np.diag(fvva))
Fvvb = fvvb - np.diag(np.diag(fvvb))
Fooa += .5 * lib.einsum('me,ie->mi', fova, t1a)
Foob += .5 * lib.einsum('me,ie->mi', fovb, t1b)
Fvva -= .5 * lib.einsum('me,ma->ae', fova, t1a)
Fvvb -= .5 * lib.einsum('me,ma->ae', fovb, t1b)
wovvo = np.zeros((nocca,nvira,nvira,nocca))
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb))
woVvO = np.zeros((nocca,nvirb,nvira,noccb))
woVVo = np.zeros((nocca,nvirb,nvirb,nocca))
wOvVo = np.zeros((noccb,nvira,nvirb,nocca))
wOvvO = np.zeros((noccb,nvira,nvira,noccb))
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] += einsum('jf,mebf->mbej', t1a, ovvv)
u1a += 0.5*lib.einsum('mief,meaf->ia', t2aa[p0:p1], ovvv)
u2aa[:,p0:p1] += lib.einsum('ie,mbea->imab', t1a, ovvv.conj())
tmp1aa = lib.einsum('ijef,mebf->ijmb', tauaa, ovvv)
u2aa -= lib.einsum('ijmb,ma->ijab', tmp1aa, t1a[p0:p1]*.5)
ovvv = tmp1aa = None
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = einsum('jf,mebf->mbej', t1b, OVVV)
u1b += 0.5*lib.einsum('MIEF,MEAF->IA', t2bb[p0:p1], OVVV)
u2bb[:,p0:p1] += lib.einsum('ie,mbea->imab', t1b, OVVV.conj())
tmp1bb = lib.einsum('ijef,mebf->ijmb', taubb, OVVV)
u2bb -= lib.einsum('ijmb,ma->ijab', tmp1bb, t1b[p0:p1]*.5)
OVVV = tmp1bb = None
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = einsum('jf,mfBE->mBEj',-t1a, ovVV)
u1b += lib.einsum('mIeF,meAF->IA', t2ab[p0:p1], ovVV)
u2ab[p0:p1] += lib.einsum('IE,maEB->mIaB', t1b, ovVV.conj())
tmp1ab = lib.einsum('iJeF,meBF->iJmB', tauab, ovVV)
u2ab -= lib.einsum('iJmB,ma->iJaB', tmp1ab, t1a[p0:p1])
ovVV = tmp1ab = None
blksize = max(int(max_memory*1e6/8/(nvirb*nocca**2*3)), 2)
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = einsum('JF,MFbe->MbeJ',-t1b, OVvv)
u1a += lib.einsum('iMfE,MEaf->ia', t2ab[:,p0:p1], OVvv)
u2ab[:,p0:p1] += lib.einsum('ie,MBea->iMaB', t1a, OVvv.conj())
tmp1abba = lib.einsum('iJeF,MFbe->iJbM', tauab, OVvv)
u2ab -= lib.einsum('iJbM,MA->iJbA', tmp1abba, t1b[p0:p1])
OVvv = tmp1abba = None
eris_ovov = np.asarray(eris.ovov)
eris_ooov = np.asarray(eris.ooov)
Woooo = lib.einsum('je,mine->mnij', t1a, eris_ooov)
Woooo = Woooo - Woooo.transpose(0,1,3,2)
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
Woooo += lib.einsum('ijef,menf->mnij', tauaa, eris_ovov) * .5
u2aa += lib.einsum('mnab,mnij->ijab', tauaa, Woooo*.5)
Woooo = tauaa = None
ooov = eris_ooov - eris_ooov.transpose(2,1,0,3)
Fooa += np.einsum('ne,mine->mi', t1a, ooov)
u1a += 0.5*lib.einsum('mnae,nime->ia', t2aa, ooov)
wovvo += einsum('nb,mjne->mbej', t1a, ooov)
ooov = eris_ooov = None
tilaa = make_tau_aa(t2[0], t1a, t1a, fac=0.5)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Fvva -= .5 * einsum('mnaf,menf->ae', tilaa, ovov)
Fooa += .5 * einsum('inef,menf->mi', tilaa, ovov)
Fova = np.einsum('nf,menf->me',t1a, ovov)
u2aa += ovov.conj().transpose(0,2,1,3) * .5
wovvo -= 0.5*einsum('jnfb,menf->mbej', t2aa, ovov)
woVvO += 0.5*einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmpaa = einsum('jf,menf->mnej', t1a, ovov)
wovvo -= einsum('nb,mnej->mbej', t1a, tmpaa)
    eris_ovov = ovov = tmpaa = tilaa = None
eris_OVOV = np.asarray(eris.OVOV)
eris_OOOV = np.asarray(eris.OOOV)
WOOOO = lib.einsum('je,mine->mnij', t1b, eris_OOOV)
WOOOO = WOOOO - WOOOO.transpose(0,1,3,2)
WOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
WOOOO += lib.einsum('ijef,menf->mnij', taubb, eris_OVOV) * .5
u2bb += lib.einsum('mnab,mnij->ijab', taubb, WOOOO*.5)
WOOOO = taubb = None
OOOV = eris_OOOV - eris_OOOV.transpose(2,1,0,3)
Foob += np.einsum('ne,mine->mi', t1b, OOOV)
u1b += 0.5*lib.einsum('mnae,nime->ia', t2bb, OOOV)
wOVVO += einsum('nb,mjne->mbej', t1b, OOOV)
OOOV = eris_OOOV = None
tilbb = make_tau_aa(t2[2], t1b, t1b, fac=0.5)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fvvb -= .5 * einsum('MNAF,MENF->AE', tilbb, OVOV)
Foob += .5 * einsum('inef,menf->mi', tilbb, OVOV)
Fovb = np.einsum('nf,menf->me',t1b, OVOV)
u2bb += OVOV.conj().transpose(0,2,1,3) * .5
wOVVO -= 0.5*einsum('jnfb,menf->mbej', t2bb, OVOV)
wOvVo += 0.5*einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmpbb = einsum('jf,menf->mnej', t1b, OVOV)
wOVVO -= einsum('nb,mnej->mbej', t1b, tmpbb)
eris_OVOV = OVOV = tmpbb = tilbb = None
eris_ooOV = np.asarray(eris.ooOV)
eris_OOov = np.asarray(eris.OOov)
Fooa += np.einsum('NE,miNE->mi', t1b, eris_ooOV)
u1a -= lib.einsum('nMaE,niME->ia', t2ab, eris_ooOV)
wOvVo -= einsum('nb,njME->MbEj', t1a, eris_ooOV)
woVVo += einsum('NB,mjNE->mBEj', t1b, eris_ooOV)
Foob += np.einsum('ne,MIne->MI', t1a, eris_OOov)
u1b -= lib.einsum('mNeA,NIme->IA', t2ab, eris_OOov)
woVvO -= einsum('NB,NJme->mBeJ', t1b, eris_OOov)
wOvvO += einsum('nb,MJne->MbeJ', t1a, eris_OOov)
WoOoO = lib.einsum('JE,miNE->mNiJ', t1b, eris_ooOV)
WoOoO+= lib.einsum('je,MIne->nMjI', t1a, eris_OOov)
WoOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
eris_ooOV = eris_OOov = None
eris_ovOV = np.asarray(eris.ovOV)
WoOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
u2ab += lib.einsum('mNaB,mNiJ->iJaB', tauab, WoOoO)
WoOoO = None
tilab = make_tau_ab(t2[1], t1 , t1 , fac=0.5)
Fvva -= einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= einsum('nMfA,nfME->AE', tilab, eris_ovOV)
Fooa += einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob += einsum('nIfE,nfME->MI', tilab, eris_ovOV)
Fova+= np.einsum('NF,meNF->me',t1b, eris_ovOV)
Fovb+= np.einsum('nf,nfME->ME',t1a, eris_ovOV)
u2ab += eris_ovOV.conj().transpose(0,2,1,3)
wovvo += 0.5*einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO += 0.5*einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
wOvVo -= 0.5*einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
woVvO -= 0.5*einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
woVVo += 0.5*einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += 0.5*einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpabab = einsum('JF,meNF->mNeJ', t1b, eris_ovOV)
tmpbaba = einsum('jf,nfME->MnEj', t1a, eris_ovOV)
woVvO -= einsum('NB,mNeJ->mBeJ', t1b, tmpabab)
wOvVo -= einsum('nb,MnEj->MbEj', t1a, tmpbaba)
woVVo += einsum('NB,NmEj->mBEj', t1b, tmpbaba)
wOvvO += einsum('nb,nMeJ->MbeJ', t1a, tmpabab)
tmpabab = tmpbaba = tilab = None
u1a += fova.conj()
u1a += np.einsum('ie,ae->ia',t1a,Fvva)
u1a -= np.einsum('ma,mi->ia',t1a,Fooa)
u1a -= np.einsum('imea,me->ia', t2aa, Fova)
u1a += np.einsum('iMaE,ME->ia', t2ab, Fovb)
u1b += fovb.conj()
u1b += np.einsum('ie,ae->ia',t1b,Fvvb)
u1b -= np.einsum('ma,mi->ia',t1b,Foob)
u1b -= np.einsum('imea,me->ia', t2bb, Fovb)
u1b += np.einsum('mIeA,me->IA', t2ab, Fova)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
wovvo -= eris_oovv.transpose(0,2,3,1)
wovvo += eris_ovvo.transpose(0,2,1,3)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
u1a-= np.einsum('nf,niaf->ia', t1a, oovv)
tmp1aa = lib.einsum('ie,mjbe->mbij', t1a, oovv)
u2aa += 2*lib.einsum('ma,mbij->ijab', t1a, tmp1aa)
eris_ovvo = eris_oovv = oovv = tmp1aa = None
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
wOVVO += eris_OVVO.transpose(0,2,1,3)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
u1b-= np.einsum('nf,niaf->ia', t1b, OOVV)
tmp1bb = lib.einsum('ie,mjbe->mbij', t1b, OOVV)
u2bb += 2*lib.einsum('ma,mbij->ijab', t1b, tmp1bb)
eris_OVVO = eris_OOVV = OOVV = None
eris_ooVV = np.asarray(eris.ooVV)
eris_ovVO = np.asarray(eris.ovVO)
woVVo -= eris_ooVV.transpose(0,2,3,1)
woVvO += eris_ovVO.transpose(0,2,1,3)
u1b+= np.einsum('nf,nfAI->IA', t1a, eris_ovVO)
tmp1ab = lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
tmp1ab+= lib.einsum('IE,mjBE->mBjI', t1b, eris_ooVV)
u2ab -= lib.einsum('ma,mBiJ->iJaB', t1a, tmp1ab)
eris_ooVV = eris_ovVO = tmp1ab = None
eris_OOvv = np.asarray(eris.OOvv)
eris_OVvo = np.asarray(eris.OVvo)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
wOvVo += eris_OVvo.transpose(0,2,1,3)
u1a+= np.einsum('NF,NFai->ia', t1b, eris_OVvo)
tmp1ba = lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
tmp1ba+= lib.einsum('ie,MJbe->MbJi', t1a, eris_OOvv)
u2ab -= lib.einsum('MA,MbIj->jIbA', t1b, tmp1ba)
eris_OOvv = eris_OVvo = tmp1ba = None
u2aa += 2*lib.einsum('imae,mbej->ijab', t2aa, wovvo)
u2aa += 2*lib.einsum('iMaE,MbEj->ijab', t2ab, wOvVo)
u2bb += 2*lib.einsum('imae,mbej->ijab', t2bb, wOVVO)
u2bb += 2*lib.einsum('mIeA,mBeJ->IJAB', t2ab, woVvO)
u2ab += lib.einsum('imae,mBeJ->iJaB', t2aa, woVvO)
u2ab += lib.einsum('iMaE,MBEJ->iJaB', t2ab, wOVVO)
u2ab += lib.einsum('iMeA,MbeJ->iJbA', t2ab, wOvvO)
u2ab += lib.einsum('IMAE,MbEj->jIbA', t2bb, wOvVo)
u2ab += lib.einsum('mIeA,mbej->jIbA', t2ab, wovvo)
u2ab += lib.einsum('mIaE,mBEj->jIaB', t2ab, woVVo)
wovvo = wOVVO = woVvO = wOvVo = woVVo = wOvvO = None
Ftmpa = Fvva - .5*lib.einsum('mb,me->be',t1a,Fova)
Ftmpb = Fvvb - .5*lib.einsum('mb,me->be',t1b,Fovb)
u2aa += lib.einsum('ijae,be->ijab', t2aa, Ftmpa)
u2bb += lib.einsum('ijae,be->ijab', t2bb, Ftmpb)
u2ab += lib.einsum('iJaE,BE->iJaB', t2ab, Ftmpb)
u2ab += lib.einsum('iJeA,be->iJbA', t2ab, Ftmpa)
Ftmpa = Fooa + 0.5*lib.einsum('je,me->mj', t1a, Fova)
Ftmpb = Foob + 0.5*lib.einsum('je,me->mj', t1b, Fovb)
u2aa -= lib.einsum('imab,mj->ijab', t2aa, Ftmpa)
u2bb -= lib.einsum('imab,mj->ijab', t2bb, Ftmpb)
u2ab -= lib.einsum('iMaB,MJ->iJaB', t2ab, Ftmpb)
u2ab -= lib.einsum('mIaB,mj->jIaB', t2ab, Ftmpa)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:u2aa += lib.einsum('ijef,aebf->ijab', tauaa, eris_vvvv) * .5
#:u2bb += lib.einsum('ijef,aebf->ijab', taubb, eris_VVVV) * .5
#:u2ab += lib.einsum('iJeF,aeBF->iJaB', tauab, eris_vvVV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
_add_vvvv_(cc, (tauaa,tauab,taubb), eris, (u2aa,u2ab,u2bb))
eris_oovo = numpy.asarray(eris.oovo)
eris_OOVO = numpy.asarray(eris.OOVO)
eris_ooVO = numpy.asarray(eris.ooVO)
eris_OOvo = numpy.asarray(eris.OOvo)
oovo = eris_oovo - eris_oovo.transpose(0,3,2,1)
OOVO = eris_OOVO - eris_OOVO.transpose(0,3,2,1)
u2aa -= lib.einsum('ma,mibj->ijab', t1a, oovo)
u2bb -= lib.einsum('ma,mibj->ijab', t1b, OOVO)
u2ab -= lib.einsum('ma,miBJ->iJaB', t1a, eris_ooVO)
u2ab -= lib.einsum('MA,MJbi->iJbA', t1b, eris_OOvo)
eris_oovo = eris_ooVO = eris_OOVO = eris_OOvo = None
u2aa *= .5
u2bb *= .5
u2aa = u2aa - u2aa.transpose(0,1,3,2)
u2aa = u2aa - u2aa.transpose(1,0,2,3)
u2bb = u2bb - u2bb.transpose(0,1,3,2)
u2bb = u2bb - u2bb.transpose(1,0,2,3)
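# Turn the residuals into the next amplitude iterate by dividing by the
# orbital-energy denominators (e_i - e_a), i.e. a standard Jacobi-type update;
# any DIIS extrapolation is typically applied by the outer kernel loop, not here.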
eia_a = lib.direct_sum('i-a->ia', fooa.diagonal(), fvva.diagonal())
eia_b = lib.direct_sum('i-a->ia', foob.diagonal(), fvvb.diagonal())
u1a /= eia_a
u1b /= eia_b
u2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
u2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
u2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
time0 = log.timer_debug1('update t1 t2', *time0)
t1new = u1a, u1b
t2new = u2aa, u2ab, u2bb
return t1new, t2new
def energy(cc, t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
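# Standard (U)CCSD correlation energy:
#   E_corr = sum_ia f_ia t1_ia (both spins)
#          + 1/4 <ij||ab> (t2 + t1 t1) for the aa and bb blocks
#          + <iJ|aB> (t2 + t1 t1) for the opposite-spin block,
# evaluated with (ov|ov)-ordered integrals, hence the iajb index strings below.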
e = np.einsum('ia,ia', fova, t1a)
e += np.einsum('ia,ia', fovb, t1b)
e += 0.25*np.einsum('ijab,iajb',t2aa,eris_ovov)
e -= 0.25*np.einsum('ijab,ibja',t2aa,eris_ovov)
e += 0.25*np.einsum('ijab,iajb',t2bb,eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja',t2bb,eris_OVOV)
e += np.einsum('iJaB,iaJB',t2ab,eris_ovOV)
e += 0.5*np.einsum('ia,jb,iajb',t1a,t1a,eris_ovov)
e -= 0.5*np.einsum('ia,jb,ibja',t1a,t1a,eris_ovov)
e += 0.5*np.einsum('ia,jb,iajb',t1b,t1b,eris_OVOV)
e -= 0.5*np.einsum('ia,jb,ibja',t1b,t1b,eris_OVOV)
e += np.einsum('ia,jb,iajb',t1a,t1b,eris_ovOV)
return e.real
class UCCSD(rccsd.RCCSD):
def __init__(self, mf, frozen=[[],[]], mo_coeff=None, mo_occ=None):
rccsd.RCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
# Spin-orbital CCSD needs a stricter tolerance than spatial-orbital
self.conv_tol_normt = 1e-6
if hasattr(mf, 'mo_energy'):
self.orbspin = orbspin_of_sorted_mo_energy(mf.mo_energy, self.mo_occ)
else:
self.orbspin = None
self._keys = self._keys.union(['orbspin'])
def build(self):
'''Initialize integrals and orbspin'''
self.orbspin = None
@property
def nocc(self):
nocca, noccb = self.get_nocc()
return nocca + noccb
@property
def nmo(self):
nmoa, nmob = self.get_nmo()
return nmoa + nmob
def get_nocc(self):
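# The frozen-orbital specification is accepted in three forms, one per branch
# below: a single integer (a count of frozen spin-orbitals, split between the
# two spins), a pair of integers (frozen alpha/beta orbital counts), or a pair
# of explicit index lists naming the frozen orbitals.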
if self._nocc is not None:
return self._nocc
if isinstance(self.frozen, (int, numpy.integer)):
nocca = len(self.mo_occ[0]) - (self.frozen+1)//2
noccb = len(self.mo_occ[1]) - self.frozen//2
elif isinstance(self.frozen[0], (int, numpy.integer)):
nocca = int(self.mo_occ[0].sum()) - self.frozen[0]
noccb = int(self.mo_occ[1].sum()) - self.frozen[1]
else:
mo_occa, mo_occb = self.mo_occ
if len(self.frozen[0]) > 0:
mo_occa = mo_occa.copy()
mo_occa[numpy.asarray(self.frozen[0])] = 0
if len(self.frozen[1]) > 0:
mo_occb = mo_occb.copy()
mo_occb[numpy.asarray(self.frozen[1])] = 0
nocca = np.count_nonzero(mo_occa==1)
noccb = np.count_nonzero(mo_occb==1)
return nocca, noccb
def get_nmo(self):
if self._nmo is not None:
return self._nmo
if isinstance(self.frozen, (int, numpy.integer)):
nmoa = self.mo_occ[0].size - (self.frozen+1)//2
nmob = self.mo_occ[1].size - self.frozen//2
elif isinstance(self.frozen[0], (int, numpy.integer)):
nmoa = self.mo_occ[0].size - self.frozen[0]
nmob = self.mo_occ[1].size - self.frozen[1]
else:
nmoa = len(self.mo_occ[0]) - len(self.frozen[0])
nmob = len(self.mo_occ[1]) - len(self.frozen[1])
return nmoa, nmob
def init_amps(self, eris):
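# MP2-like initial guess: t1 is seeded from the occupied-virtual Fock block
# (nonzero only for a non-canonical reference) and t2 from the first-order
# amplitudes <ia|jb>/D_ijab; the corresponding MP2 energy is logged as a check.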
time0 = time.clock(), time.time()
nocca, noccb = self.get_nocc()
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
eia_a = lib.direct_sum('i-a->ia', fooa.diagonal(), fvva.diagonal())
eia_b = lib.direct_sum('i-a->ia', foob.diagonal(), fvvb.diagonal())
t1a = fova.conj() / eia_a
t1b = fovb.conj() / eia_b
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
t2aa = eris_ovov.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
t2ab = eris_ovOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
t2bb = eris_OVOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(0,1,3,2)
e = np.einsum('iJaB,iaJB', t2ab, eris_ovOV)
e += 0.25*np.einsum('ijab,iajb', t2aa, eris_ovov)
e -= 0.25*np.einsum('ijab,ibja', t2aa, eris_ovov)
e += 0.25*np.einsum('ijab,iajb', t2bb, eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja', t2bb, eris_OVOV)
self.emp2 = e.real
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
logger.timer(self, 'init mp2', *time0)
return self.emp2, (t1a,t1b), (t2aa,t2ab,t2bb)
def kernel(self, t1=None, t2=None, eris=None, mbpt2=False):
return self.ccsd(t1, t2, eris, mbpt2)
def ccsd(self, t1=None, t2=None, eris=None, mbpt2=False):
'''Ground-state unrestricted (U)CCSD.
Kwargs:
mbpt2 : bool
Use one-shot MBPT2 approximation to CCSD.
'''
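# A minimal usage sketch (the `mol`/`scf` names are placeholders from the
# usual pyscf workflow and are not defined in this module):
#   mf = scf.UHF(mol).run()
#   mycc = UCCSD(mf)
#   e_corr, t1, t2 = mycc.ccsd()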
if eris is None: eris = self.ao2mo(self.mo_coeff)
self.eris = eris
self.dump_flags()
if mbpt2:
cctyp = 'MBPT2'
self.e_corr, self.t1, self.t2 = self.init_amps(eris)
else:
cctyp = 'CCSD'
self.converged, self.e_corr, self.t1, self.t2 = \
kernel(self, eris, t1, t2, max_cycle=self.max_cycle,
tol=self.conv_tol, tolnormt=self.conv_tol_normt,
verbose=self.verbose)
if self.converged:
logger.info(self, 'CCSD converged')
else:
logger.info(self, 'CCSD not converged')
if self._scf.e_tot == 0:
logger.note(self, 'E_corr = %.16g', self.e_corr)
else:
logger.note(self, 'E(%s) = %.16g E_corr = %.16g',
cctyp, self.e_tot, self.e_corr)
return self.e_corr, self.t1, self.t2
def ao2mo(self, mo_coeff=None):
return _ERIS(self, mo_coeff)
def update_amps(self, t1, t2, eris):
return update_amps(self, t1, t2, eris)
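# Vector lengths of the spin-orbital excitation spaces used by the EOM solvers:
# nip = 1h + antisymmetrized 2h1p, nea = 1p + antisymmetrized 2p1h,
# nee = 1h1p + antisymmetrized 2h2p.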
def nip(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nip = nocc + nocc*(nocc-1)//2*nvir
return self._nip
def nea(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nea = nvir + nocc*nvir*(nvir-1)//2
return self._nea
def nee(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nee = nocc*nvir + nocc*(nocc-1)//2*nvir*(nvir-1)//2
return self._nee
def ipccsd_matvec(self, vector):
# Ref: <NAME> and <NAME>, J. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ip_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ip()
imds = self.imds
r1,r2 = self.vector_to_amplitudes_ip(vector)
nocc, nvir = r2.shape[1:]
eris = self.eris
# Eq. (8)
Hr1 = np.einsum('me,mie->i',imds.Fov,r2)
Hr1 -= np.einsum('mi,m->i',imds.Foo,r1)
Hr1 -= 0.5*np.einsum('nmie,mne->i',imds.Wooov,r2)
# Eq. (9)
Hr2 = lib.einsum('ae,ije->ija',imds.Fvv,r2)
tmp1 = lib.einsum('mi,mja->ija',imds.Foo,r2)
Hr2 -= tmp1 - tmp1.transpose(1,0,2)
Hr2 -= np.einsum('maji,m->ija',imds.Wovoo,r1)
Hr2 += 0.5*lib.einsum('mnij,mna->ija',imds.Woooo,r2)
tmp2 = lib.einsum('maei,mje->ija',imds.Wovvo,r2)
Hr2 += tmp2 - tmp2.transpose(1,0,2)
eris_ovov = np.asarray(eris.ovov)
tmp = 0.5*np.einsum('menf,mnf->e', eris_ovov, r2)
tmp-= 0.5*np.einsum('mfne,mnf->e', eris_ovov, r2)
t2 = spatial2spin(self.t2, eris.orbspin)
Hr2 += np.einsum('e,ijae->ija', tmp, t2)
vector = self.amplitudes_to_vector_ip(Hr1,Hr2)
return vector
def ipccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ip_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ip()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
Fo = np.diagonal(imds.Foo)
Fv = np.diagonal(imds.Fvv)
Hr1 = -Fo
Hr2 = lib.direct_sum('-i-j+a->ija', Fo, Fo, Fv)
Woooo = np.asarray(imds.Woooo)
Woo = np.zeros((nocc,nocc), dtype=t1.dtype)
Woo += np.einsum('ijij->ij', Woooo)
Woo -= np.einsum('ijji->ij', Woooo)
Hr2 += Woo.reshape(nocc,nocc,-1) * .5
Wov = np.einsum('iaai->ia', imds.Wovvo)
Hr2 += Wov
Hr2 += Wov.reshape(nocc,1,nvir)
eris_ovov = np.asarray(eris.ovov)
Hr2 -= np.einsum('iajb,ijab->ija', eris_ovov, t2)
Hr2 -= np.einsum('iajb,ijab->ijb', eris_ovov, t2)
vector = self.amplitudes_to_vector_ip(Hr1,Hr2)
return vector
def vector_to_amplitudes_ip(self,vector):
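# IP-EOM vectors are packed as [r1(i); r2(i>j, a)]: the full 1h block followed
# by the antisymmetry-unique (i>j) rows of the 2h1p block.  This routine
# unpacks and restores the i<->j antisymmetry; amplitudes_to_vector_ip below
# is its inverse.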
nocc = self.nocc
nvir = self.nmo - nocc
r1 = vector[:nocc].copy()
r2 = np.zeros((nocc**2,nvir), vector.dtype)
otril = np.tril_indices(nocc, k=-1)
r2_tril = vector[nocc:].reshape(-1,nvir)
lib.takebak_2d(r2, r2_tril, otril[0]*nocc+otril[1], np.arange(nvir))
lib.takebak_2d(r2,-r2_tril, otril[1]*nocc+otril[0], np.arange(nvir))
return r1,r2.reshape(nocc,nocc,nvir)
def amplitudes_to_vector_ip(self,r1,r2):
nocc = self.nocc
nvir = self.nmo - nocc
size = nocc + nocc*(nocc-1)//2*nvir
vector = np.empty(size, r1.dtype)
vector[:nocc] = r1.copy()
otril = np.tril_indices(nocc, k=-1)
lib.take_2d(r2.reshape(-1,nvir), otril[0]*nocc+otril[1],
np.arange(nvir), out=vector[nocc:])
return vector
def eaccsd_matvec(self,vector):
# Ref: Nooijen and <NAME>, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ea_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ea()
imds = self.imds
r1,r2 = self.vector_to_amplitudes_ea(vector)
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
Hr1 = np.einsum('ac,c->a',imds.Fvv,r1)
Hr1 += np.einsum('ld,lad->a',imds.Fov,r2)
tmp1 = lib.einsum('ac,jcb->jab',imds.Fvv,r2)
Hr2 = (tmp1 - tmp1.transpose(0,2,1))
Hr2 -= lib.einsum('lj,lab->jab',imds.Foo,r2)
eris_ovvv = np.asarray(eris.ovvv)
Hr1 -= 0.5*np.einsum('lcad,lcd->a',eris_ovvv,r2)
Hr1 += 0.5*np.einsum('ldac,lcd->a',eris_ovvv,r2)
tau2 = r2 + np.einsum('jd,c->jcd', t1, r1) * 2
tau2 = tau2 - tau2.transpose(0,2,1)
tmp = lib.einsum('mcad,jcd->maj', eris_ovvv, tau2)
tmp = lib.einsum('mb,maj->jab', t1, tmp)
Hr2 += .5 * (tmp - tmp.transpose(0,2,1))
eris_ovov = np.asarray(eris.ovov)
tau = imd.make_tau(t2, t1, t1)
tmp = lib.einsum('menf,jef->mnj', eris_ovov, tau2)
Hr2 += .25*lib.einsum('mnab,mnj->jab', tau, tmp)
eris_ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
tmp = np.einsum('ndlc,lcd->n', eris_ovov, r2)
Hr1 += .5 * np.einsum('na,n->a', t1, tmp)
tmp = np.einsum('kcld,lcd->k', eris_ovov, r2)
t2 = spatial2spin(self.t2, eris.orbspin)
Hr2 -= 0.5 * np.einsum('k,kjab->jab', tmp, t2)
tmp = lib.einsum('lbdj,lad->jab', imds.Wovvo, r2)
Hr2 += tmp - tmp.transpose(0,2,1)
Hr2 += np.einsum('abcj,c->jab', imds.Wvvvo, r1)
eris_vvvv = np.asarray(eris.vvvv)
Hr2 += 0.5*einsum('acbd,jcd->jab',eris_vvvv,tau2)
vector = self.amplitudes_to_vector_ea(Hr1,Hr2)
return vector
def eaccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ea_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ea()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
Fo = np.diagonal(imds.Foo)
Fv = np.diagonal(imds.Fvv)
Hr1 = Fv
Hr2 = lib.direct_sum('-j+a+b->jab', Fo, Fv, Fv)
Wov = np.einsum('iaai->ia', imds.Wovvo)
Hr2 += Wov.reshape(nocc,nvir,1)
Hr2 += Wov.reshape(nocc,1,nvir)
eris_ovov = np.asarray(eris.ovov)
Hr2 -= np.einsum('iajb,ijab->jab', eris_ovov, t2)
Hr2 -= np.einsum('iajb,ijab->iab', eris_ovov, t2)
eris_ovvv = np.asarray(eris.ovvv)
Wvv = einsum('mb,maab->ab', t1, eris_ovvv)
Wvv -= einsum('mb,mbaa->ab', t1, eris_ovvv)
Wvv = Wvv + Wvv.T
eris_vvvv = np.asarray(eris.vvvv)
Wvv += np.einsum('aabb->ab', eris_vvvv)
Wvv -= np.einsum('abba->ab', eris_vvvv)
tau = imd.make_tau(t2, t1, t1)
Wvv += 0.5*np.einsum('mnab,manb->ab', tau, eris_ovov)
Wvv -= 0.5*np.einsum('mnab,mbna->ab', tau, eris_ovov)
Hr2 += Wvv
vector = self.amplitudes_to_vector_ea(Hr1,Hr2)
return vector
def vector_to_amplitudes_ea(self,vector):
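# EA-EOM vectors are packed analogously as [r1(a); r2(j, a>b)]: the full 1p
# block followed by the antisymmetry-unique virtual pairs of the 2p1h block.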
nocc = self.nocc
nvir = self.nmo - nocc
r1 = vector[:nvir].copy()
r2 = np.zeros((nocc,nvir*nvir), vector.dtype)
vtril = np.tril_indices(nvir, k=-1)
r2_tril = vector[nvir:].reshape(nocc,-1)
lib.takebak_2d(r2, r2_tril, np.arange(nocc), vtril[0]*nvir+vtril[1])
lib.takebak_2d(r2,-r2_tril, np.arange(nocc), vtril[1]*nvir+vtril[0])
return r1,r2.reshape(nocc,nvir,nvir)
def amplitudes_to_vector_ea(self,r1,r2):
nocc = self.nocc
nvir = self.nmo - nocc
size = nvir + nvir*(nvir-1)//2*nocc
vector = np.empty(size, r1.dtype)
vector[:nvir] = r1.copy()
vtril = np.tril_indices(nvir, k=-1)
lib.take_2d(r2.reshape(nocc,-1), np.arange(nocc),
vtril[0]*nvir+vtril[1], out=vector[nvir:])
return vector
def eeccsd(self, nroots=1, koopmans=False, guess=None):
'''Calculate N-electron neutral excitations via EE-EOM-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
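# The EE-EOM problem separates into a spin-conserving block and a spin-flip
# block; both diagonals are built, the requested roots are drawn from whichever
# blocks contain the lowest diagonal elements, and the converged roots are
# merged and sorted at the end.  A minimal usage sketch (assuming a converged
# UCCSD object `mycc`):
#   e_ee, v_ee = mycc.eeccsd(nroots=3)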
spinvec_size = self.nee()
nroots = min(nroots, spinvec_size)
if hasattr(self,'imds') and (self.imds.made_ip_imds or self.imds.made_ea_imds):
self.orbspin = orbspin_of_sorted_mo_energy(self._scf.mo_energy, self.mo_occ)
self.eris = self.ao2mo(self.mo_coeff)
self.imds = _IMDS(self)
diag_ee, diag_sf = self.eeccsd_diag()
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
for g in guess:
r1, r2 = self.vector_to_amplitudes_ee(g)
g = self.amplitudes_to_vector(self.spin2spatial(r1, self.orbspin),
self.spin2spatial(r2, self.orbspin))
if np.linalg.norm(g) > 1e-7:
guess_ee.append(g)
else:
r1 = self.spin2spatial(r1, self.orbspin)
r2 = self.spin2spatial(r2, self.orbspin)
g = self.amplitudes_to_vector_eomsf(r1, r2)
guess_sf.append(g)
r1 = r2 = None
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
e0 = e1 = []
v0 = v1 = []
if nroots_ee > 0:
e0, v0 = self.eomee_ccsd(nroots_ee, koopmans, guess_ee, diag_ee)
if nroots_ee == 1:
e0, v0 = [e0], [v0]
if nroots_sf > 0:
e1, v1 = self.eomsf_ccsd(nroots_sf, koopmans, guess_sf, diag_sf)
if nroots_sf == 1:
e1, v1 = [e1], [v1]
e = np.hstack([e0,e1])
v = v0 + v1
if nroots == 1:
return e[0], v[0]
else:
idx = e.argsort()
return e[idx], [v[x] for x in idx]
def eomee_ccsd(self, nroots=1, koopmans=False, guess=None, diag=None):
cput0 = (time.clock(), time.time())
if diag is None:
diag = self.eeccsd_diag()[0]
nocca, noccb = self.get_nocc()
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
user_guess = False
if guess:
user_guess = True
assert len(guess) == nroots
for g in guess:
assert g.size == diag.size
else:
idx = diag.argsort()
guess = []
if koopmans:
n = 0
for i in idx:
g = np.zeros_like(diag)
g[i] = 1.0
t1, t2 = self.vector_to_amplitudes(g, (nocca,noccb), (nvira,nvirb))
if np.linalg.norm(t1[0]) > .9 or np.linalg.norm(t1[1]) > .9:
guess.append(g)
n += 1
if n == nroots:
break
else:
for i in idx[:nroots]:
g = np.zeros_like(diag)
g[i] = 1.0
guess.append(g)
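# Diagonal (Davidson) preconditioner; the 1e-12 shift guards against division
# by zero when the current eigenvalue estimate coincides with a diagonal element.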
def precond(r, e0, x0):
return r/(e0-diag+1e-12)
eig = linalg_helper.eig
if user_guess or koopmans:
def pickeig(w, v, nr, envs):
x0 = linalg_helper._gen_x0(envs['v'], envs['xs'])
idx = np.argmax( np.abs(np.dot(np.array(guess).conj(),np.array(x0).T)), axis=1 )
return w[idx].real, v[:,idx].real, idx
eee, evecs = eig(self.eomee_ccsd_matvec, guess, precond, pick=pickeig,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
else:
eee, evecs = eig(self.eomee_ccsd_matvec, guess, precond,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
self.eee = eee.real
if nroots == 1:
eee, evecs = [self.eee], [evecs]
for n, en, vn in zip(range(nroots), eee, evecs):
t1, t2 = self.vector_to_amplitudes(vn, (nocca,noccb), (nvira,nvirb))
qpwt = np.linalg.norm(t1[0])**2 + np.linalg.norm(t1[1])**2
logger.info(self, 'EOM-EE root %d E = %.16g qpwt = %.6g', n, en, qpwt)
logger.timer(self, 'EOM-EE-CCSD', *cput0)
if nroots == 1:
return eee[0], evecs[0]
else:
return eee, evecs
def eomsf_ccsd(self, nroots=1, koopmans=False, guess=None, diag=None):
cput0 = (time.clock(), time.time())
if diag is None:
diag = self.eeccsd_diag()[1]
nocca, noccb = self.get_nocc()
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
user_guess = False
if guess:
user_guess = True
assert len(guess) == nroots
for g in guess:
assert g.size == diag.size
else:
idx = diag.argsort()
guess = []
if koopmans:
n = 0
for i in idx:
g = np.zeros_like(diag)
g[i] = 1.0
t1, t2 = self.vector_to_amplitudes_eomsf(g, (nocca,noccb), (nvira,nvirb))
if np.linalg.norm(t1[0]) > .9 or np.linalg.norm(t1[1]) > .9:
guess.append(g)
n += 1
if n == nroots:
break
else:
for i in idx[:nroots]:
g = np.zeros_like(diag)
g[i] = 1.0
guess.append(g)
def precond(r, e0, x0):
return r/(e0-diag+1e-12)
eig = linalg_helper.eig
if user_guess or koopmans:
def pickeig(w, v, nr, envs):
x0 = linalg_helper._gen_x0(envs['v'], envs['xs'])
idx = np.argmax( np.abs(np.dot(np.array(guess).conj(),np.array(x0).T)), axis=1 )
return w[idx].real, v[:,idx].real, idx
eee, evecs = eig(self.eomsf_ccsd_matvec, guess, precond, pick=pickeig,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
else:
eee, evecs = eig(self.eomsf_ccsd_matvec, guess, precond,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
self.eee = eee.real
if nroots == 1:
eee, evecs = [self.eee], [evecs]
for n, en, vn in zip(range(nroots), eee, evecs):
t1, t2 = self.vector_to_amplitudes_eomsf(vn, (nocca,noccb), (nvira,nvirb))
qpwt = np.linalg.norm(t1[0])**2 + np.linalg.norm(t1[1])**2
logger.info(self, 'EOM-SF root %d E = %.16g qpwt = %.6g', n, en, qpwt)
logger.timer(self, 'EOM-SF-CCSD', *cput0)
if nroots == 1:
return eee[0], evecs[0]
else:
return eee, evecs
# Ref: Wang, Tu, and Wang, J. Chem. Theory Comput. 10, 5567 (2014) Eqs.(9)-(10)
# Note: Last line in Eq. (10) is superfluous.
# See, e.g., Gwaltney, Nooijen, and Bartlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(self, vector):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
r1, r2 = self.vector_to_amplitudes(vector)
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
t1, t2, eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa = lib.einsum('mnij,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb = lib.einsum('mnij,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab = lib.einsum('mNiJ,mNaB->iJaB', imds.woOoO, r2ab)
Hr2aa+= lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb+= lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab+= lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab+= lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa-= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb-= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab-= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab-= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = make_tau_aa(r2bb, r1b, t1b, 2)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = make_tau_ab(r2ab, r1 , t1 , 2)
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tau2aa = tauaa = None
tau2bb = make_tau_aa(r2bb, r1b, t1b, 2)
taubb = make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tau2bb = taubb = None
tau2ab = make_tau_ab(r2ab, r1 , t1 , 2)
tauab = make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mnie,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('mNiE,mNaE->ia', imds.woOoV, r2ab)
Hr1b-= 0.5*lib.einsum('mnie,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MnIe,nMeA->IA', imds.wOoOv, r2ab)
tmpa = lib.einsum('mnie,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('nMiE,ME->ni', imds.woOoV, r1b)
tmpb = lib.einsum('mnie,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NmIe,me->NI', imds.wOoOv, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris_vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris_VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris_vvVV)
tau2aa, tau2ab, tau2bb = make_tau(r2, r1, t1, 2)
_add_vvvv_(self, (tau2aa,tau2ab,tau2bb), eris, (Hr2aa,Hr2ab,Hr2bb))
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = self.amplitudes_to_vector((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(self, vector):
'''Spin flip EOM-CCSD'''
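# In the spin-flip sector the singles split into r1ab (alpha occupied ->
# beta virtual) and r1ba (beta occupied -> alpha virtual); the four r2 blocks
# (baaa, aaba, abbb, bbab) are the matching doubles, with the block labels
# giving the spins of the (occ, occ, vir, vir) indices in that order.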
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
r1, r2 = self.vector_to_amplitudes_eomsf(vector, (nocca,noccb), (nvira,nvirb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('nMjI,Mnab->Ijab', imds.woOoO, r2baaa)
Hr2aaba = .25*lib.einsum('mnij,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('mNiJ,mNAB->iJAB', imds.woOoO, r2abbb)
Hr2bbab = .25*lib.einsum('MNIJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Hr1ba += einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Hr1ab += einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Hr1ab += einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Hr1ba += einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mnie,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('mNiE,mNAE->iA', imds.woOoV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MNIE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MnIe,Mnae->Ia', imds.wOoOv, r2baaa)
tmp1ab = lib.einsum('MnIe,Me->nI', imds.wOoOv, r1ba)
tmp1ba = lib.einsum('mNiE,mE->Ni', imds.woOoV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
tmp1baaa = lib.einsum('nfME,ijEf->Mnij', eris_ovOV, tau2aaba)
tmp1aaba = lib.einsum('menf,Ijef->mnIj', eris_ovov, tau2baaa)
tmp1abbb = lib.einsum('meNF,IJeF->mNIJ', eris_ovOV, tau2bbab)
tmp1bbab = lib.einsum('MENF,iJEF->MNiJ', eris_OVOV, tau2abbb)
Hr2baaa += 0.5*.5*lib.einsum('mnIj,mnab->Ijab', tmp1aaba, tauaa)
Hr2bbab += .5*lib.einsum('nMIJ,nMaB->IJaB', tmp1abbb, tauab)
Hr2aaba += .5*lib.einsum('Nmij,mNbA->ijAb', tmp1baaa, tauab)
Hr2abbb += 0.5*.5*lib.einsum('MNiJ,MNAB->iJAB', tmp1bbab, taubb)
tauaa = tauab = taubb = None
tmpab = lib.einsum('menf,Imef->nI', eris_ovov, r2baaa)
tmpab -= lib.einsum('nfME,IMfE->nI', eris_ovOV, r2bbab)
tmpba = lib.einsum('MENF,iMEF->Ni', eris_OVOV, r2abbb)
tmpba -= lib.einsum('meNF,imFe->Ni', eris_ovOV, r2aaba)
Hr1ab += np.einsum('NA,Ni->iA', t1b, tmpba)
Hr1ba += np.einsum('na,nI->Ia', t1a, tmpab)
Hr2baaa -= lib.einsum('mJ,imab->Jiab', tmpab*.5, t2aa)
Hr2bbab -= lib.einsum('mJ,mIaB->IJaB', tmpab*.5, t2ab) * 2
Hr2aaba -= lib.einsum('Mj,iMbA->ijAb', tmpba*.5, t2ab) * 2
Hr2abbb -= lib.einsum('Mj,IMAB->jIAB', tmpba*.5, t2bb)
tmp1ab = np.einsum('meNF,mF->eN', eris_ovOV, r1ab)
tmp1ba = np.einsum('nfME,Mf->En', eris_ovOV, r1ba)
tmpab = np.einsum('eN,NB->eB', tmp1ab, t1b)
tmpba = np.einsum('En,nb->Eb', tmp1ba, t1a)
tmpab -= lib.einsum('menf,mnBf->eB', eris_ovov, r2aaba)
tmpab += lib.einsum('meNF,mNFB->eB', eris_ovOV, r2abbb)
tmpba -= lib.einsum('MENF,MNbF->Eb', eris_OVOV, r2bbab)
tmpba += lib.einsum('nfME,Mnfb->Eb', eris_ovOV, r2baaa)
Hr2baaa -= lib.einsum('Eb,jIaE->Ijab', tmpba*.5, t2ab) * 2
Hr2bbab -= lib.einsum('Eb,IJAE->IJbA', tmpba*.5, t2bb)
Hr2aaba -= lib.einsum('eB,ijae->ijBa', tmpab*.5, t2aa)
Hr2abbb -= lib.einsum('eB,iJeA->iJAB', tmpab*.5, t2ab) * 2
eris_ovov = eris_OVOV = eris_ovOV = None
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2baaa += .5*lib.einsum('Ijef,aebf->Ijab', tau2baaa, eris_vvvv)
#:Hr2abbb += .5*lib.einsum('iJEF,AEBF->iJAB', tau2abbb, eris_VVVV)
#:Hr2bbab += .5*lib.einsum('IJeF,aeBF->IJaB', tau2bbab, eris_vvVV)
#:Hr2aaba += .5*lib.einsum('ijEf,bfAE->ijAb', tau2aaba, eris_vvVV)
tau2baaa *= .5
rccsd._add_vvvv1_(self, tau2baaa, eris, Hr2baaa)
fakeri = lambda:None
fakeri.vvvv = eris.VVVV
tau2abbb *= .5
rccsd._add_vvvv1_(self, tau2abbb, fakeri, Hr2abbb)
fakeri.vvvv = eris.vvVV
tau2bbab *= .5
rccsd._add_vvvv1_(self, tau2bbab, fakeri, Hr2bbab)
fakeri = None
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Hr2aaba[:,:,:,i ] += .5*lib.einsum('ijef,fae->ija', tau2aaba[:,:,:,:i+1], vvv)
Hr2aaba[:,:,:,:i] += .5*lib.einsum('ije,bae->ijab', tau2aaba[:,:,:,i], vvv[:i])
vvv = None
Hr2baaa = Hr2baaa - Hr2baaa.transpose(0,1,3,2)
Hr2bbab = Hr2bbab - Hr2bbab.transpose(1,0,2,3)
Hr2abbb = Hr2abbb - Hr2abbb.transpose(0,1,3,2)
Hr2aaba = Hr2aaba - Hr2aaba.transpose(1,0,2,3)
vector = self.amplitudes_to_vector_eomsf((Hr1ab, Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vector
def eeccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
eris = self.eris
t1, t2 = self.t1, self.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
tauaa, tauab, taubb = make_tau(t2, t1, t1)
nocca, noccb, nvira, nvirb = t2ab.shape
Foa = imds.Fooa.diagonal()
Fob = imds.Foob.diagonal()
Fva = imds.Fvva.diagonal()
Fvb = imds.Fvvb.diagonal()
Wovaa = np.einsum('iaai->ia', imds.wovvo)
Wovbb = np.einsum('iaai->ia', imds.wOVVO)
Wovab = np.einsum('iaai->ia', imds.woVVo)
Wovba = np.einsum('iaai->ia', imds.wOvvO)
Hr1aa = lib.direct_sum('-i+a->ia', Foa, Fva)
Hr1bb = lib.direct_sum('-i+a->ia', Fob, Fvb)
Hr1ab = lib.direct_sum('-i+a->ia', Foa, Fvb)
Hr1ba = lib.direct_sum('-i+a->ia', Fob, Fva)
Hr1aa += Wovaa
Hr1bb += Wovbb
Hr1ab += Wovab
Hr1ba += Wovba
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Wvvaa = .5*np.einsum('mnab,manb->ab', tauaa, eris_ovov)
Wvvbb = .5*np.einsum('mnab,manb->ab', taubb, eris_OVOV)
Wvvab = np.einsum('mNaB,maNB->aB', tauab, eris_ovOV)
ijb = np.einsum('iejb,ijbe->ijb', ovov, t2aa)
IJB = np.einsum('iejb,ijbe->ijb', OVOV, t2bb)
iJB =-np.einsum('ieJB,iJeB->iJB', eris_ovOV, t2ab)
Ijb =-np.einsum('jbIE,jIbE->Ijb', eris_ovOV, t2ab)
iJb =-np.einsum('ibJE,iJbE->iJb', eris_ovOV, t2ab)
IjB =-np.einsum('jeIB,jIeB->IjB', eris_ovOV, t2ab)
jab = np.einsum('kajb,jkab->jab', ovov, t2aa)
JAB = np.einsum('kajb,jkab->jab', OVOV, t2bb)
jAb =-np.einsum('jbKA,jKbA->jAb', eris_ovOV, t2ab)
JaB =-np.einsum('kaJB,kJaB->JaB', eris_ovOV, t2ab)
jaB =-np.einsum('jaKB,jKaB->jaB', eris_ovOV, t2ab)
JAb =-np.einsum('kbJA,kJbA->JAb', eris_ovOV, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = ovov = OVOV = None
Hr2aa = lib.direct_sum('ijb+a->ijba', ijb, Fva)
Hr2bb = lib.direct_sum('ijb+a->ijba', IJB, Fvb)
Hr2ab = lib.direct_sum('iJb+A->iJbA', iJb, Fvb)
Hr2ab+= lib.direct_sum('iJB+a->iJaB', iJB, Fva)
Hr2aa+= lib.direct_sum('-i+jab->ijab', Foa, jab)
Hr2bb+= lib.direct_sum('-i+jab->ijab', Fob, JAB)
Hr2ab+= lib.direct_sum('-i+JaB->iJaB', Foa, JaB)
Hr2ab+= lib.direct_sum('-I+jaB->jIaB', Fob, jaB)
Hr2aa = Hr2aa + Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa + Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb + Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb + Hr2bb.transpose(1,0,2,3)
Hr2aa *= .5
Hr2bb *= .5
Hr2baaa = lib.direct_sum('Ijb+a->Ijba', Ijb, Fva)
Hr2aaba = lib.direct_sum('ijb+A->ijAb', ijb, Fvb)
Hr2aaba+= Fva.reshape(1,1,1,-1)
Hr2abbb = lib.direct_sum('iJB+A->iJBA', iJB, Fvb)
Hr2bbab = lib.direct_sum('IJB+a->IJaB', IJB, Fva)
Hr2bbab+= Fvb.reshape(1,1,1,-1)
Hr2baaa = Hr2baaa + Hr2baaa.transpose(0,1,3,2)
Hr2abbb = Hr2abbb + Hr2abbb.transpose(0,1,3,2)
Hr2baaa+= lib.direct_sum('-I+jab->Ijab', Fob, jab)
Hr2baaa-= Foa.reshape(1,-1,1,1)
tmpaaba = lib.direct_sum('-i+jAb->ijAb', Foa, jAb)
Hr2abbb+= lib.direct_sum('-i+JAB->iJAB', Foa, JAB)
Hr2abbb-= Fob.reshape(1,-1,1,1)
tmpbbab = lib.direct_sum('-I+JaB->IJaB', Fob, JaB)
Hr2aaba+= tmpaaba + tmpaaba.transpose(1,0,2,3)
Hr2bbab+= tmpbbab + tmpbbab.transpose(1,0,2,3)
tmpaaba = tmpbbab = None
Hr2aa += Wovaa.reshape(1,nocca,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,nvira,1)
Hr2aa += Wovaa.reshape(1,nocca,nvira,1)
Hr2ab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2ab += Wovab.reshape(nocca,1,1,nvirb)
Hr2ab += Wovaa.reshape(nocca,1,nvira,1)
Hr2ab += Wovba.reshape(1,noccb,nvira,1)
Hr2bb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,nvirb,1)
Hr2bb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2baaa += Wovaa.reshape(1,nocca,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,nvira,1)
Hr2baaa += Wovaa.reshape(1,nocca,nvira,1)
Hr2aaba += Wovaa.reshape(1,nocca,1,nvira)
Hr2aaba += Wovaa.reshape(nocca,1,1,nvira)
Hr2aaba += Wovab.reshape(nocca,1,nvirb,1)
Hr2aaba += Wovab.reshape(1,nocca,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2bbab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bbab += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bbab += Wovba.reshape(noccb,1,nvira,1)
Hr2bbab += Wovba.reshape(1,noccb,nvira,1)
Wooaa = np.einsum('ijij->ij', imds.woooo).copy()
Wooaa -= np.einsum('ijji->ij', imds.woooo)
Woobb = np.einsum('ijij->ij', imds.wOOOO).copy()
Woobb -= np.einsum('ijji->ij', imds.wOOOO)
Wooab = np.einsum('ijij->ij', imds.woOoO)
Wooba = Wooab.T
Wooaa *= .5
Woobb *= .5
Hr2aa += Wooaa.reshape(nocca,nocca,1,1)
Hr2ab += Wooab.reshape(nocca,noccb,1,1)
Hr2bb += Woobb.reshape(noccb,noccb,1,1)
Hr2baaa += Wooba.reshape(noccb,nocca,1,1)
Hr2aaba += Wooaa.reshape(nocca,nocca,1,1)
Hr2abbb += Wooab.reshape(nocca,noccb,1,1)
Hr2bbab += Woobb.reshape(noccb,noccb,1,1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Wvvaa += np.einsum('mb,maab->ab', t1a, eris_ovvv)
#:Wvvaa -= np.einsum('mb,mbaa->ab', t1a, eris_ovvv)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Wvvaa += np.einsum('mb,maab->ab', t1a[p0:p1], ovvv)
Wvvaa -= np.einsum('mb,mbaa->ab', t1a[p0:p1], ovvv)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Wvvbb += np.einsum('mb,maab->ab', t1b, eris_OVVV)
#:Wvvbb -= np.einsum('mb,mbaa->ab', t1b, eris_OVVV)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Wvvbb += np.einsum('mb,maab->ab', t1b[p0:p1], OVVV)
Wvvbb -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVVV)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Wvvab -= np.einsum('mb,mbaa->ba', t1a, eris_ovVV)
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Wvvab -= np.einsum('mb,mbaa->ba', t1a[p0:p1], ovVV)
ovVV = None
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Wvvab -= np.einsum('mb,mbaa->ab', t1b, eris_OVvv)
idxa = np.arange(nvira)
idxa = idxa*(idxa+1)//2+idxa
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1])
Wvvab -= np.einsum('mb,mba->ab', t1b[p0:p1], OVvv[:,:,idxa])
OVvv = None
Wvvaa = Wvvaa + Wvvaa.T
Wvvbb = Wvvbb + Wvvbb.T
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Wvvaa += np.einsum('aabb->ab', eris_vvvv) - np.einsum('abba->ab', eris_vvvv)
#:Wvvbb += np.einsum('aabb->ab', eris_VVVV) - np.einsum('abba->ab', eris_VVVV)
#:Wvvab += np.einsum('aabb->ab', eris_vvVV)
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvvv[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvaa[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvaa[i,:i+1] -= tmp
Wvvaa[:i ,i] -= tmp[:i]
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Wvvab[i] += np.einsum('bb->b', vvv[i])
vvv = None
for i in range(nvirb):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.VVVV[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvbb[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvbb[i,:i+1] -= tmp
Wvvbb[:i ,i] -= tmp[:i]
vvv = None
Wvvba = Wvvab.T
Hr2aa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2ab += Wvvab.reshape(1,1,nvira,nvirb)
Hr2bb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2baaa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2aaba += Wvvba.reshape(1,1,nvirb,nvira)
Hr2abbb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2bbab += Wvvab.reshape(1,1,nvira,nvirb)
vec_ee = self.amplitudes_to_vector((Hr1aa,Hr1bb), (Hr2aa,Hr2ab,Hr2bb))
vec_sf = self.amplitudes_to_vector_eomsf((Hr1ab,Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vec_ee, vec_sf
def amplitudes_to_vector_ee(self, t1, t2, out=None):
return self.amplitudes_to_vector_s4(t1, t2, out)
def vector_to_amplitudes_ee(self, vector, nocc=None, nvir=None):
return self.vector_to_amplitudes_s4(vector, nocc, nvir)
def amplitudes_to_vector(self, t1, t2, out=None):
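# Full UCCSD amplitude vector layout: the alpha (t1a, t2aa) block in the
# antisymmetry-reduced s4 storage used by amplitudes_to_vector_ee, then the
# beta (t1b, t2bb) block, then the opposite-spin t2ab flattened without
# symmetry packing.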
nocca, nvira = t1[0].shape
noccb, nvirb = t1[1].shape
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
vector = np.ndarray(sizea+sizeb+sizeab, t2[0].dtype, buffer=out)
self.amplitudes_to_vector_ee(t1[0], t2[0], out=vector[:sizea])
self.amplitudes_to_vector_ee(t1[1], t2[2], out=vector[sizea:])
vector[sizea+sizeb:] = t2[1].ravel()
return vector
def vector_to_amplitudes(self, vector, nocc=None, nvir=None):
if nocc is None:
nocca, noccb = self.get_nocc()
else:
nocca, noccb = nocc
if nvir is None:
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
else:
nvira, nvirb = nvir
nocc = nocca + noccb
nvir = nvira + nvirb
nov = nocc * nvir
size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
if vector.size == size:
return self.vector_to_amplitudes_ee(vector, nocc, nvir)
else:
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
t1a, t2aa = self.vector_to_amplitudes_ee(vector[:sizea], nocca, nvira)
t1b, t2bb = self.vector_to_amplitudes_ee(vector[sizea:sizea+sizeb], noccb, nvirb)
t2ab = vector[-sizeab:].copy().reshape(nocca,noccb,nvira,nvirb)
return (t1a,t1b), (t2aa,t2ab,t2bb)
def amplitudes_from_rccsd(self, t1, t2):
'''Convert spatial orbital T1,T2 to spin-orbital T1,T2'''
return addons.spatial2spin(t1), addons.spatial2spin(t2)
def spatial2spin(self, tx, orbspin=None):
if orbspin is None: orbspin = self.orbspin
return spatial2spin(tx, orbspin)
def spin2spatial(self, tx, orbspin=None):
if orbspin is None: orbspin = self.orbspin
return spin2spatial(tx, orbspin)
def amplitudes_to_vector_eomsf(self, t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
size = t1ab.size + t1ba.size + nbaaa + naaba + nabbb + nbbab
vector = numpy.ndarray(size, t2baaa.dtype, buffer=out)
vector[:t1ab.size] = t1ab.ravel()
vector[t1ab.size:t1ab.size+t1ba.size] = t1ba.ravel()
pvec = vector[t1ab.size+t1ba.size:]
t2baaa = t2baaa.reshape(noccb*nocca,nvira*nvira)
t2aaba = t2aaba.reshape(nocca*nocca,nvirb*nvira)
t2abbb = t2abbb.reshape(nocca*noccb,nvirb*nvirb)
t2bbab = t2bbab.reshape(noccb*noccb,nvira*nvirb)
otrila = numpy.tril_indices(nocca, k=-1)
otrilb = numpy.tril_indices(noccb, k=-1)
vtrila = numpy.tril_indices(nvira, k=-1)
vtrilb = numpy.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=numpy.int32)
vidxab = np.arange(nvira*nvirb, dtype=numpy.int32)
lib.take_2d(t2baaa, oidxab, vtrila[0]*nvira+vtrila[1], out=pvec)
lib.take_2d(t2aaba, otrila[0]*nocca+otrila[1], vidxab, out=pvec[nbaaa:])
lib.take_2d(t2abbb, oidxab, vtrilb[0]*nvirb+vtrilb[1], out=pvec[nbaaa+naaba:])
lib.take_2d(t2bbab, otrilb[0]*noccb+otrilb[1], vidxab, out=pvec[nbaaa+naaba+nabbb:])
return vector
def vector_to_amplitudes_eomsf(self, vector, nocc=None, nvir=None):
if nocc is None:
nocca, noccb = self.get_nocc()
else:
nocca, noccb = nocc
if nvir is None:
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
else:
nvira, nvirb = nvir
t1ab = vector[:nocca*nvirb].reshape(nocca,nvirb).copy()
t1ba = vector[nocca*nvirb:nocca*nvirb+noccb*nvira].reshape(noccb,nvira).copy()
pvec = vector[t1ab.size+t1ba.size:]
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = numpy.tril_indices(nocca, k=-1)
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 17})
plt.rc('text', usetex=True)
plt.rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
# define the parameters
h = 0.1 # MPC collision time
n = 5.0 # fluid particle density
kBT = 1.0 # thermal energy = temperature
alpha = np.pi/2.0 # collision angle
grav = 0.0005 # gravitational acceleration
Ly = 52.0 # channel height
# kinetic part of the (dynamic) viscosity
def eta_kin(h, n, alpha, T):
f = n*T*h*(n/(1.0 - np.cos(2.0*alpha))/(n - 1.0 + np.exp(-n)) - 0.5)
return f
# collisional part of the (dynamic) viscosity
def eta_coll(h, n, alpha):
f = (1.0 - np.cos(alpha))/12.0/h*(n - 1.0 + np.exp(-n))
return f
# full (dynamic) viscosity
def eta(h, n, alpha, T):
f = eta_kin(h, n, alpha, T) + eta_coll(h, n, alpha)
return f
# theoretical maximum velocity in channel
v_max = grav*Ly*Ly/(8.0*eta(h, n, alpha, kBT)/n)
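# Note (added): assuming unit particle mass, the kinematic viscosity is nu = eta/n,
# so this is the plane-Poiseuille maximum v_max = grav*Ly**2/(8*nu) for a channel
# of height Ly driven by a constant body force.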
# theoretical flow profile
def v_x(y):
f = 4.0*v_max*(Ly - y)*y/Ly/Ly
return f
# set up the plot range for the theoretical flow profile
y = np.arange(0, Ly, 0.1)
# load the simulation data
Y, V_X = np.loadtxt("./flowprofile.dat", unpack=True, skiprows=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Compute dynamic properties."""
import logging
from typing import Dict, Optional, Union
import numpy as np
import rowan
from ..frame import Frame
from ..molecules import Molecule
from ..util import create_freud_box, zero_quaternion
from ._util import TrackedMotion, molecule2particles
np.seterr(divide="raise", invalid="raise", over="raise")
logger = logging.getLogger(__name__)
YamlValue = Union[str, float, int]
class Dynamics:
"""Compute dynamic properties of a simulation.
Args:
timestep: The timestep on which the configuration was taken.
box: The lengths of each side of the simulation cell including
any tilt factors.
position: The positions of the molecules
with shape ``(nmols, 3)``. Even if the simulation is only 2D,
all 3 dimensions of the position need to be passed.
orientation: The orientations of all the
molecules as a quaternion in the form ``(w, x, y, z)``. If no
orientation is supplied then no rotational quantities are
calculated.
molecule: The molecule for which to compute the dynamics quantities.
This is used to compute the structural relaxation for all particles.
wave_number: The wave number of the maximum peak in the Fourier
transform of the radial distribution function. If None this is
calculated from the initial configuration.
"""
_all_quantities = [
"time",
"mean_displacement",
"msd",
"mfd",
"alpha",
"scattering_function",
"com_struct",
"mean_rotation",
"msr",
"rot1",
"rot2",
"alpha_rot",
"gamma",
"overlap",
"struct",
]
def __init__(
self,
timestep: int,
box: np.ndarray,
position: np.ndarray,
orientation: Optional[np.ndarray] = None,
molecule: Optional[Molecule] = None,
wave_number: Optional[float] = None,
angular_resolution=360,
) -> None:
"""Initialise Dynamics.
Args:
timestep: The timestep at which the reference frame was created.
box: The lengths of the simulation cell
position: The initial position of each particle.
orientation: The initial orientation of each particle.
molecule: The molecule which is represented by each position and orientation.
wave_number: The wave number corresponding to the maximum in the
static structure factor.
angular_resolution: The angular resolution of the intermediate scattering function.
"""
if position.shape[0] == 0:
raise RuntimeError("Position must contain values, has length of 0.")
if molecule is None:
is2d = True
self.mol_vector = None
else:
is2d = molecule.dimensions == 2
self.mol_vector = molecule.positions
self.motion = TrackedMotion(
create_freud_box(box, is_2D=is2d), position, orientation
)
self.timestep = timestep
self.num_particles = position.shape[0]
self.wave_number = wave_number
if self.wave_number is not None:
angles = np.linspace(
0, 2 * np.pi, num=angular_resolution, endpoint=False
).reshape((-1, 1))
self.wave_vector = (
np.concatenate([np.cos(angles), np.sin(angles)], axis=1) * wave_number
)
@classmethod
def from_frame(
cls,
frame: Frame,
molecule: Optional[Molecule] = None,
wave_number: Optional[float] = None,
) -> "Dynamics":
"""Initialise the Dynamics class from a Frame object.
There is significant overlap between the frame class and the dynamics class,
so this is a convenience method to make the initialisation simpler.
"""
return cls(
frame.timestep,
frame.box,
frame.position,
frame.orientation,
molecule=molecule,
wave_number=wave_number,
)
@property
def delta_translation(self):
return self.motion.delta_translation
@property
def delta_rotation(self):
return self.motion.delta_rotation
def add(self, position: np.ndarray, orientation: Optional[np.ndarray] = None):
"""Update the state of the dynamics calculations by adding a Frame.
This updates the motion of the particles, comparing the positions and
orientations of the current frame with the previous frame, adding the difference
to the total displacement. This approach allows for tracking particles over
periodic boundaries, or through larger rotations assuming that there are
sufficient frames to capture the information. Each single displacement obeys the
minimum image convention, so for large time intervals it is still possible to
have missing information.
Args:
position: The updated position of each particle
orientation: The updated orientation of each particle, represented as a quaternion
"""
self.motion.add(position, orientation)
def add_frame(self, frame: Frame):
"""Update the state of the dynamics calculations by adding a Frame.
This updates the motion of the particles, comparing the positions and
orientations of the current frame with the previous frame, adding the difference
to the total displacement. This approach allows for tracking particles over
periodic boundaries, or through larger rotations assuming that there are
sufficient frames to capture the information. Each single displacement obeys the
minimum image convention, so for large time intervals it is still possible to
have missing information.
Args:
frame: The configuration containing the current particle information.
"""
self.motion.add(frame.position, frame.orientation)
def compute_displacement(self) -> np.ndarray:
"""Compute the translational displacement for each particle."""
return np.linalg.norm(self.delta_translation, axis=1)
def compute_displacement2(self) -> np.ndarray:
"""Compute the squared displacement for each particle."""
return np.square(self.delta_translation).sum(axis=1)
def compute_msd(self) -> float:
"""Compute the mean squared displacement."""
return self.compute_displacement2().mean()
def compute_mfd(self) -> float:
"""Compute the fourth power of displacement."""
return np.square(self.compute_displacement2()).mean()
def compute_alpha(self) -> float:
r"""Compute the non-Gaussian parameter alpha for translational motion in 2D.
.. math::
\alpha = \frac{\langle \Delta r^4\rangle}
{2\langle \Delta r^2 \rangle^2} -1
"""
disp2 = self.compute_displacement2()
try:
return np.square(disp2).mean() / (2 * np.square((disp2).mean())) - 1
except FloatingPointError:
with np.errstate(invalid="ignore"):
res = np.square(disp2).mean() / (2 * np.square((disp2).mean())) - 1
np.nan_to_num(res, copy=False)
return res
def compute_time_delta(self, timestep: int) -> int:
"""Time difference between initial frame and timestep."""
return timestep - self.timestep
def compute_rotation(self) -> np.ndarray:
"""Compute the rotational motion for each particle."""
return np.linalg.norm(self.delta_rotation, axis=1)
def compute_rotation2(self) -> np.ndarray:
"""Compute the rotation from the initial frame."""
return np.square(self.delta_rotation).sum(axis=1)
def compute_mean_rotation(self) -> float:
"""Compute the rotation from the initial frame."""
return self.compute_rotation().mean()
def compute_msr(self) -> float:
"""Compute the mean squared rotation from the initial frame."""
return self.compute_rotation2().mean()
def compute_isf(self) -> float:
"""Compute the intermediate scattering function."""
return np.cos(np.dot(self.wave_vector, self.delta_translation[:, :2].T)).mean()
def compute_rotational_relax1(self) -> float:
r"""Compute the first-order rotational relaxation function.
.. math::
C_1(t) = \langle \hat{\mathbf{e}}(0) \cdot \hat{\mathbf{e}}(t) \rangle
Return:
float: The rotational relaxation
"""
return np.cos(self.compute_rotation()).mean()
def compute_rotational_relax2(self) -> float:
r"""Compute the second rotational relaxation function.
.. math::
C_1(t) = \langle 2(\hat{\mathbf{e}}(0) \cdot \hat{\mathbf{e}}(t))^2 - 1 \rangle
Return:
float: The rotational relaxation
"""
return np.mean(2 * np.square(np.cos(self.compute_rotation())) - 1)
def compute_alpha_rot(self) -> float:
r"""Compute the non-Gaussian parameter alpha for rotational motion in 2D.
Rotational motion in 2D, is a single dimension of rotational motion, hence the
use of a different divisor than translational motion.
.. math::
\alpha = \frac{\langle \Delta \theta^4\rangle}
{3\langle \Delta \theta^2 \rangle^2} -1
"""
theta2 = self.compute_rotation2()
try:
return np.square(theta2).mean() / (3 * np.square((theta2).mean())) - 1
except FloatingPointError:
with np.errstate(invalid="ignore"):
res = np.square(theta2).mean() / (3 * np.square((theta2).mean())) - 1
np.nan_to_num(res, copy=False)
return res
def compute_gamma(self) -> float:
r"""Calculate the second order coupling of translations and rotations.
.. math::
\gamma = \frac{\langle(\Delta r \Delta\theta)^2 \rangle}
{\langle\Delta r^2\rangle\langle\Delta\theta^2\rangle} - 1
Return:
float: The squared coupling of translations and rotations
:math:`\gamma`
"""
theta2 = self.compute_rotation2()
disp2 = self.compute_displacement2()
try:
return ((disp2 * theta2).mean()) / (disp2.mean() * theta2.mean()) - 1
except FloatingPointError:
with np.errstate(invalid="ignore"):
res = ((disp2 * theta2).mean()) / (disp2.mean() * theta2.mean()) - 1
np.nan_to_num(res, copy=False)
return res
def compute_struct_relax(self) -> float:
if self.distance is None:
raise ValueError(
"The wave number is required for the structural relaxation."
)
quat_rot = rowan.from_euler(
self.delta_rotation[:, 2],
self.delta_rotation[:, 1],
self.delta_rotation[:, 0],
)
final_pos = molecule2particles(
self.delta_translation, quat_rot, self.mol_vector
)
init_pos = molecule2particles(
np.zeros_like(self.delta_translation),
zero_quaternion(self.num_particles),
self.mol_vector,
)
try:
return np.mean(np.linalg.norm(final_pos - init_pos, axis=1) < self.distance)
except FloatingPointError:
return np.nan
def compute_all(
self,
timestep: int,
position: np.ndarray,
orientation: np.ndarray = None,
scattering_function: bool = False,
) -> Dict[str, Union[int, float]]:
"""Compute all possible dynamics quantities.
Args:
timestep: The current timestep of the dynamic quantity
position: The position of all particles at the new point in time
orientation: The orientation (as a quaternion) of all particles
Returns:
Mapping of the names of each dynamic quantity to their values for each particle.
Where a quantity can't be calculated, an array of nan values will be supplied
instead, allowing for continued compatibility.
"""
self.add(position, orientation)
# Set default result
dynamic_quantities = {key: np.nan for key in self._all_quantities}
# Calculate all the simple dynamic quantities
dynamic_quantities["time"] = self.compute_time_delta(timestep)
dynamic_quantities["mean_displacement"] = np.linalg.norm(
self.delta_translation, axis=1
).mean()
dynamic_quantities["msd"] = self.compute_msd()
dynamic_quantities["mfd"] = self.compute_mfd()
dynamic_quantities["alpha"] = self.compute_alpha()
# The scattering function takes too long to compute so is normally ignored.
if scattering_function and self.wave_number is not None:
dynamic_quantities["scattering_function"] = self.compute_isf()
# The structural relaxation requires the distance value to be set
if self.distance is not None:
dynamic_quantities["com_struct"] = structural_relax(
self.compute_displacement(), dist=self.distance
)
dynamic_quantities["mean_rotation"] = self.compute_mean_rotation()
dynamic_quantities["msr"] = self.compute_msr()
dynamic_quantities["rot1"] = self.compute_rotational_relax1()
dynamic_quantities["rot2"] = self.compute_rotational_relax2()
dynamic_quantities["alpha_rot"] = self.compute_alpha_rot()
dynamic_quantities["gamma"] = self.compute_gamma()
dynamic_quantities["overlap"] = mobile_overlap(
self.compute_displacement(), self.compute_rotation()
)
# The structural relaxation of all atoms is the most complex.
if (
self.distance is not None
and self.mol_vector is not None
and self.motion.previous_orientation is not None
):
dynamic_quantities["struct"] = self.compute_struct_relax()
assert dynamic_quantities["time"] is not None
return dynamic_quantities
def __len__(self) -> int:
return self.num_particles
@property
def distance(self) -> Optional[float]:
if self.wave_number is None:
return None
return np.pi / (2 * self.wave_number)
def get_molid(self):
"""Molecule ids of each of the values."""
return np.arange(self.num_particles)
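# Hypothetical usage sketch (added; `frames` is assumed to be a list of Frame
# objects from this package and is not defined in this module):
#
#   dyn = Dynamics.from_frame(frames[0], wave_number=2.5)
#   results = [
#       dyn.compute_all(f.timestep, f.position, f.orientation)
#       for f in frames[1:]
#   ]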
def structural_relax(displacement: np.ndarray, dist: float = 0.3) -> float:
r"""Compute the structural relaxation.
The structural relaxation is given as the proportion of
particles which have moved further than `dist` from their
initial positions.
Args:
displacement: displacements
dist: The distance cutoff for considering relaxation. (default: 0.3)
Return:
float: The structural relaxation of the configuration
"""
try:
return np.mean(displacement < dist)
import gym
import numpy as np
import os
from pathlib import Path
import unittest
from ray.rllib.models.preprocessors import GenericPixelPreprocessor
from ray.rllib.models.torch.modules.convtranspose2d_stack import ConvTranspose2DStack
from ray.rllib.utils.framework import try_import_torch, try_import_tf
from ray.rllib.utils.images import imread
torch, nn = try_import_torch()
tf1, tf, tfv = try_import_tf()
class TestConvTranspose2DStack(unittest.TestCase):
"""Tests our ConvTranspose2D Stack modules/layers."""
def test_convtranspose2d_stack(self):
"""Tests, whether the conv2d stack can be trained to predict an image."""
batch_size = 128
input_size = 1
module = ConvTranspose2DStack(input_size=input_size)
preprocessor = GenericPixelPreprocessor(
gym.spaces.Box(0, 255, (64, 64, 3), np.uint8), options={"dim": 64}
)
optim = torch.optim.Adam(module.parameters(), lr=0.0001)
rllib_dir = Path(__file__).parent.parent.parent
img_file = os.path.join(rllib_dir, "tests/data/images/obstacle_tower.png")
img = imread(img_file)
# Preprocess.
img = preprocessor.transform(img)
# Make channels first.
img = np.transpose(img, (2, 0, 1))
# Add batch rank and repeat.
imgs = np.reshape(img, (1,) + img.shape)
imgs = np.repeat(imgs, batch_size, axis=0)
# Move to torch.
imgs = torch.from_numpy(imgs)
init_loss = loss = None
for _ in range(10):
# Random inputs.
inputs = torch.from_numpy(np.random.normal(0.0, 1.0, (batch_size, input_size)))
"""
Plots normalized histograms of slope of observations for the paper
Reference : Barnes et al. [2020, JAMES]
Author : <NAME>
Date : 11 November 2020
"""
### Import packages
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import scipy.stats as stats
### Set parameters
variables = [r'T2M']
datasets = [r'XGHG',r'XAER',r'lens']
seasons = [r'annual']
SAMPLEQ = 100
### Set directories
directorydata = '/Users/zlabe/Documents/Research/InternalSignal/Data/FINAL/'
directoryfigure = '/Users/zlabe/Documents/Projects/InternalSignal/DarkFigures/'
### Read in slope data
filename_slope = 'Slopes_20CRv3-Obs_XGHG-XAER-LENS_%s_RANDOMSEED_20ens.txt' % SAMPLEQ
slopes = np.genfromtxt(directorydata + filename_slope,unpack=True)
ghg_slopes = slopes[:,0]
aer_slopes = slopes[:,1]
lens_slopes = slopes[:,2]
### Read in R2 data
filename_R2= 'R2_20CRv3-Obs_XGHG-XAER-LENS_%s_RANDOMSEED_20ens.txt' % SAMPLEQ
slopes = np.genfromtxt(directorydata + filename_R2,unpack=True)
ghg_r2 = slopes[:,0]
aer_r2 = slopes[:,1]
lens_r2 = slopes[:,2]
###############################################################################
###############################################################################
###############################################################################
### Create plot for histograms of slopes
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='darkgrey')
plt.rc('ytick',color='darkgrey')
plt.rc('axes',labelcolor='darkgrey')
plt.rc('axes',facecolor='black')
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
fig = plt.figure()
ax = plt.subplot(111)
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_color('darkgrey')
ax.spines['left'].set_color('darkgrey')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params('both',length=5.5,width=2,which='major',color='darkgrey')
### Plot histograms
plt.axvline(x=1,color='w',linewidth=2,linestyle='--',dashes=(1,0.3),
zorder=10)
weights_ghg = np.ones_like(ghg_slopes)/len(ghg_slopes)
n_ghg, bins_ghg, patches_ghg = plt.hist(ghg_slopes,bins=np.arange(-1.2,2.1,0.1)-0.05,
density=False,alpha=0.7,
label=r'\textbf{AER+}',
weights=weights_ghg,zorder=3)
for i in range(len(patches_ghg)):
patches_ghg[i].set_facecolor('deepskyblue')
patches_ghg[i].set_edgecolor('k')
patches_ghg[i].set_linewidth(0.5)
weights_aer = np.ones_like(aer_slopes)/len(aer_slopes)
n_aer, bins_aer, patches_aer = plt.hist(aer_slopes,bins=np.arange(-1.2,2.1,0.1)-0.05,
density=False,alpha=0.7,
label=r'\textbf{GHG+}',
weights=weights_aer,zorder=4)
for i in range(len(patches_aer)):
patches_aer[i].set_facecolor('gold')
patches_aer[i].set_edgecolor('k')
patches_aer[i].set_linewidth(0.5)
weights_lens = np.ones_like(lens_slopes)/len(lens_slopes)
n_lens, bins_lens, patches_lens = plt.hist(lens_slopes,bins=np.arange(-1.2,2.1,0.1)-0.05,
density=False,alpha=0.7,
label=r'\textbf{ALL}',
weights=weights_lens,zorder=5)
for i in range(len(patches_lens)):
patches_lens[i].set_facecolor('crimson')
patches_lens[i].set_edgecolor('k')
patches_lens[i].set_linewidth(0.5)
# leg = plt.legend(shadow=False,fontsize=7,loc='upper center',
# bbox_to_anchor=(0.11,1),fancybox=True,ncol=1,frameon=False,
# handlelength=3,handletextpad=1)
plt.ylabel(r'\textbf{PROPORTION}',fontsize=10,color='w')
plt.xlabel(r'\textbf{SLOPE OF OBSERVATIONS}',fontsize=10,color='w')
plt.yticks(np.arange(0,1.1,0.1),map(str,np.round(np.arange(0,1.1,0.1)
# ------------------------------------------------------------------------------
# Copyright (c) 2018 <NAME>
# Licensed under the MIT License.
# Written by: <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
import os
import cv2
import yaml
import pickle
import numpy as np
from tqdm import tqdm
from plyfile import PlyData
opj = os.path.join
class SixdToolkit:
"""Sixd toolkit, load datasets and some normal operations
Attrs
- root: (str) Path to root. e.g. '/home/penggao/data/sixd/hinterstoisser'
- num_kp: (int) Number of keypoints. e.g. 17
- type_kp: (str) Type of keypoints. e.g. 'cluster'
- unit: (float) Unit scale to meters. e.g. '1e-3' means unit is mm
- pklpath: (str) Path to .pkl file
- cam: (np.array) [3 x 3] camera matrix
- models: (dict) Named with sequence number (e.g. '01').
Each item is a [N x 3] np.array for corresponding model vertices
- kps: (dict) Same format as 'models', represents corresponding keypoints
- frames: (dict) Named with sequence number (e.g. '01')
Each item is a list of image frames, with file paths and annotations
"""
def __init__(self, dataset, num_kp, type_kp, unit, is_train, resume=True):
# Prepare
self.root = opj('/home/yusheng/linemode')
self.num_kp = num_kp
self.type_kp = type_kp
self.unit = unit
self.is_train = is_train
# self.pklpath = opj(self.root, 'libs/benchmark.%s.pkl' %
# ('train' if is_train else 'test'))
self.pklpath = opj(self.root, 'libs/benchmark.%s-%d-%s.pkl' %
('train' if is_train else 'test', self.num_kp, self.type_kp))
self.seq_num = 15 # FIXME: constant
self.cam = np.zeros((3, 3))
self.models = dict()
self.models_info = dict()
self.kps = dict()
self.frames = dict()
# Try to load from disk
if resume == True:
try:
self._load_from_disk()
print("[LOG] Load SIXD from pkl file success")
return
except Exception as e:
print("[ERROR]", str(e))
print("[ERROR] Load from pkl file failed. Load all anew")
else:
print("[LOG] Load SXID all anew")
# Load camera matrix
print("[LOG] Load camera matrix")
with open(os.path.join(self.root, 'camera.yml')) as f:
content = yaml.load(f)
self.cam = np.array([[content['fx'], 0, content['cx']],
[0, content['fy'], content['cy']],
[0, 0, 1]])
# Load models and keypoints
print("[LOG] Load models and keypoints")
MODEL_ROOT = os.path.join(self.root, 'models')
KP_ROOT = os.path.join(
self.root, 'kps', str(self.num_kp), self.type_kp)
with open(os.path.join(MODEL_ROOT, 'models_info.yml')) as f:
content = yaml.load(f)
for key, val in tqdm(content.items()):
name = '%02d' % int(key) # e.g. '01'
self.models_info[name] = val
ply_path = os.path.join(MODEL_ROOT, 'obj_%s.ply' % name)
data = PlyData.read(ply_path)
self.models[name] = np.stack((np.array(data['vertex']['x']),
np.array(data['vertex']['y']),
np.array(data['vertex']['z'])), axis=1)
kp_path = os.path.join(KP_ROOT, 'obj_%s.ply' % name)
data = PlyData.read(kp_path)
self.kps[name] = np.stack((np.array(data['vertex']['x']),
np.array(data['vertex']['y']),
np.array(data['vertex']['z'])), axis=1)
# Load annotations
print("[LOG] Load annotations")
for seq in tqdm(['%02d' % i for i in range(1, self.seq_num+1)]):
frames = list()
seq_root = opj(
self.root, 'train' if self.is_train else 'test', str(seq))
imgdir = opj(seq_root, 'rgb')
with open(opj(seq_root, 'gt.yml')) as f:
content = yaml.load(f)
for key, val in content.items():
frame = dict()
frame['path'] = opj(imgdir, '%04d.png' % int(key))
frame['annots'] = list()
for v in val:
annot = dict()
rot = np.array(v['cam_R_m2c']).reshape(3, 3)
tran = np.array(v['cam_t_m2c']).reshape(3, 1)
annot['pose'] = np.concatenate((rot, tran), axis=1)
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 5 14:33:16 2021
@author: r.dewinter
"""
from testFunctions.BNH import BNH
from testFunctions.CTP1 import CTP1
from testFunctions.OSY import OSY
from testFunctions.CEXP import CEXP
from testFunctions.C3DTLZ4 import C3DTLZ4
from testFunctions.TNK import TNK
from testFunctions.SRN import SRN
from testFunctions.TBTD import TBTD
from testFunctions.SRD import SRD
from testFunctions.WB import WB
from testFunctions.DBD import DBD
from testFunctions.NBP import NBP
from testFunctions.SPD import SPD
from testFunctions.CSI import CSI
from testFunctions.WP import WP
from testFunctions.BICOP1 import BICOP1
from testFunctions.BICOP2 import BICOP2
from testFunctions.TRICOP import TRICOP
import numpy as np
from platypus import NSGAIII
from platypus import Problem
from platypus import Real
from platypus import nondominated
from hypervolume import hypervolume
import ast
import random
random.seed(0)
for d in range(1,10):
for g in range(1,10):
hyp = []
nfes = []
for i in range(10):
problem = Problem(2,2,2)
problem.types[:] = [Real(0,5),Real(0,3)]
problem.constraints[:] = "<=0"
problem.function = BNH
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'BNH'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([140,50])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 5005:
print('BNH',np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('BNH',np.mean(hyp), '(', np.std(hyp),')')
#print('BNH',np.max(hyp))
#print('BNH',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(12,18,1):
for g in range(10,15,1):
nfes = []
hyp = []
for i in range(10):
problem = Problem(2,2,2)
problem.types[:] = [Real(0.1,1),Real(0,5)]
problem.constraints[:] = "<=0"
problem.function = CEXP
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'CEXP'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([1,9])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 3.61811363037:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('CEXP',np.mean(hyp), '(', np.std(hyp),')')
#print('CEXP',np.max(hyp))
#print('CEXP',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(5,15,1):
for g in range(5,15,1):
nfes = []
hyp = []
for i in range(10):
problem = Problem(2,2,2)
problem.types[:] = [Real(-20,20),Real(-20,20)]
problem.constraints[:] = "<=0"
problem.function = SRN
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'SRN'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([301,72])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 59441.2892054:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('SRN',np.mean(hyp), '(', np.std(hyp),')')
#print('SRN',np.max(hyp))
#print('SRN',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(14,18,1):
for g in range(17,23,1):
nfes = []
hyp = []
for i in range(10):
problem = Problem(2,2,2)
problem.types[:] = [Real(1e-5,np.pi),Real(1e-5,np.pi)]
problem.constraints[:] = "<=0"
problem.function = TNK
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'TNK'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([3,3])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 7.65680404482:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('TNK',np.mean(hyp), '(', np.std(hyp),')')
#print('TNK',np.max(hyp))
#print('TNK',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(7,12,1):
for g in range(7,12,1):
nfes = []
hyp = []
for i in range(10):
problem = Problem(2,2,2)
problem.types[:] = [Real(0,1),Real(0,1)]
problem.constraints[:] = "<=0"
problem.function = CTP1
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'CTP1'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([1,2])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 1.23982829506:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('CTP1',np.mean(hyp), '(', np.std(hyp),')')
#print('CTP1',np.max(hyp))
#print('CTP1',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(80,100,2):
for g in range(40,55,2):
nfes = []
hyp = []
for i in range(10):
problem = Problem(6,2,2)
problem.types[:] = [Real(0,1),Real(0,1),Real(0,1),Real(0,1),Real(0,1),Real(0,1)]
problem.constraints[:] = "<=0"
problem.function = C3DTLZ4
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'C3DTLZ4'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([3,3])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 6.44303205286:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('C3DTLZ4',np.mean(hyp), '(', np.std(hyp),')')
#print('C3DTLZ4',np.max(hyp))
#print('C3DTLZ4',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(65,72,1):
for g in range(25,35,1):
nfes = []
hyp = []
for i in range(10):
problem = Problem(6,2,6)
problem.types[:] = [Real(0,10),Real(0,10),Real(1,5),Real(0,6),Real(1,5),Real(0,10)]
problem.constraints[:] = "<=0"
problem.function = OSY
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'OSY'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([0,386])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 95592.8275427:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('OSY',np.mean(hyp), '(', np.std(hyp),')')
#print('OSY',np.max(hyp))
#print('OSY',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(5,15,1):
for g in range(5,15,1):
hyp = []
nfes = []
for i in range(10):
problem = Problem(3,2,3)
problem.types[:] = [Real(1,3),Real(0.0005,0.05),Real(0.0005,0.05)]
problem.constraints[:] = "<=0"
problem.function = TBTD
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'TBTD'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([0.1,50000])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 3925:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('TBTD',np.mean(hyp), '(', np.std(hyp),')')
#print('TBTD',np.max(hyp))
#print('TBTD',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(7,14,1):
for g in range(7,14,1):
hyp = []
nfes = []
for i in range(10):
problem = Problem(2,2,5)
problem.types[:] = [Real(20,250),Real(10,50)]
problem.constraints[:] = "<=0"
problem.function = NBP
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'NBP'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([11150, 12500])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 102407195:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('NBP',np.mean(hyp), '(', np.std(hyp),')')
#print('NBP',np.max(hyp))
#print('NBP',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(3,10,1):
for g in range(3,10,1):
hyp = []
nfes = []
for i in range(10):
problem = Problem(4,2,5)
problem.types[:] = [Real(55,80),Real(75,110),Real(1000,3000),Real(2,20)]
problem.constraints[:] = "<=0"
problem.function = DBD
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'DBD'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([5,50])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 217.30940:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('DBD',np.mean(hyp), '(', np.std(hyp),')')
#print('DBD',np.max(hyp))
#print('DBD',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(250,300,10):
for g in range(30,40,5):
hyp = []
nfes = []
for i in range(10):
problem = Problem(6,3,9)
problem.types[:] = [Real(150,274.32),Real(25,32.31),Real(12,22),Real(8,11.71),Real(14,18),Real(0.63,0.75)]
problem.constraints[:] = "<=0"
problem.function = SPD
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'SPD'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([16,19000,-260000])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 36886805013.7:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('SPD',np.mean(hyp), '(', np.std(hyp),')')
#print('SPD',np.max(hyp))
#print('SPD',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(15,20,1):
for g in range(150,151,5):
hyp = []
nfes = []
for i in range(10):
problem = Problem(7,3,10)
problem.types[:] = [Real(0.5,1.5),Real(0.45,1.35),Real(0.5,1.5),Real(0.5,1.5),Real(0.875,2.625),Real(0.4,1.2),Real(0.4,1.2)]
problem.constraints[:] = "<=0"
problem.function = CSI
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'CSI'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([42,4.5,13])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 25.7171858898:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('CSI',np.mean(hyp), '(', np.std(hyp),')')
#print('CSI',np.max(hyp))
#print('CSI',np.std(hyp))
#print(funcname,algorithm.nfe)
random.seed(0)
for d in range(10,15,1):
for g in range(16,22,1):
hyp = []
nfes = []
for i in range(10):
problem = Problem(7,2,11)
problem.types[:] = [Real(2.6,3.6),Real(0.7,0.8),Real(17,28),Real(7.3,8.3),Real(7.3,8.3),Real(2.9,3.9),Real(5,5.5)]
problem.constraints[:] = "<=0"
problem.function = SRD
algorithm = NSGAIII(problem,d*problem.nvars)
algorithm.run(d*g*problem.nvars)
funcname = 'SRD'
# if not os.path.exists(funcname):
# os.makedirs(funcname)
nondominated_solutions = nondominated(algorithm.result)
ref = np.array([7000,1700])
obj = []
for s in nondominated_solutions:
lijst = str(s.objectives)
obj.append(ast.literal_eval(lijst))
obj = np.array(obj)
# np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
# print(hypervolume(obj,ref))
hyp.append(hypervolume(obj,ref))
nfes.append(algorithm.nfe)
print(np.mean(hyp))
if np.mean(hyp) > 3997308.6674:
print(funcname,np.mean(hyp), '(', np.std(hyp),')',g,d, np.mean(nfes))
print('SRD',np.mean(hyp), '(', np.std(hyp),')')
import numpy as np
from pkg_resources import resource_filename, Requirement
import pickle as pickle
import math
class ColorNaming:
w2c = None
def __init__(self):
pass
@staticmethod
def __load_w2c_pkl():
with open(resource_filename(__name__, "data/w2c.pkl")) as f:
return pickle.load(f)
@staticmethod
def im2colors(im, out_type='color_names'):
"""
out_type:
'color_names': returns np.array((im.shape[0], im.shape[1]), dtype=np.uint8) with ids of one of 11 colors
'probability_vector': returns np.array((im.shape[0], im.shape[1], 11), dtype=np.float) with probability
of each color
NOTE: first call might take a while as the lookup table is being loaded...
:param im:
:param w2c:
:param out_type:
:return:
"""
# black, blue, brown, gray,
# green, orange, pink, purple
# red, white, yellow
# color_values = {[0 0 0], [0 0 1], [.5 .4 .25], [.5 .5 .5],
# [0 1 0], [1 .8 0], [1 .5 1], [1 0 1],
# [1 0 0], [1 1 1], [1 1 0]};
if ColorNaming.w2c is None:
ColorNaming.w2c = ColorNaming.__load_w2c_pkl()
im = np.asarray(im, dtype=np.float)
h, w = im.shape[0], im.shape[1]
RR = im[:,:, 0].ravel()
GG = im[:,:, 1].ravel()
BB = im[:,:, 2].ravel()
index_im = np.asarray(np.floor(RR / 8)+32 * np.floor(GG / 8)+32 * 32 * np.floor(BB / 8), dtype=np.uint)
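# Descriptive note (added): each RGB channel is quantised into 32 bins of width 8 and
# the three bin indices are flattened into a single index into the 32*32*32-entry
# w2c lookup table.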
if out_type == 'colored_image':
pass
elif out_type == 'probability_vector':
out = ColorNaming.w2c[index_im].reshape((h, w, 11))
else:
w2cM = np.argmax(ColorNaming.w2c, axis=1)
out = np.asarray(w2cM[index_im], dtype=np.uint8)
out.shape = (h, w)
return out
def __mat2pkl(path, name):
from scipy.io import loadmat
import pickle as pickle
w2c = loadmat(path+'/'+name+'.mat')['w2c']
with open(path+'/'+name+'.pkl', 'w') as f:
pickle.dump(w2c, f)
def im2colors(im, out_type='color_names'):
return ColorNaming.im2colors(im, out_type)
if __name__ == '__main__':
import pickle as pickle
from scipy.misc import imread
# __mat2pkl('data', 'w2c')
im = imread('data/car.jpg')
# load lookup table
with open('data/w2c.pkl') as f:
w2c = pickle.load(f)
import numpy as np
import time
import matplotlib.pyplot as plt
color_values = [[0, 0, 0], [0, 0, 1], [.5, .4, .25], [.5, .5, .5], [0, 1, 0], [1, .8, 0], [1, .5, 1], [1, 0, 1],
[1, 0, 0], [1, 1, 1], [1, 1, 0]]
edge_len = math.ceil((255**2 + 128**2)**0.5)
edge_len_i = int(edge_len)
im = np.zeros((edge_len_i, edge_len_i, 3), dtype=np.uint8)
w2cM = np.argmax(w2c, axis=1)
alpha = math.atan(128.0/255.0)
beta = math.atan(255.0/(255.0*2**0.5))
for bp in range(edge_len_i):
for gp in range(edge_len_i):
b = min(math.cos(alpha) * bp, 255)
g = min(math.cos(alpha) * gp, 255)
i = [edge_len, edge_len]
rp = (np.dot(i, [bp, gp]) / np.linalg.norm(i)**2) * edge_len
r = min(math.cos(beta) * rp, 255)
id_ = int(np.floor(r / 8)+32 * np.floor(g / 8)+32 * 32 * np.floor(b / 8))
im[bp, gp, :] = np.array(color_values[w2cM[id_]]) * 255
plt.figure(1)
plt.imshow(im)
plt.ylabel('B')
plt.xlabel('G')
im = np.zeros((edge_len_i, edge_len_i, 3), dtype=np.uint8)
for rp in range(edge_len_i):
for gp in range(edge_len_i):
r = min((math.cos(alpha) * rp), 255)
g = min((math.cos(alpha) * gp), 255)
i = [edge_len, edge_len]
bp = (np.dot(i, [rp, gp]) / np.linalg.norm(i)**2) * edge_len
b = min(math.cos(beta) * bp, 255)
id_ = int(np.floor(r / 8)+32 * np.floor(g / 8)+32 * 32 * np.floor(b / 8))
im[rp, gp, :] = np.array(color_values[w2cM[id_]]) * 255
plt.figure(2)
plt.imshow(im)
plt.ylabel('R')
plt.xlabel('G')
im = np.zeros((edge_len_i, edge_len_i, 3), dtype=np.uint8)
for bp in range(edge_len_i):
for rp in range(edge_len_i):
b = min((math.cos(alpha) * bp), 255)
r = min((math.cos(alpha) * rp), 255)
i = [edge_len, edge_len]
gp = (np.dot(i, [bp, rp]) / np.linalg.norm(i)**2) * edge_len
g = min(math.cos(beta) * gp, 255)
id_ = int(np.floor(r / 8)+32 * np.floor(g / 8)+32 * 32 * np.floor(b / 8))
im[bp, rp, :] = np.array(color_values[w2cM[id_]]) * 255
plt.figure(3)
plt.imshow(im)
plt.ylabel('B')
plt.xlabel('R')
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(0)
ax = Axes3D(fig)
ax.hold(True)
rs = []
gs = []
bs = []
cs = []
step = 7
for r in range(0, 255, step):
print(r)
for g in range(0, 255, step):
for b in range(0, 255, step):
id_ = int(np.floor(r / 8) + 32 * np.floor(g / 8) + 32 * 32 * np.floor(b / 8))
c = np.array(color_values[w2cM[id_]])
from doufo import singledispatch
import numpy as np
import tensorflow as tf
__all__ = ['sum_', 'norm', 'is_scalar', 'argmax']
@singledispatch(nargs=2, ndefs=1, nouts=1)
def sum_(t, axis=None):
return t.fmap(lambda _: sum_(_, axis))
@sum_.register(list)
def _(t, axis=None):
return sum(t)
@sum_.register(np.ndarray)
def _(t, axis=None):
return np.sum(t, axis=axis)
@sum_.register(tf.Tensor)
def _(x, axis=None):
return tf.reduce_sum(x, axis=axis)
@singledispatch()
def norm(t, p=2.0):
return np.linalg.norm(t)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python module containing detrend methods.
:copyright:
The ObsPy Development Team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import numpy as np
from scipy.interpolate import LSQUnivariateSpline
def simple(data):
"""
Detrend signal simply by subtracting a line through the first and last
point of the trace
:param data: Data to detrend, type numpy.ndarray.
:return: Detrended data. Returns the original array which has been
modified in-place if possible but it might have to return a copy in
case the dtype has to be changed.
"""
# Convert data if it's not a floating point type.
if not np.issubdtype(data.dtype, np.floating):
#!/usr/env python
"""SPEC.PY - Spectroscopy tools
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20210605' # yyyymmdd
# Imports
import os
import numpy as np
import copy
import time
import warnings
from astropy.io import fits
from astropy.table import Table
from dlnpyutils.minpack import curve_fit
from dlnpyutils import utils as dln, bindata, robust
from numpy.polynomial.polynomial import polyfit as npp_polyfit, polyval as npp_polyval
from scipy import ndimage
from scipy.signal import medfilt, argrelextrema
from scipy.ndimage.filters import median_filter,gaussian_filter1d
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit, least_squares
from scipy.special import erf, wofz
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.legend import Legend
try:
import __builtin__ as builtins # Python 2
except ImportError:
import builtins # Python 3
# Ignore these warnings, it's a bug
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5 # speed of light in km/s
def gaussbin(x, amp, cen, sig, const=0, dx=1.0):
"""1-D gaussian with pixel binning
This function returns a binned Gaussian
par = [height, center, sigma]
Parameters
----------
x : array
The array of X-values.
amp : float
The Gaussian height/amplitude.
cen : float
The central position of the Gaussian.
sig : float
The Gaussian sigma.
const : float, optional, default=0.0
A constant offset.
dx : float, optional, default=1.0
The width of each "pixel" (scalar).
Returns
-------
geval : array
The binned Gaussian in the pixel
"""
xcen = np.array(x)-cen # relative to the center
x1cen = xcen - 0.5*dx # left side of bin
x2cen = xcen + 0.5*dx # right side of bin
t1cen = x1cen/(np.sqrt(2.0)*sig) # scale to a unitless Gaussian
t2cen = x2cen/(np.sqrt(2.0)*sig)
# For each value we need to calculate two integrals
# one on the left side and one on the right side
# Evaluate each point
# ERF = 2/sqrt(pi) * Integral(t=0-z) exp(-t^2) dt
# negative for negative z
geval_lower = erf(t1cen)
geval_upper = erf(t2cen)
geval = amp*np.sqrt(2.0)*sig * np.sqrt(np.pi)/2.0 * ( geval_upper - geval_lower )
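# Descriptive note (added): geval is the analytic integral of the Gaussian over the
# pixel of width dx centred on x (via the error function), not the bin-averaged value.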
geval += const # add constant offset
return geval
def gaussian(x, amp, cen, sig, const=0):
"""1-D gaussian: gaussian(x, amp, cen, sig)"""
return amp * np.exp(-(x-cen)**2 / (2*sig**2)) + const
def gaussfit(x,y,initpar=None,sigma=None,bounds=(-np.inf,np.inf),binned=False):
"""Fit a Gaussian to data."""
if initpar is None:
initpar = [np.max(y),x[np.argmax(y)],1.0,np.median(y)]
func = gaussian
if binned is True: func=gaussbin
return curve_fit(func, x, y, p0=initpar, sigma=sigma, bounds=bounds)
def wavesol(xpix,wave,order=3,xr=None):
"""
Fit wavelength solution to X and Wavelength arrays.
"""
n = len(xpix)
if n<2:
raise ValueError('Need at least two points.')
if n<order+1:
print('Warning: order='+str(order)+' but only '+str(n)+' points. Reducing to order='+str(n-1))
order = n-1
# Robust polynomial fit
coef = robust.polyfit(xpix,wave,order)
# Generate output array of wavelength values
if xr is None:
xx = np.arange(np.floor(np.min(xpix)),np.ceil(np.max(xpix)))
else:
xx = np.arange(xr[0],xr[1])
ww = npp_polyval(xx,coef)
return coef,ww
def trace(im,yestimate=None,yorder=2,sigorder=4,step=10,spectral_axis=1):
"""
Trace the spectrum. Spectral dimension is assumed to be on the horizontal axis.
Parameters
----------
im : numpy array
The input 2D image.
yestimate : float, optional
The initial estimate of the central Y (spatial dimension) position of the trace. Default
is to calculate using a median cut.
yorder : int, optional
Polynomial order to use for fitting the trace position as a function of column. Default is 2.
sigorder : int, optional
Polynomial order to use for fitting the Gaussian sigma as a function of column. Default is 4.
step : int, optional
Stepsize to take in the spectral dimension when tracing the spectrum. Default is 10 pixels.
spectral_axis : int, optional
The spectral axis. Default is 1.
Returns
-------
tcat : table
Table of Gaussian fits when stepping along in columns and tracing the spectrum.
ypars : numpy array
Polynomial coefficients of the trace.
sigpars : numpy array
Polynomial coefficients of the Gaussian sigma.
mcat : table
Table of model x, y and sigma values along the spectrum.
Example
-------
tcat,ypars,sigpars,mcat = trace(im)
"""
spec = np.copy(im) # internal copy
if spectral_axis==0: # transpose
spec = spec.T
ny,nx = spec.shape
y = np.arange(ny)
if yestimate is None:
ytot = np.sum(im,axis=1)
yestimate = np.argmax(ytot)
# Smooth in spectral dimension
# a uniform (boxcar) filter with a width of 50
smim = ndimage.uniform_filter1d(im, 50, 1)
nstep = nx//step
# Loop over the columns in steps and fit Gaussians
tcat = np.zeros(nstep,dtype=np.dtype([('x',float),('amp',float),('y',float),('sigma',float),
('pars',float,4),('perror',float,4)]))
for i in range(nstep):
pars,cov = dln.gaussfit(y[yestimate-10:yestimate+10],im[yestimate-10:yestimate+10,step*i+step//2])
perror = np.sqrt(np.diag(cov))
tcat['x'][i] = step*i+step//2
tcat['amp'][i] = pars[0]
tcat['y'][i] = pars[1]
tcat['sigma'][i] = pars[2]
tcat['pars'][i] = pars
tcat['perror'][i] = perror
# Fit polynomial to y vs. x and Gaussian sigma vs. x
ypars = np.polyfit(tcat['x'],tcat['pars'][:,1],yorder)
sigpars = np.polyfit(tcat['x'],tcat['pars'][:,2],sigorder)
# Model
mcat = np.zeros(nx,dtype=np.dtype([('x',float),('y',float),('sigma',float)]))
xx = np.arange(nx)
mcat['x'] = xx
mcat['y'] = np.poly1d(ypars)(xx)
mcat['sigma'] = np.poly1d(sigpars)(xx)
return tcat, ypars, sigpars,mcat
def boxsum(im,ylo,yhi):
""" Helper function for boxcar extraction."""
y0 = np.min(ylo).astype(int)
y1 = np.max(yhi).astype(int)
# Sum up the flux
subim = im[y0:y1,:]
ny,nx = subim.shape
xx,yy = np.meshgrid(np.arange(nx),np.arange(ny))
mask = (yy>=(ylo-y0)) & (yy<=(yhi-y0))
flux = np.sum(mask*subim,axis=0)
return flux
def boxcar(im,ytrace=None,width=20,backlo=None,backhi=None):
"""
Boxcar extract the spectrum
Parameters
----------
im : numpy array
Image from which to extract the spectrum.
ytrace : numpy array, optional
The y (spatial) position of the trace as a function of column.
width : int, optional
The half-width of the spectrum in the spatial dimension to extract.
backlo : tuple, optional
The lower and upper offsets (relative to the trace) for the lower
background region (e.g., (-50,-40)). Default is None.
backhi : tuple, optional
The lower and upper offsets (relative to the trace) for the upper.
background region (e.g., (40,50)). Default is None.
Returns
-------
out : table
Output table with flux and background values.
Example
-------
out = boxcar(im)
"""
ny,nx = im.shape
# Get median trace position
if ytrace is None:
ytot = np.sum(im,axis=1)
yest = np.argmax(ytot)
ytrace = np.zeros(nx,float)+yest
# Start output
dt = np.dtype([('x',float),('ytrace',float),('sumflux',float),('background',float),
('backlo',float),('backhi',float),('flux',float)])
out = np.zeros(nx,dtype=dt)
out['x'] = np.arange(nx)
out['ytrace'] = ytrace
# Sum up the flux
ylo = np.maximum(ytrace-width,0).astype(int)
yhi = np.minimum(ytrace+width,ny).astype(int)
flux = boxsum(im,ylo,yhi)
out['sumflux'] = flux
# Background
bflux = None
if backlo is not None:
bloylo = np.maximum(ytrace+backlo[0],0).astype(int)
bloyhi = np.maximum(ytrace+backlo[1],0).astype(int)
from linlearn import Regressor
import numpy as np
import logging
import pickle
from datetime import datetime
from scipy.optimize import minimize
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from collections import namedtuple
import os
from noise_generators import (
gaussian,
frechet,
loglogistic,
lognormal,
weibull,
student,
pareto,
)
import argparse
def ensure_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
ensure_directory("exp_archives/")
experiment_logfile = "exp_archives/linreg_exp.log"
experiment_name = "linreg"
file_handler = logging.FileHandler(filename=experiment_logfile)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
handlers=handlers,
)
parser = argparse.ArgumentParser()
parser.add_argument("--n_samples", type=int, default=500)
parser.add_argument("--n_features", type=int, default=5)
parser.add_argument("--random_seed", type=int, default=42)
parser.add_argument("--n_repeats", type=int, default=5)
parser.add_argument("--outlier_types", nargs="+", type=int, default=[])
parser.add_argument("--max_iter", type=int, default=100)
parser.add_argument("--step_size", type=float, default=0.1)
parser.add_argument("--confidence", type=float, default=0.01)
parser.add_argument("--corruption_rate", type=float, default=0.0)
parser.add_argument(
"--noise_dist",
type=str,
default="gaussian",
choices=["gaussian", "student", "weibull", "loglogistic", "lognormal", "pareto"],
)
parser.add_argument("--X_centered", dest="X_centered", action="store_true")
parser.add_argument("--X_not_centered", dest="X_centered", action="store_false")
parser.set_defaults(X_centered=True)
parser.add_argument("--save_results", dest="save_results", action="store_true")
parser.set_defaults(save_results=False)
args = parser.parse_args()
logging.info(48 * "=")
logging.info("Running new experiment session")
logging.info(48 * "=")
n_repeats = args.n_repeats
n_samples = args.n_samples
n_features = args.n_features
save_results = args.save_results
corruption_rate = args.corruption_rate
if not save_results:
logging.info("WARNING : results will NOT be saved at the end of this session")
fit_intercept = False
confidence = args.confidence
random_seed = args.random_seed
percentage = np.log(4 / confidence) / n_samples + 2 * corruption_rate
block_size = 1 / (18 * np.log(1 / confidence))
llm_block_size = 1 / (4 * np.log(1 / confidence))
if corruption_rate > 0.0:
block_size = min(block_size, 1 / (4 * (corruption_rate * n_samples)))
llm_block_size = min(llm_block_size, 1 / (4 * (corruption_rate * n_samples)))
#print(1 / (4 * (corruption_rate * n_samples)))
logging.info("percentage is %.2f" % percentage)
logging.info("block size is : %.2f" % block_size)
noise_sigma = {
"gaussian": 20,
"lognormal": 1.75,
"pareto": 30,
"student": 20,
"weibull": 20,
"frechet": 10,
"loglogistic": 10,
}
X_centered = args.X_centered
noise_dist = args.noise_dist
step_size = args.step_size
T = args.max_iter
outlier_types = args.outlier_types
Sigma_X = np.diag(np.arange(1, n_features + 1))
mu_X = np.zeros(n_features) if X_centered else np.ones(n_features)
w_star_dist = "uniform"
logging.info("Lauching experiment with parameters : \n %r" % args)
logging.info("mu_X = %r , Sigma_X = %r" % (mu_X, Sigma_X))
logging.info(
"w_star_dist = %s , noise_dist = %s , sigma = %f"
% (w_star_dist, noise_dist, noise_sigma[noise_dist])
)
rng = np.random.RandomState(random_seed) ## Global random generator
def corrupt_data(X, y, types, corruption_rate):
number = int((n_samples * corruption_rate) / len(types))
corrupted_indices = rng.choice(n_samples, size=number, replace=False)
dir = rng.randn(n_features)
dir /= np.sqrt((dir * dir).sum()) # random direction
max_y = np.max(np.abs(y))
for i in corrupted_indices:
type = rng.choice(types)
if type == 1:
X[i, :] = np.max(Sigma_X)
import numpy as np
def checkerboard(n_samples=100, scale=2.0):
"""Checkerboard problem.
@param n_samples Number of samples to generate.
@param scale Scale of board (how much cells will be on board).
@return Generated X and y.
"""
X = np.random.uniform(low=0.0, high=1.0, size=(n_samples, 2))
y = (np.sin(np.pi * X[:, 0] * scale) > 0) ^ (np.sin(np.pi * X[:, 1] * scale) > 0)
return X, y
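# Example (added, illustrative values): X, y = checkerboard(n_samples=1000, scale=4.0)
# returns points in the unit square labelled by the XOR of the sine signs along each axis.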
def linear_7dim(n_samples=100, noise_scale=0.05):
"""Simple linear 7-dimensional problem:
$10 x_1 - 20 x_2 - 2 x_3 + 3 x_4 + Noise$.
@param n_samples Number of samples to generate.
@param noise_scale Scale of Gaussian noise.
@return Generated X and y.
"""
coef = [10, -20, -2, 3, 0, 0, 0]
coef = np.array(coef)
X = np.random.uniform(low=0.0, high=1.0, size=(n_samples, len(coef)))
y = X.dot(coef)
y += np.random.normal(scale=noise_scale, size=y.shape)
return X, y
def nonlinear_7dim(n_samples=100, noise_scale=0.05):
"""Non-linear 7-dimensional problem,
like simple linear problem, but with quadratic dependence on the last feature:
$10 x_1 - 20 x_2 - 2 x_3 + 3 x_4 + 100 (x_5 - 0.5) ^ 2 + Noise$.
@param n_samples Number of samples to generate.
@param noise_scale Scale of Gaussian noise.
@return Generated X and y.
"""
X, y = linear_7dim(n_samples, noise_scale=noise_scale)
y += 100 * (X[:, -1] - 0.5) ** 2.0
return X, y
def polynomial_interaction(n_samples=100,
n_features=5,
n_components=5,
degree=2,
max_coefficient=100,
seed=None):
"""Random polynomial with feature interaction.
@param n_samples Number of samples to generate.
@param n_features Number of features.
@param n_components Number of components in sum.
@param degree Polynomial degree.
@param max_coefficient Maximum coefficient value.
@param seed Random seed.
@return Generated X and y.
"""
X = np.random.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
y = np.zeros((n_samples,))
rng = np.random.default_rng(seed)
for i in range(n_components):
comp_degree = rng.integers(1, degree)
# random integer coefficient drawn uniformly from [-max_coefficient, max_coefficient)
tmp = rng.integers(-max_coefficient, max_coefficient)
for j in range(comp_degree):
feature = rng.integers(n_features)
tmp *= X[:, feature]
y += tmp
return X, y
def simple_polynomial(n_samples=100,
noise_scale=0.05):
"""Simple polynomial with four dependent features.
@param n_samples Number of samples to generate.
@param noise_scale Gaussian noise scale.
@return Generated X and y.
"""
n_features = 5
X =
|
np.random.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
|
numpy.random.uniform
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 16:36:08 2020
@author: enric
"""
import numpy as np
from scipy.integrate import odeint
#import control
from tqdm import tqdm
import matplotlib.animation as animation
from matplotlib.patches import Rectangle
import matplotlib as mpl
import matplotlib.pyplot as plt
default_backend = plt.get_backend()
#%%
class InvertedPendulum():
# state = x, dot_x, theta, dot_theta
############################################################
def __init__(self, Q =
|
np.diag([1,1,1,1])
|
numpy.diag
|
# Licensed under an MIT style license -- see LICENSE.md
error_msg = (
"Unable to install '{}'. You will not be able to use some of the inbuilt "
"functions."
)
import copy
import numpy as np
from pathlib import Path
from pesummary import conf
from pesummary.utils.decorators import set_docstring
from pesummary.utils.exceptions import EvolveSpinError
from pesummary.utils.utils import logger
try:
import lalsimulation
except ImportError:
logger.warning(error_msg.format("lalsimulation"))
try:
import astropy
except ImportError:
logger.warning(error_msg.format("astropy"))
from .angles import *
from .cosmology import *
from .cosmology import _source_from_detector
from .mass import *
from .remnant import *
from .remnant import _final_from_initial_BBH
from .snr import *
from .snr import _ifo_snr
from .spins import *
from .tidal import *
from .tidal import _check_NSBH_approximant
from .time import *
__author__ = ["<NAME> <<EMAIL>>"]
_conversion_doc = """
Class to calculate all possible derived quantities
Parameters
----------
data: dict, list
either a dictionary or samples or a list of parameters and a list of
samples. See the examples below for details
extra_kwargs: dict, optional
dictionary of kwargs associated with this set of posterior samples.
f_low: float, optional
the low frequency cut-off to use when evolving the spins
f_ref: float, optional
the reference frequency when spins are defined
f_final: float, optional
the final frequency to use when integrating over frequencies
approximant: str, optional
the approximant to use when evolving the spins
evolve_spins_forwards: float/str, optional
the final velocity to evolve the spins up to.
evolve_spins_backwards: str, optional
method to use when evolving the spins backwards to an infinite separation
return_kwargs: Bool, optional
if True, return a modified dictionary of kwargs containing information
about the conversion
NRSur_fits: float/str, optional
the NRSurrogate model to use to calculate the remnant fits. If nothing
passed, the average NR fits are used instead
multipole_snr: Bool, optional
if True, the SNR in the (l, m) = [(2, 1), (3, 3), (4, 4)] multipoles is
calculated from the posterior samples.
precessing_snr: Bool, optional
if True, the precessing SNR is calculated from the posterior samples.
psd: dict, optional
dictionary containing a psd frequency series for each detector you wish
to include in calculations
waveform_fits: Bool, optional
if True, the approximant is used to calculate the remnant fits. Default
is False which means that the average NR fits are used
multi_process: int, optional
number of cores to use to parallelize the computationally expensive
conversions
redshift_method: str, optional
method you wish to use when calculating the redshift given luminosity
distance samples. If redshift samples already exist, this method is not
used. Default is 'approx' meaning that interpolation is used to calculate
the redshift given N luminosity distance points.
cosmology: str, optional
cosmology you wish to use when calculating the redshift given luminosity
distance samples.
force_non_evolved: Bool, optional
force non evolved remnant quantities to be calculated when evolved quantities
already exist in the input. Default False
force_BBH_remnant_computation: Bool, optional
force BBH remnant quantities to be calculated for systems that include
tidal deformability parameters where BBH fits may not be applicable.
Default False.
force_BH_spin_evolution: Bool, optional
force BH spin evolution methods to be applied for systems that include
tidal deformability parameters where these methods may not be applicable.
Default False.
disable_remnant: Bool, optional
disable all remnant quantities from being calculated. Default False.
add_zero_spin: Bool, optional
if no spins are present in the posterior table, add spins with 0 value.
Default False.
psd_default: str/pycbc.psd obj, optional
Default PSD to use for conversions when no other PSD is provided.
regenerate: list, optional
list of posterior distributions that you wish to regenerate
return_dict: Bool, optional
if True, return a pesummary.utils.utils.SamplesDict object
resume_file: str, optional
path to file to use for checkpointing. If not provided, checkpointing
is not used. Default None
Examples
--------
There are two ways of passing arguments to this conversion class, either
a dictionary of samples or a list of parameters and a list of samples. See
the examples below:
>>> samples = {"mass_1": 10, "mass_2": 5}
>>> converted_samples = %(function)s(samples)
>>> parameters = ["mass_1", "mass_2"]
>>> samples = [10, 5]
>>> converted_samples = %(function)s(parameters, samples)
>>> samples = {"mass_1": [10, 20], "mass_2": [5, 8]}
>>> converted_samples = %(function)s(samples)
>>> parameters = ["mass_1", "mass_2"]
>>> samples = [[10, 5], [20, 8]]
"""
@set_docstring(_conversion_doc % {"function": "convert"})
def convert(*args, restart_from_checkpoint=False, resume_file=None, **kwargs):
import os
if resume_file is not None:
if os.path.isfile(resume_file) and restart_from_checkpoint:
return _Conversion.load_current_state(resume_file)
logger.info(
"Unable to find resume file for conversion. Not restarting from "
"checkpoint"
)
return _Conversion(*args, resume_file=resume_file, **kwargs)
class _PickledConversion(object):
pass
@set_docstring(_conversion_doc % {"function": "_Conversion"})
class _Conversion(object):
@classmethod
def load_current_state(cls, resume_file):
"""Load current state from a resume file
Parameters
----------
resume_file: str
path to a resume file to restart conversion
"""
from pesummary.io import read
logger.info(
"Reading checkpoint file: {}".format(resume_file)
)
state = read(resume_file, checkpoint=True)
return cls(
state.parameters, state.samples, extra_kwargs=state.extra_kwargs,
evolve_spins_forwards=state.evolve_spins_forwards,
evolve_spins_backwards=state.evolve_spins_backwards,
NRSur_fits=state.NRSurrogate,
waveform_fits=state.waveform_fit, multi_process=state.multi_process,
redshift_method=state.redshift_method, cosmology=state.cosmology,
force_non_evolved=state.force_non_evolved,
force_BBH_remnant_computation=state.force_remnant,
disable_remnant=state.disable_remnant,
add_zero_spin=state.add_zero_spin, regenerate=state.regenerate,
return_kwargs=state.return_kwargs, return_dict=state.return_dict,
resume_file=state.resume_file
)
def write_current_state(self):
"""Write the current state of the conversion class to file
"""
from pesummary.io import write
state = _PickledConversion()
for key, value in vars(self).items():
setattr(state, key, value)
_path = Path(self.resume_file)
write(
state, outdir=_path.parent, file_format="pickle",
filename=_path.name, overwrite=True
)
logger.debug(
"Written checkpoint file: {}".format(self.resume_file)
)
def __new__(cls, *args, **kwargs):
from pesummary.utils.samples_dict import SamplesDict
from pesummary.utils.parameters import Parameters
obj = super(_Conversion, cls).__new__(cls)
base_replace = (
"'{}': {} already found in the result file. Overwriting with "
"the passed {}"
)
if len(args) > 2:
raise ValueError(
"The _Conversion module only takes as arguments a dictionary "
"of samples or a list of parameters and a list of samples"
)
elif isinstance(args[0], dict):
parameters = Parameters(args[0].keys())
samples = np.atleast_2d(
np.array([args[0][i] for i in parameters]).T
).tolist()
else:
if not isinstance(args[0], Parameters):
parameters = Parameters(args[0])
else:
parameters = args[0]
samples = args[1]
samples = np.atleast_2d(samples).tolist()
extra_kwargs = kwargs.get("extra_kwargs", {"sampler": {}, "meta_data": {}})
f_low = kwargs.get("f_low", None)
f_ref = kwargs.get("f_ref", None)
f_final = kwargs.get("f_final", None)
delta_f = kwargs.get("delta_f", None)
for param, value in {"f_final": f_final, "delta_f": delta_f}.items():
if value is not None and param in extra_kwargs["meta_data"].keys():
logger.warning(
base_replace.format(
param, extra_kwargs["meta_data"][param], value
)
)
extra_kwargs["meta_data"][param] = value
elif value is not None:
extra_kwargs["meta_data"][param] = value
else:
logger.warning(
"Could not find {} in input file and one was not passed "
"from the command line. Using {}Hz as default".format(
param, getattr(conf, "default_{}".format(param))
)
)
extra_kwargs["meta_data"][param] = getattr(
conf, "default_{}".format(param)
)
approximant = kwargs.get("approximant", None)
NRSurrogate = kwargs.get("NRSur_fits", False)
redshift_method = kwargs.get("redshift_method", "approx")
cosmology = kwargs.get("cosmology", "Planck15")
force_non_evolved = kwargs.get("force_non_evolved", False)
force_remnant = kwargs.get("force_BBH_remnant_computation", False)
force_evolve = kwargs.get("force_BH_spin_evolution", False)
disable_remnant = kwargs.get("disable_remnant", False)
if redshift_method not in ["approx", "exact"]:
raise ValueError(
"'redshift_method' can either be 'approx' corresponding to "
"an approximant method, or 'exact' corresponding to an exact "
"method of calculating the redshift"
)
if isinstance(NRSurrogate, bool) and NRSurrogate:
raise ValueError(
"'NRSur_fits' must be a string corresponding to the "
"NRSurrogate model you wish to use to calculate the remnant "
"quantities"
)
waveform_fits = kwargs.get("waveform_fits", False)
evolve_spins_forwards = kwargs.get("evolve_spins_forwards", False)
evolve_spins_backwards = kwargs.get("evolve_spins_backwards", False)
if disable_remnant and (
force_non_evolved or force_remnant
or NRSurrogate or waveform_fits or evolve_spins_forwards
):
_disable = []
if force_non_evolved:
_disable.append("force_non_evolved")
force_non_evolved = False
if force_remnant:
_disable.append("force_BBH_remnant_computation")
force_remnant = False
if NRSurrogate:
_disable.append("NRSur_fits")
NRSurrogate = False
if waveform_fits:
_disable.append("waveform_fits")
waveform_fits = False
if evolve_spins_forwards:
_disable.append("evolve_spins_forwards")
evolve_spins_forwards = False
logger.warning(
"Unable to use 'disable_remnant' and {}. Setting "
"{} and disabling all remnant quantities from being "
"calculated".format(
" or ".join(_disable),
" and ".join(["{}=False".format(_p) for _p in _disable])
)
)
if NRSurrogate and waveform_fits:
raise ValueError(
"Unable to use both the NRSurrogate and {} to calculate "
"remnant quantities. Please select only one option".format(
approximant
)
)
if isinstance(evolve_spins_forwards, bool) and evolve_spins_forwards:
raise ValueError(
"'evolve_spins_forwards' must be a float, the final velocity to "
"evolve the spins up to, or a string, 'ISCO', meaning "
"evolve the spins up to the ISCO frequency"
)
if not evolve_spins_forwards and (NRSurrogate or waveform_fits):
if (approximant is not None and "eob" in approximant) or NRSurrogate:
logger.warning(
"Only evolved spin remnant quantities are returned by the "
"{} fits.".format(
"NRSurrogate" if NRSurrogate else approximant
)
)
elif evolve_spins_forwards and (NRSurrogate or waveform_fits):
if (approximant is not None and "eob" in approximant) or NRSurrogate:
logger.warning(
"The {} fits already evolve the spins. Therefore "
"additional spin evolution will not be performed.".format(
"NRSurrogate" if NRSurrogate else approximant
)
)
else:
logger.warning(
"The {} fits are not applied with spin evolution.".format(
approximant
)
)
evolve_spins_forwards = False
multipole_snr = kwargs.get("multipole_snr", False)
precessing_snr = kwargs.get("precessing_snr", False)
if f_low is not None and "f_low" in extra_kwargs["meta_data"].keys():
logger.warning(
base_replace.format(
"f_low", extra_kwargs["meta_data"]["f_low"], f_low
)
)
extra_kwargs["meta_data"]["f_low"] = f_low
elif f_low is not None:
extra_kwargs["meta_data"]["f_low"] = f_low
else:
logger.warning(
"Could not find minimum frequency in input file and "
"one was not passed from the command line. Using {}Hz "
"as default".format(conf.default_flow)
)
extra_kwargs["meta_data"]["f_low"] = conf.default_flow
if approximant is not None and "approximant" in extra_kwargs["meta_data"].keys():
logger.warning(
base_replace.format(
"approximant", extra_kwargs["meta_data"]["approximant"],
approximant
)
)
extra_kwargs["meta_data"]["approximant"] = approximant
elif approximant is not None:
extra_kwargs["meta_data"]["approximant"] = approximant
if f_ref is not None and "f_ref" in extra_kwargs["meta_data"].keys():
logger.warning(
base_replace.format(
"f_ref", extra_kwargs["meta_data"]["f_ref"], f_ref
)
)
extra_kwargs["meta_data"]["f_ref"] = f_ref
elif f_ref is not None:
extra_kwargs["meta_data"]["f_ref"] = f_ref
regenerate = kwargs.get("regenerate", None)
multi_process = kwargs.get("multi_process", None)
if multi_process is not None:
multi_process = int(multi_process)
psd_default = kwargs.get("psd_default", "aLIGOZeroDetHighPower")
psd = kwargs.get("psd", {})
if psd is None:
psd = {}
elif psd is not None and not isinstance(psd, dict):
raise ValueError(
"'psd' must be a dictionary of frequency series for each detector"
)
ifos = list(psd.keys())
pycbc_psd = copy.deepcopy(psd)
if psd != {}:
from pesummary.gw.file.psd import PSD
if isinstance(psd[ifos[0]], PSD):
for ifo in ifos:
try:
pycbc_psd[ifo] = pycbc_psd[ifo].to_pycbc(
extra_kwargs["meta_data"]["f_low"],
f_high=extra_kwargs["meta_data"]["f_final"],
f_high_override=True
)
except (ImportError, IndexError, ValueError):
pass
obj.__init__(
parameters, samples, extra_kwargs, evolve_spins_forwards, NRSurrogate,
waveform_fits, multi_process, regenerate, redshift_method,
cosmology, force_non_evolved, force_remnant,
kwargs.get("add_zero_spin", False), disable_remnant,
kwargs.get("return_kwargs", False), kwargs.get("return_dict", True),
kwargs.get("resume_file", None), multipole_snr, precessing_snr,
pycbc_psd, psd_default, evolve_spins_backwards, force_evolve
)
return_kwargs = kwargs.get("return_kwargs", False)
if kwargs.get("return_dict", True) and return_kwargs:
return [
SamplesDict(obj.parameters, np.array(obj.samples).T),
obj.extra_kwargs
]
elif kwargs.get("return_dict", True):
return SamplesDict(obj.parameters, np.array(obj.samples).T)
elif return_kwargs:
return obj.parameters, obj.samples, obj.extra_kwargs
else:
return obj.parameters, obj.samples
def __init__(
self, parameters, samples, extra_kwargs, evolve_spins_forwards, NRSurrogate,
waveform_fits, multi_process, regenerate, redshift_method,
cosmology, force_non_evolved, force_remnant, add_zero_spin,
disable_remnant, return_kwargs, return_dict, resume_file, multipole_snr,
precessing_snr, psd, psd_default, evolve_spins_backwards, force_evolve
):
self.parameters = parameters
self.samples = samples
self.extra_kwargs = extra_kwargs
self.evolve_spins_forwards = evolve_spins_forwards
self.evolve_spins_backwards = evolve_spins_backwards
self.NRSurrogate = NRSurrogate
self.waveform_fit = waveform_fits
self.multi_process = multi_process
self.regenerate = regenerate
self.redshift_method = redshift_method
self.cosmology = cosmology
self.force_non_evolved = force_non_evolved
self.force_remnant = force_remnant
self.force_evolve = force_evolve
self.disable_remnant = disable_remnant
self.return_kwargs = return_kwargs
self.return_dict = return_dict
self.resume_file = resume_file
self.multipole_snr = multipole_snr
self.precessing_snr = precessing_snr
self.psd = psd
self.psd_default = psd_default
self.non_precessing = False
cond1 = any(
param in self.parameters for param in
conf.precessing_angles + conf.precessing_spins
)
if not cond1:
self.non_precessing = True
if "chi_p" in self.parameters:
_chi_p = self.specific_parameter_samples(["chi_p"])
if not
|
np.any(_chi_p)
|
numpy.any
|
from __future__ import division
from __future__ import print_function
import math
import numpy
import scipy.special as special
from numpy import sum, cos, sin, sqrt
def A1(kappa):
# XXX R has these exponentially scaled, but this seems to work
result = special.i1(kappa) / special.i0(kappa)
return result
def A1inv(R):
if 0 <= R < 0.53:
return 2 * R + R ** 3 + (5 * R ** 5) / 6
elif R < 0.85:
return -0.4 + 1.39 * R + 0.43 / (1 - R)
else:
return 1 / (R ** 3 - 4 * R ** 2 + 3 * R)
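# Hedged sanity-check sketch (not in the original source): A1inv above is a piecewise
# approximation to the inverse of A1(kappa) = I1(kappa) / I0(kappa), so composing the
# two should roughly return the input mean resultant length R.  Defined only, never
# called.
def _check_A1inv_roundtrip(R=0.7):
    kappa = A1inv(R)
    return A1(kappa)  # expected to be approximately R (here ~0.7)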
def mle_vonmises(theta):
results = {}
n = len(theta)
C = sum(cos(theta))
S = sum(sin(theta))
R = sqrt(C ** 2 + S ** 2)
mean_direction = math.atan2(S, C)
results["mu"] = mean_direction
mean_R = R / n
kappa = A1inv(mean_R)
results["kappa"] = kappa
if 0:
z = F(theta - mu_hat)
z.sort()
z_bar = sum(z) / n
if 0:
tmp = 0
for i in range(n):
tmp += (z[i] - 2 * i / (2 * n)) ** 2
U2 = tmp - n * (z_bar - 0.5) ** 2 + 1 / (12 * n)
else:
U2 = (
sum((z - 2 * numpy.arange(n) / (2 * n)) ** 2)
- n * (z_bar - 0.5) ** 2
+ 1 / (12 * n)
)
results["U2"] = U2
return results
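# Hedged usage sketch (illustration only): draw samples from a von Mises distribution
# with numpy and check that mle_vonmises approximately recovers the location mu and
# the concentration kappa.  The parameter values are arbitrary; the helper is only
# defined, never called.
def _demo_mle_vonmises(mu=0.5, kappa=3.0, n=10000):
    theta = numpy.random.vonmises(mu, kappa, size=n)
    est = mle_vonmises(theta)
    return est["mu"], est["kappa"]  # expected to be close to (0.5, 3.0)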
def lm_circular_cl(y, x, init, verbose=False, tol=1e-10):
"""circular-linear regression
y in radians
x is linear
"""
y = numpy.mod(y, 2 * numpy.pi)
betaPrev = init
n = len(y)
S = numpy.sum(numpy.sin(y - 2 * numpy.arctan(x * betaPrev))) / n
C = numpy.sum(numpy.cos(y - 2 * numpy.arctan(x * betaPrev))) / n
R = numpy.sqrt(S ** 2 + C ** 2)
mu = numpy.arctan2(S, C)
k = A1inv(R)
diff = tol + 1
iter = 0
while diff > tol:
iter += 1
u = k * numpy.sin(y - mu - 2 * numpy.arctan(x * betaPrev))
A = k * A1(k) * numpy.eye(n)
g_p = 2 / (1 + betaPrev * x) ** 2 *
|
numpy.eye(n)
|
numpy.eye
|
import gc
import sys
import json
import shlex
import argparse
import pprint
import pathlib
import logging
import random
import warnings
from dataclasses import dataclass
from typing import Optional
import torch
import numpy as np
import transformers
from utils import set_seed, generate_seeds
from models import AbstractClassifier, TransformersClassifier, TMixClassifier
from dataset import DataSplits, SequenceDataset, AugmentedDataset, load_data, \
balanced_samples
from augment import GPT3MixAugmenter
from openai_utils import resolve_api_key
@dataclass
class Runner:
model: AbstractClassifier
master_seed: Optional[int] = None
def run_single(self, data_splits: DataSplits):
gc.collect()
torch.cuda.empty_cache()
self.model.reset()
logging.info("running single trial...")
summary = self.model.fit(data_splits.train, data_splits.valid)
if summary:
logging.info(f"training summary: {json.dumps(summary, indent=2)}")
pred = self.model.predict([ex.text for ex in data_splits.test])
acc = np.mean([p == ex.label for p, ex in zip(pred, data_splits.test)])
logging.info(f"final test acc: {acc}")
return acc
def run_multiple(self, data_splits: DataSplits, n, prepare_fn=None):
seed_generator = generate_seeds(self.master_seed, "running experiments")
seeds = list(seed for _, seed in zip(range(n), seed_generator))
logging.info(f"generated seeds: {seeds}")
accs = []
for run_idx, seed in enumerate(seeds):
if prepare_fn is None:
prepared_data_splits = data_splits
else:
prepared_data_splits = prepare_fn(run_idx, data_splits)
set_seed(seed, "running a single trial")
results = self.run_single(prepared_data_splits)
accs.append(results)
return accs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--datasets", nargs="+", required=True,
choices=["gpt3mix/rt20", "gpt3mix/sst2"],
default=[])
group = parser.add_argument_group("Experiment Options")
group.add_argument("--num-trials", type=int, default=1)
group.add_argument("--master-exp-seed", type=int, default=42)
group.add_argument("--master-data-seed", type=int, default=42)
group.add_argument("--data-seeds", type=int, default=[],
action="append",
help="Set data seeds for sub-sampling. "
"If --num-trials > the number of data seeds, then "
"the data seeds will be used in "
"the round-robin fashion.")
group.add_argument("--progress", default=False, action="store_true")
group.add_argument("--save-dir", default="/tmp/out")
group = parser.add_argument_group("Data Options")
group.add_argument("--train-subsample",
help="Subsample is given as "
"number of samples per class (e.g. 3s) or "
"as a class-balanced ratio (e.g. 0.1f).")
group.add_argument("--valid-subsample",
help="Subsample is given as "
"number of samples per class (e.g. 3s) or "
"as a class-balanced ratio (e.g. 0.1f).")
group.add_argument("--test-subsample",
help="Subsample is given as "
"number of samples per class (e.g. 3s) or "
"as a class-balanced ratio (e.g. 0.1f).")
group.add_argument("--default-metatype", type=int, default=0,
help="Use default metatypes ('text' and 'label')")
group.add_argument("--text-type-override")
group.add_argument("--label-type-override")
group.add_argument("--label-map-override")
group = parser.add_argument_group("OpenAI GPT-3 Options")
group.add_argument("--api-key",
help="WARN: Save the api-key as 'openai-key' "
"in the working directory instead.")
group.add_argument("--gpt3-engine", default="ada",
choices=("ada", "babbage", "curie", "davinci"))
group.add_argument("--gpt3-batch-size", type=int, default=20)
group.add_argument("--gpt3-num-examples", type=int, default=10)
group.add_argument("--gpt3-frequency-penalty", type=float, default=0.01)
group.add_argument("--gpt3-max-retries", type=int, default=10)
group = parser.add_argument_group("Classifier Options")
group.add_argument("--classifier", default="transformers",
choices=["transformers", "tmix"])
group.add_argument("--model-name", default="distilbert-base-uncased")
group.add_argument("--batch-size", type=int, default=32)
group.add_argument("--patience", type=int, default=10)
group.add_argument("--optimizer", default="AdamW")
group.add_argument("--lr", type=float, default=5e-5)
group = parser.add_argument_group("Augmentation Options")
group.add_argument("--augmenter", default="none",
choices=("none", "gpt3-mix"))
group.add_argument("--reuse", type=int, default=1,
help="Whether to reuse generated synthetic augmentation")
group.add_argument("--multiplier", type=int, default=1,
help="Ratio of real-to-synthetic data.")
group.add_argument("--num-examples", type=int, default=2,
help="Number of examples to use for generating "
"augmentation sample.")
group.add_argument("--num-classes", type=int, default=2,
help="Number of classes to use for generating "
"augmentation sample.")
group.add_argument("--example-sampling", default="uniform",
choices=("uniform", "furthest", "closest",
"class-balanced"),
help="Example sampling strategy.")
group = parser.add_argument_group("GPT3Mix Specific Options")
group.add_argument("--gpt3-mix-max-tokens", type=int, default=100)
group.add_argument("--gpt3-mix-soft", type=int, default=1)
group = parser.add_argument_group("TMix Options")
group.add_argument("--tmix-alpha", type=float, default=0.75)
group.add_argument("--tmix-layers", type=int, nargs="+", default=[7, 9, 12])
return parser.parse_args()
def main():
args = parse_args()
save_dir = pathlib.Path(args.save_dir)
if save_dir.exists():
warnings.warn(f"saving directory {save_dir} already exists. "
f"overwriting...")
save_dir.mkdir(parents=True, exist_ok=True)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(save_dir.joinpath("run.log")),
logging.StreamHandler()
]
)
resolve_api_key(args)
logging.info("Command-line Arguments:")
logging.info(pprint.pformat(vars(args)))
logging.info(f"Raw command-line arguments: "
f"{' '.join(map(shlex.quote, sys.argv))}")
for dataset in args.datasets:
logging.info(f"loading {dataset} dataset...")
data, label_set, metatype = load_data(
name=dataset,
label_map=eval(args.label_map_override or "None")
)
if args.default_metatype:
metatype = {"text_type": "text", "label_type": "label"}
if args.text_type_override:
metatype["text_type"] = args.text_type_override
if args.label_type_override:
metatype["label_type"] = args.label_type_override
label_set = set(label_set)
data_splits = DataSplits(
train=SequenceDataset(data["train"]),
valid=SequenceDataset(data["validation"]),
test=SequenceDataset(data["test"])
)
if not args.data_seeds:
data_seed_generator = generate_seeds(args.master_data_seed, "data")
data_seeds = [seed for _, seed in zip(range(args.num_trials),
data_seed_generator)]
logging.info(f"generated data seeds: {data_seeds}")
else:
data_seeds = list(args.data_seeds)
logging.info(f"sample dataset instance: "
f"{random.choice(data_splits.train)}")
logging.info(f"label set: {label_set}")
def create_augmenter():
if args.augmenter == "gpt3-mix":
augmenter = GPT3MixAugmenter(
api_key=args.api_key,
label_set=label_set,
engine=args.gpt3_engine,
batch_size=args.gpt3_batch_size,
label_type=metatype["label_type"],
text_type=metatype["text_type"],
max_tokens=args.gpt3_mix_max_tokens,
frequency_penalty=args.gpt3_frequency_penalty,
max_retries=args.gpt3_max_retries,
soft_label=bool(args.gpt3_mix_soft),
ignore_error=True
)
augmenter.construct_prompt(
random.sample(data_splits.train, args.num_examples),
demo=True
)
return augmenter
elif args.augmenter == "none":
return
else:
raise NotImplementedError(
f"unsupported augmenter: {args.augmenter}")
def prepare_datasplits(run_idx, data_splits):
def _prepare_train_data(train_data):
if args.augmenter == "none":
return train_data
aug_save_dir = save_dir.joinpath("augmentations")
aug_save_dir.mkdir(parents=True, exist_ok=True)
return AugmentedDataset(
data=list(train_data),
augmenter=create_augmenter(),
multiplier=args.multiplier,
reuse=args.reuse,
save_path=aug_save_dir.joinpath(
f"run-{run_idx:03d}.jsonlines"),
num_examples=args.num_examples,
num_classes=args.num_classes,
sampling_strategy=args.example_sampling
)
data_seed = data_seeds.pop(0)
data_seeds.append(data_seed)
sub_splits = data_splits.to_dict()
for split in ("train", "valid", "test"):
subsample_spec = getattr(args, f"{split}_subsample")
if subsample_spec is None:
continue
set_seed(data_seed, f"subsampling {split}")
if subsample_spec.endswith("f"):
ratio = float(subsample_spec[:-1])
size = max(1, int(round(len(getattr(data_splits, split)) *
ratio / len(label_set))))
elif subsample_spec.endswith("s"):
size = int(subsample_spec[:-1])
else:
raise ValueError(f"unsupported subsample "
f"specification format: {subsample_spec}")
logging.info(f"subsampling {size} instances per class "
f"(total {len(label_set)} classes) "
f"from {split} set...")
subsample, _ = balanced_samples(
data=getattr(data_splits, split),
size=size
)
sub_splits[split] = SequenceDataset(subsample)
sub_splits["train"] = _prepare_train_data(sub_splits["train"])
s = DataSplits(**sub_splits)
return s
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.classifier == "transformers":
model = TransformersClassifier(
label_set=label_set,
tok_cls=transformers.AutoTokenizer,
model_cls=transformers.AutoModelForSequenceClassification,
model_name=args.model_name,
batch_size=args.batch_size,
patience=args.patience,
optimizer_cls=lambda x: getattr(torch.optim, args.optimizer)(
params=x, lr=args.lr, eps=1e-8),
device=device
).to(device)
elif args.classifier == "tmix":
model = TMixClassifier(
label_set=label_set,
model_name=args.model_name,
batch_size=args.batch_size,
patience=args.patience,
optimizer_cls=lambda x: getattr(torch.optim, args.optimizer)(
params=x, lr=args.lr, eps=1e-8),
device=device,
alpha=args.tmix_alpha,
mix_layer_set=frozenset(args.tmix_layers)
)
else:
raise ValueError(f"unrecognized classifier type: {args.classifier}")
if isinstance(model, torch.nn.Module):
num_params = sum(np.prod(p.size())
for p in model.parameters() if p.requires_grad)
logging.info(f"number of model params: {num_params:,d}")
runner = Runner(
model=model,
master_seed=args.master_exp_seed
)
accs = runner.run_multiple(data_splits, args.num_trials,
prepare_fn=prepare_datasplits)
accs_str = ", ".join(map("{:.4f}".format, accs))
logging.info(f"all accuracies: {accs_str}")
logging.info(f"mean: {np.mean(accs)}, std: {
|
np.std(accs)
|
numpy.std
|
# Natural Language Toolkit: Hidden Markov Model
#
# Copyright (C) 2001-2015 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> (fixes)
# <NAME> <<EMAIL>> (fixes)
# <NAME> <<EMAIL>> (fixes)
# <NAME> <<EMAIL>> (fixes)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Hidden Markov Models (HMMs) are largely used to assign the correct label sequence
to sequential data or assess the probability of a given label and data
sequence. These models are finite state machines characterised by a number of
states, transitions between these states, and output symbols emitted while in
each state. The HMM is an extension to the Markov chain, where each state
corresponds deterministically to a given event. In the HMM the observation is
a probabilistic function of the state. HMMs share the Markov chain's
assumption, being that the probability of transition from one state to another
only depends on the current state - i.e. the series of states that led to the
current state are not used. They are also time invariant.
The HMM is a directed graph, with probability weighted edges (representing the
probability of a transition between the source and sink states) where each
vertex emits an output symbol when entered. The symbol (or observation) is
non-deterministically generated. For this reason, knowing that a sequence of
output observations was generated by a given HMM does not mean that the
corresponding sequence of states (and what the current state is) is known.
This is the 'hidden' in the hidden Markov model.
Formally, a HMM can be characterised by:
- the output observation alphabet. This is the set of symbols which may be
observed as output of the system.
- the set of states.
- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These
represent the probability of transition to each state from a given state.
- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These
represent the probability of observing each symbol in a given state.
- the initial state distribution. This gives the probability of starting
in each state.
To ground this discussion, take a common NLP application, part-of-speech (POS)
tagging. An HMM is desirable for this task as the highest probability tag
sequence can be calculated for a given sequence of word forms. This differs
from other tagging techniques which often tag each word individually, seeking
to optimise each individual tagging greedily without regard to the optimal
combination of tags for a larger unit, such as a sentence. The HMM does this
with the Viterbi algorithm, which efficiently computes the optimal path
through the graph given the sequence of word forms.
In POS tagging the states usually have a 1:1 correspondence with the tag
alphabet - i.e. each state represents a single tag. The output observation
alphabet is the set of word forms (the lexicon), and the remaining three
parameters are derived by a training regime. With this information the
probability of a given sentence can be easily derived, by simply summing the
probability of each distinct path through the model. Similarly, the highest
probability tagging sequence can be derived with the Viterbi algorithm,
yielding a state sequence which can be mapped into a tag sequence.
This discussion assumes that the HMM has been trained. This is probably the
most difficult task with the model, and requires either MLE estimates of the
parameters or unsupervised learning using the Baum-Welch algorithm, a variant
of EM.
For more information, please consult the source code for this module,
which includes extensive demonstration code.
"""
from __future__ import print_function, unicode_literals, division
import re
import itertools
try:
import numpy as np
except ImportError:
pass
from cnltk.probability import (FreqDist, ConditionalFreqDist,
ConditionalProbDist, DictionaryProbDist,
DictionaryConditionalProbDist,
LidstoneProbDist, MutableProbDist,
MLEProbDist, RandomProbDist)
from cnltk.metrics import accuracy
from cnltk.util import LazyMap, unique_list
from cnltk.compat import python_2_unicode_compatible, izip, imap
from cnltk.tag.api import TaggerI
_TEXT = 0 # index of text in a tuple
_TAG = 1 # index of tag in a tuple
def _identity(labeled_symbols):
return labeled_symbols
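# Hedged usage sketch (added for illustration, not part of the original API surface): it
# shows the supervised-training + Viterbi-tagging workflow described in the module
# docstring above.  The toy corpus is hypothetical, and the sketch assumes the
# HiddenMarkovModelTrainer defined further down in this module; the function is only
# defined, never called, so importing the module is unaffected.
def _demo_pos_tagging():
    tagged_corpus = [
        [('the', 'DT'), ('dog', 'NN'), ('barks', 'VB')],
        [('the', 'DT'), ('cat', 'NN'), ('sleeps', 'VB')],
    ]
    tagger = HiddenMarkovModelTagger.train(tagged_corpus)
    # Viterbi decoding of an unlabelled sentence, e.g. [('the', 'DT'), ('dog', 'NN'), ...]
    return tagger.tag(['the', 'dog', 'sleeps'])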
@python_2_unicode_compatible
class HiddenMarkovModelTagger(TaggerI):
"""
Hidden Markov model class, a generative model for labelling sequence data.
These models define the joint probability of a sequence of symbols and
their labels (state transitions) as the product of the starting state
probability, the probability of each state transition, and the probability
of each observation being generated from each state. This is described in
more detail in the module documentation.
This implementation is based on the HMM description in Chapter 8, Huang,
Acero and Hon, Spoken Language Processing and includes an extension for
training shallow HMM parsers or specialized HMMs as in Molina et
al., 2002. A specialized HMM modifies training data by applying a
specialization function to create a new training set that is more
appropriate for sequential tagging with an HMM. A typical use case is
chunking.
:param symbols: the set of output symbols (alphabet)
:type symbols: seq of any
:param states: a set of states representing state space
:type states: seq of any
:param transitions: transition probabilities; Pr(s_i | s_j) is the
probability of transition from state i given the model is in
state_j
:type transitions: ConditionalProbDistI
:param outputs: output probabilities; Pr(o_k | s_i) is the probability
of emitting symbol k when entering state i
:type outputs: ConditionalProbDistI
:param priors: initial state distribution; Pr(s_i) is the probability
of starting in state i
:type priors: ProbDistI
:param transform: an optional function for transforming training
instances, defaults to the identity function.
:type transform: callable
"""
def __init__(self, symbols, states, transitions, outputs, priors,
transform=_identity):
self._symbols = unique_list(symbols)
self._states = unique_list(states)
self._transitions = transitions
self._outputs = outputs
self._priors = priors
self._cache = None
self._transform = transform
@classmethod
def _train(cls, labeled_sequence, test_sequence=None,
unlabeled_sequence=None, transform=_identity,
estimator=None, **kwargs):
if estimator is None:
def estimator(fd, bins):
return LidstoneProbDist(fd, 0.1, bins)
labeled_sequence = LazyMap(transform, labeled_sequence)
symbols = unique_list(word for sent in labeled_sequence
for word, tag in sent)
tag_set = unique_list(tag for sent in labeled_sequence
for word, tag in sent)
trainer = HiddenMarkovModelTrainer(tag_set, symbols)
hmm = trainer.train_supervised(labeled_sequence, estimator=estimator)
hmm = cls(hmm._symbols, hmm._states, hmm._transitions, hmm._outputs,
hmm._priors, transform=transform)
if test_sequence:
hmm.test(test_sequence, verbose=kwargs.get('verbose', False))
if unlabeled_sequence:
max_iterations = kwargs.get('max_iterations', 5)
hmm = trainer.train_unsupervised(unlabeled_sequence, model=hmm,
max_iterations=max_iterations)
if test_sequence:
hmm.test(test_sequence, verbose=kwargs.get('verbose', False))
return hmm
@classmethod
def train(cls, labeled_sequence, test_sequence=None,
unlabeled_sequence=None, **kwargs):
"""
Train a new HiddenMarkovModelTagger using the given labeled and
unlabeled training instances. Testing will be performed if test
instances are provided.
:return: a hidden markov model tagger
:rtype: HiddenMarkovModelTagger
:param labeled_sequence: a sequence of labeled training instances,
i.e. a list of sentences represented as tuples
:type labeled_sequence: list(list)
:param test_sequence: a sequence of labeled test instances
:type test_sequence: list(list)
:param unlabeled_sequence: a sequence of unlabeled training instances,
i.e. a list of sentences represented as words
:type unlabeled_sequence: list(list)
:param transform: an optional function for transforming training
instances, defaults to the identity function, see ``transform()``
:type transform: function
:param estimator: an optional function or class that maps a
condition's frequency distribution to its probability
distribution, defaults to a Lidstone distribution with gamma = 0.1
:type estimator: class or function
:param verbose: boolean flag indicating whether training should be
verbose or include printed output
:type verbose: bool
:param max_iterations: number of Baum-Welch iterations to perform
:type max_iterations: int
"""
return cls._train(labeled_sequence, test_sequence,
unlabeled_sequence, **kwargs)
def probability(self, sequence):
"""
Returns the probability of the given symbol sequence. If the sequence
is labelled, then returns the joint probability of the symbol, state
sequence. Otherwise, uses the forward algorithm to find the
probability over all label sequences.
:return: the probability of the sequence
:rtype: float
:param sequence: the sequence of symbols which must contain the TEXT
property, and optionally the TAG property
:type sequence: Token
"""
return 2**(self.log_probability(self._transform(sequence)))
def log_probability(self, sequence):
"""
Returns the log-probability of the given symbol sequence. If the
sequence is labelled, then returns the joint log-probability of the
symbol, state sequence. Otherwise, uses the forward algorithm to find
the log-probability over all label sequences.
:return: the log-probability of the sequence
:rtype: float
:param sequence: the sequence of symbols which must contain the TEXT
property, and optionally the TAG property
:type sequence: Token
"""
sequence = self._transform(sequence)
T = len(sequence)
if T > 0 and sequence[0][_TAG]:
last_state = sequence[0][_TAG]
p = self._priors.logprob(last_state) + \
self._output_logprob(last_state, sequence[0][_TEXT])
for t in range(1, T):
state = sequence[t][_TAG]
p += self._transitions[last_state].logprob(state) + \
self._output_logprob(state, sequence[t][_TEXT])
last_state = state
return p
else:
alpha = self._forward_probability(sequence)
p = logsumexp2(alpha[T-1])
return p
def tag(self, unlabeled_sequence):
"""
Tags the sequence with the highest probability state sequence. This
uses the best_path method to find the Viterbi path.
:return: a labelled sequence of symbols
:rtype: list
:param unlabeled_sequence: the sequence of unlabeled symbols
:type unlabeled_sequence: list
"""
unlabeled_sequence = self._transform(unlabeled_sequence)
return self._tag(unlabeled_sequence)
def _tag(self, unlabeled_sequence):
path = self._best_path(unlabeled_sequence)
return list(izip(unlabeled_sequence, path))
def _output_logprob(self, state, symbol):
"""
:return: the log probability of the symbol being observed in the given
state
:rtype: float
"""
return self._outputs[state].logprob(symbol)
def _create_cache(self):
"""
The cache is a tuple (P, O, X, S) where:
- S maps symbols to integers. I.e., it is the inverse
mapping from self._symbols; for each symbol s in
self._symbols, the following is true::
self._symbols[S[s]] == s
- O is the log output probabilities::
O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) )
- X is the log transition probabilities::
X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) )
- P is the log prior probabilities::
P[i] = log( P(tag[0]=state[i]) )
"""
if not self._cache:
N = len(self._states)
M = len(self._symbols)
P = np.zeros(N, np.float32)
X = np.zeros((N, N), np.float32)
O = np.zeros((N, M), np.float32)
for i in range(N):
si = self._states[i]
P[i] = self._priors.logprob(si)
for j in range(N):
X[i, j] = self._transitions[si].logprob(self._states[j])
for k in range(M):
O[i, k] = self._output_logprob(si, self._symbols[k])
S = {}
for k in range(M):
S[self._symbols[k]] = k
self._cache = (P, O, X, S)
def _update_cache(self, symbols):
# add new symbols to the symbol table and repopulate the output
# probabilities and symbol table mapping
if symbols:
self._create_cache()
P, O, X, S = self._cache
for symbol in symbols:
if symbol not in self._symbols:
self._cache = None
self._symbols.append(symbol)
# don't bother with the work if there aren't any new symbols
if not self._cache:
N = len(self._states)
M = len(self._symbols)
Q = O.shape[1]
# add new columns to the output probability table without
# destroying the old probabilities
O = np.hstack([O, np.zeros((N, M - Q), np.float32)])
for i in range(N):
si = self._states[i]
# only calculate probabilities for new symbols
for k in range(Q, M):
O[i, k] = self._output_logprob(si, self._symbols[k])
# only create symbol mappings for new symbols
for k in range(Q, M):
S[self._symbols[k]] = k
self._cache = (P, O, X, S)
def reset_cache(self):
self._cache = None
def best_path(self, unlabeled_sequence):
"""
Returns the state sequence of the optimal (most probable) path through
the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
programming.
:return: the state sequence
:rtype: sequence of any
:param unlabeled_sequence: the sequence of unlabeled symbols
:type unlabeled_sequence: list
"""
unlabeled_sequence = self._transform(unlabeled_sequence)
return self._best_path(unlabeled_sequence)
def _best_path(self, unlabeled_sequence):
T = len(unlabeled_sequence)
N = len(self._states)
self._create_cache()
self._update_cache(unlabeled_sequence)
P, O, X, S = self._cache
V = np.zeros((T, N), np.float32)
B = -np.ones((T, N), int)
V[0] = P + O[:, S[unlabeled_sequence[0]]]
for t in range(1, T):
for j in range(N):
vs = V[t-1, :] + X[:, j]
best = np.argmax(vs)
V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]]
B[t, j] = best
current = np.argmax(V[T-1,:])
sequence = [current]
for t in range(T-1, 0, -1):
last = B[t, current]
sequence.append(last)
current = last
sequence.reverse()
return list(map(self._states.__getitem__, sequence))
def best_path_simple(self, unlabeled_sequence):
"""
Returns the state sequence of the optimal (most probable) path through
the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
programming. This uses a simple, direct method, and is included for
teaching purposes.
:return: the state sequence
:rtype: sequence of any
:param unlabeled_sequence: the sequence of unlabeled symbols
:type unlabeled_sequence: list
"""
unlabeled_sequence = self._transform(unlabeled_sequence)
return self._best_path_simple(unlabeled_sequence)
def _best_path_simple(self, unlabeled_sequence):
T = len(unlabeled_sequence)
N = len(self._states)
V = np.zeros((T, N), np.float64)
B = {}
# find the starting log probabilities for each state
symbol = unlabeled_sequence[0]
for i, state in enumerate(self._states):
V[0, i] = self._priors.logprob(state) + \
self._output_logprob(state, symbol)
B[0, state] = None
# find the maximum log probabilities for reaching each state at time t
for t in range(1, T):
symbol = unlabeled_sequence[t]
for j in range(N):
sj = self._states[j]
best = None
for i in range(N):
si = self._states[i]
va = V[t-1, i] + self._transitions[si].logprob(sj)
if not best or va > best[0]:
best = (va, si)
V[t, j] = best[0] + self._output_logprob(sj, symbol)
B[t, sj] = best[1]
# find the highest probability final state
best = None
for i in range(N):
val = V[T-1, i]
if not best or val > best[0]:
best = (val, self._states[i])
# traverse the back-pointers B to find the state sequence
current = best[1]
sequence = [current]
for t in range(T-1, 0, -1):
last = B[t, current]
sequence.append(last)
current = last
sequence.reverse()
return sequence
def random_sample(self, rng, length):
"""
Randomly sample the HMM to generate a sentence of a given length. This
samples the prior distribution then the observation distribution and
transition distribution for each subsequent observation and state.
This will mostly generate unintelligible garbage, but can provide some
amusement.
:return: the randomly created state/observation sequence,
generated according to the HMM's probability
distributions. The SUBTOKENS have TEXT and TAG
properties containing the observation and state
respectively.
:rtype: list
:param rng: random number generator
:type rng: Random (or any object with a random() method)
:param length: desired output length
:type length: int
"""
# sample the starting state and symbol prob dists
tokens = []
state = self._sample_probdist(self._priors, rng.random(), self._states)
symbol = self._sample_probdist(self._outputs[state],
rng.random(), self._symbols)
tokens.append((symbol, state))
for i in range(1, length):
# sample the state transition and symbol prob dists
state = self._sample_probdist(self._transitions[state],
rng.random(), self._states)
symbol = self._sample_probdist(self._outputs[state],
rng.random(), self._symbols)
tokens.append((symbol, state))
return tokens
def _sample_probdist(self, probdist, p, samples):
cum_p = 0
for sample in samples:
add_p = probdist.prob(sample)
if cum_p <= p <= cum_p + add_p:
return sample
cum_p += add_p
raise Exception('Invalid probability distribution - '
'does not sum to one')
def entropy(self, unlabeled_sequence):
"""
Returns the entropy over labellings of the given sequence. This is
given by::
H(O) = - sum_S Pr(S | O) log Pr(S | O)
where the summation ranges over all state sequences, S. Let
*Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state
sequences and O is the observation sequence. As such the entropy can
be re-expressed as::
H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ]
= log Z - sum_S Pr(S | O) log Pr(S, O)
= log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t log Pr(S_t | S_{t-1}) + sum_t log Pr(O_t | S_t) ]
The order of summation for the log terms can be flipped, allowing
dynamic programming to be used to calculate the entropy. Specifically,
we use the forward and backward probabilities (alpha, beta) giving::
H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0)
+ sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si)
+ sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st)
This simply uses alpha and beta to find the probabilities of partial
sequences, constrained to include the given state(s) at some point in
time.
"""
unlabeled_sequence = self._transform(unlabeled_sequence)
T = len(unlabeled_sequence)
N = len(self._states)
alpha = self._forward_probability(unlabeled_sequence)
beta = self._backward_probability(unlabeled_sequence)
normalisation = logsumexp2(alpha[T-1])
entropy = normalisation
# starting state, t = 0
for i, state in enumerate(self._states):
p = 2**(alpha[0, i] + beta[0, i] - normalisation)
entropy -= p * self._priors.logprob(state)
#print 'p(s_0 = %s) =' % state, p
# state transitions
for t0 in range(T - 1):
t1 = t0 + 1
for i0, s0 in enumerate(self._states):
for i1, s1 in enumerate(self._states):
p = 2**(alpha[t0, i0] + self._transitions[s0].logprob(s1) +
self._outputs[s1].logprob(
unlabeled_sequence[t1][_TEXT]) +
beta[t1, i1] - normalisation)
entropy -= p * self._transitions[s0].logprob(s1)
#print 'p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p
# symbol emissions
for t in range(T):
for i, state in enumerate(self._states):
p = 2**(alpha[t, i] + beta[t, i] - normalisation)
entropy -= p * self._outputs[state].logprob(
unlabeled_sequence[t][_TEXT])
#print 'p(s_%d = %s) =' % (t, state), p
return entropy
def point_entropy(self, unlabeled_sequence):
"""
Returns the pointwise entropy over the possible states at each
position in the chain, given the observation sequence.
"""
unlabeled_sequence = self._transform(unlabeled_sequence)
T = len(unlabeled_sequence)
N = len(self._states)
alpha = self._forward_probability(unlabeled_sequence)
beta = self._backward_probability(unlabeled_sequence)
normalisation = logsumexp2(alpha[T-1])
entropies =
|
np.zeros(T, np.float64)
|
numpy.zeros
|
# <NAME>, 23.01.2018
import sys
import os
import numpy as np
from scipy.signal import convolve2d
import copy as cp
from shapely.geometry import Point, Polygon, MultiPolygon, shape
from shapely.vectorized import contains
import shapefile as shp
import json
import geopandas as gpd
import matplotlib.pylab as plt
import matplotlib.colors as mcolors
import matplotlib
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import pyfesom2 as pf
from .sub_mesh import *
from .sub_data import *
from .sub_plot import *
from .sub_index import *
from .colormap_c2c import *
def load_transect_fesom2(mesh, data, transect_list, do_compute=True, ):
#___________________________________________________________________________
# str_anod = ''
index_list = []
idxin_list = []
cnt = 0
#___________________________________________________________________________
# loop over box_list
for transect in transect_list:
#_______________________________________________________________________
# select data points closest to transect points --> pyfesom2
idx_nodes = pf.tunnel_fast1d(mesh.n_y, mesh.n_x, transect['ipm'])
index_list.append( data.isel(nod2=idx_nodes.astype(int)) )
index_list[cnt] = index_list[cnt].assign_coords(lon=("lon",transect['ipm'][0,:]))
index_list[cnt] = index_list[cnt].assign_coords(lat=("lat",transect['ipm'][1,:]))
index_list[cnt] = index_list[cnt].assign_coords(dst=("dst",transect['ipmd']))
#_______________________________________________________________________
if do_compute: index_list[cnt] = index_list[cnt].compute()
#_______________________________________________________________________
vname = list(index_list[cnt].keys())
if transect['name'] is not None:
index_list[cnt][vname[0]].attrs['transect_name'] = transect['name']
else:
index_list[cnt][vname[0]].attrs['transect_name'] = 'None'
index_list[cnt][vname[0]].attrs['lon'] = transect['lon']
index_list[cnt][vname[0]].attrs['lat'] = transect['lat']
#_______________________________________________________________________
cnt = cnt + 1
#___________________________________________________________________________
return(index_list)
def load_zmeantransect_fesom2(mesh, data, box_list, dlat=0.5, boxname=None, do_harithm='mean',
do_compute=True, do_outputidx=False, diagpath=None, do_onelem=False,
do_info=False, do_smooth=True,
**kwargs,):
#___________________________________________________________________________
# str_anod = ''
index_list = []
idxin_list = []
cnt = 0
#___________________________________________________________________________
# loop over box_list
vname = list(data.keys())[0]
if 'nz' in list(data[vname].dims): which_ddim, ndi, depth = 'nz' , mesh.nlev , mesh.zlev
elif 'nz1' in list(data[vname].dims): which_ddim, ndi, depth = 'nz1', mesh.nlev-1, mesh.zmid
for box in box_list:
if not isinstance(box, shp.Reader) and box != 'global' and box is not None:
if len(box)==2: boxname, box = box[1], box[0]
#_______________________________________________________________________
# compute box mask index for nodes
n_idxin=do_boxmask(mesh,box)
if do_onelem:
e_idxin = n_idxin[mesh.e_i].sum(axis=1)>=1
#___________________________________________________________________________
# do zonal mean calculation either on nodes or on elements
# keep in mind that node area info is changing over depth --> therefore load it from file
fname = data[vname].attrs['runid']+'.mesh.diag.nc'
if diagpath is None:
if os.path.isfile( os.path.join(data[vname].attrs['datapath'], fname) ):
dname = data[vname].attrs['datapath']
elif os.path.isfile( os.path.join( os.path.join(os.path.dirname(os.path.normpath(data[vname].attrs['datapath'])),'1/'), fname) ):
dname = os.path.join(os.path.dirname(os.path.normpath(data[vname].attrs['datapath'])),'1/')
elif os.path.isfile( os.path.join(mesh.path,fname) ):
dname = mesh.path
else:
raise ValueError('could not find directory with...mesh.diag.nc file')
diagpath = os.path.join(dname,fname)
if do_info: print(' --> found diag in directory: {}'.format(diagpath))
else:
if os.path.isfile(os.path.join(diagpath,fname)):
diagpath = os.path.join(diagpath,fname)
elif os.path.isfile(os.path.join(os.path.join(os.path.dirname(os.path.normpath(diagpath)),'1/'),fname)) :
diagpath = os.path.join(os.path.join(os.path.dirname(os.path.normpath(diagpath)),'1/'),fname)
#___________________________________________________________________________
# compute area weighted vertical velocities on elements
if do_onelem:
#_______________________________________________________________________
# load elem area from diag file
if ( os.path.isfile(diagpath)):
mat_area = xr.open_mfdataset(diagpath, parallel=True, **kwargs)['elem_area']
mat_area = mat_area.isel(elem=e_idxin).compute()
mat_area = mat_area.expand_dims({which_ddim:depth}).transpose()
mat_iz = xr.open_mfdataset(diagpath, parallel=True, **kwargs)['nlevels']-1
mat_iz = mat_iz.isel(elem=e_idxin).compute()
else:
raise ValueError('could not find ...mesh.diag.nc file')
#_______________________________________________________________________
# create meridional bins
e_y = mesh.n_y[mesh.e_i[e_idxin,:]].sum(axis=1)/3.0
lat = np.arange(np.floor(e_y.min())+dlat/2,
np.ceil( e_y.max())-dlat/2,
dlat)
lat_i = (( e_y-lat[0])/dlat ).astype('int')
#_______________________________________________________________________
# mean over elements + select MOC basin
if 'time' in list(data.dims):
wdim = ['time','elem',which_ddim]
wdum = data[vname].data[:, mesh.e_i[e_idxin,:], :].sum(axis=2)/3.0 * 1e-6
else :
wdim = ['elem',which_ddim]
wdum = data[vname].data[mesh.e_i[e_idxin,:], :].sum(axis=1)/3.0 * 1e-6
mat_mean = xr.DataArray(data=wdum, dims=wdim)
mat_mean = mat_mean.fillna(0.0)
del wdim, wdum
#_______________________________________________________________________
# calculate area weighted mean
if 'time' in list(data.dims):
nt = data['time'].values.size
for nti in range(nt):
mat_mean.data[nti,:,:] = np.multiply(mat_mean.data[nti,:,:], mat_area.data)
# make sure the ocean floor is set to zero
for di in range(0,ndi):
mat_mean.data[:, np.where(di>=mat_iz)[0], di]=0.0
else:
mat_mean.data = np.multiply(mat_mean.data, mat_area.data)
# make sure the ocean floor is set to zero
for di in range(0,ndi):
mat_mean.data[np.where(di>=mat_iz)[0], di]=0.0
del mat_area
# compute area weighted vertical velocities on vertices
else:
#_______________________________________________________________________
# load vertice cluster area from diag file
if ( os.path.isfile(diagpath)):
mat_area = xr.open_mfdataset(diagpath, parallel=True, **kwargs)['nod_area'].transpose()
if 'nod_n' in list(mat_area.dims): mat_area = mat_area.isel(nod_n=n_idxin).compute()
elif 'nod2' in list(mat_area.dims): mat_area = mat_area.isel(nod2=n_idxin).compute()
mat_iz = xr.open_mfdataset(diagpath, parallel=True, **kwargs)['nlevels_nod2D']-1
if 'nod_n' in list(mat_area.dims): mat_iz = mat_iz.isel(nod_n=n_idxin).compute()
elif 'nod2' in list(mat_area.dims): mat_iz = mat_iz.isel(nod2=n_idxin).compute()
else:
raise ValueError('could not find ...mesh.diag.nc file')
# data are on mid depth levels
if which_ddim=='nz1': mat_area = mat_area[:,:-1]
#_______________________________________________________________________
# create meridional bins
lat = np.arange(np.floor(mesh.n_y[n_idxin].min())+dlat/2,
np.ceil( mesh.n_y[n_idxin].max())-dlat/2,
dlat)
lat_i = ( (mesh.n_y[n_idxin]-lat[0])/dlat ).astype('int')
#_______________________________________________________________________
# select MOC basin
mat_mean = data[vname].isel(nod2=n_idxin)
isnan = np.isnan(mat_mean.values)
mat_mean.values[isnan] = 0.0
mat_area.values[isnan] = 0.0
del(isnan)
#mat_mean = mat_mean.fillna(0.0)
#_______________________________________________________________________
# calculate area weighted mean
if 'time' in list(data.dims):
nt = data['time'].values.size
for nti in range(nt):
mat_mean.data[nti,:,:] = np.multiply(mat_mean.data[nti,:,:], mat_area.data)
# make sure the ocean floor is set to zero
for di in range(0,ndi):
mat_mean.data[:, np.where(di>=mat_iz)[0], di]=0.0
else:
mat_mean.data = np.multiply(mat_mean.data, mat_area.data)
# make sure the ocean floor is set to zero
for di in range(0,ndi):
mat_mean.data[np.where(di>=mat_iz)[0], di]=0.0
#___________________________________________________________________________
    # This approach is about five times faster than the original from dima, at
    # least for the COREv2 mesh, but probably needs a bit more RAM
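    # (the speed-up comes from pre-computing an integer latitude-bin index per
    #  element/vertex, lat_i above, and summing whole array slices per bin)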
if 'time' in list(data.dims):
aux_zonmean = np.zeros([nt, ndi, lat.size])
aux_zonarea = np.zeros([nt, ndi, lat.size])
else :
aux_zonmean = np.zeros([ndi, lat.size])
aux_zonarea = np.zeros([ndi, lat.size])
bottom = np.zeros([lat.size,])
numbtri = np.zeros([lat.size,])
    # switch topo between computation on nodes and elements
if do_onelem: topo = np.float16(mesh.zlev[mesh.e_iz[e_idxin]])
else : topo = np.float16(mesh.n_z[n_idxin])
    # this is more or less required so the bottom patch looks acceptable
topo[np.where(topo>-30.0)[0]]=np.nan
# loop over meridional bins
if 'time' in list(data.dims):
for bini in range(lat_i.min(), lat_i.max()):
numbtri[bini]= np.sum(lat_i==bini)
aux_zonmean[:,:, bini]=mat_mean[:,lat_i==bini,:].sum(axis=1)
aux_zonarea[:,:, bini]=mat_area[:,lat_i==bini,:].sum(axis=1)
bottom[bini] = np.nanpercentile(topo[lat_i==bini],15)
        # kick out outer bins where no triangles are found
idx = numbtri>0
aux_zonmean = aux_zonmean[:,:,idx]
aux_zonarea = aux_zonarea[:,:,idx]
del(mat_mean, mat_area, topo)
else:
for bini in range(lat_i.min(), lat_i.max()):
numbtri[bini]= np.sum(lat_i==bini)
aux_zonmean[:, bini]=mat_mean[lat_i==bini,:].sum(axis=0)
aux_zonarea[:, bini]=mat_area[lat_i==bini,:].sum(axis=0)
#bottom[bini] = np.nanpercentile(topo[lat_i==bini],15)
bottom[bini] = np.nanpercentile(topo[lat_i==bini],10)
        # kick out outer bins where no triangles are found
idx = numbtri>0
aux_zonmean = aux_zonmean[:,idx]
aux_zonarea = aux_zonarea[:,idx]
del(mat_mean, mat_area, topo)
bottom = bottom[idx]
lat = lat[idx]
aux_zonmean[aux_zonarea!=0]= aux_zonmean[aux_zonarea!=0]/aux_zonarea[aux_zonarea!=0]
#___________________________________________________________________________
if do_smooth:
filt=np.array([1,2,1])
filt=filt/np.sum(filt)
filt=filt[np.newaxis,:]
aux_zonmean[aux_zonarea==0] = 0.0
aux_zonmean = convolve2d(aux_zonmean, filt, mode='same', boundary='symm')
#___________________________________________________________________________
aux_zonmean[aux_zonarea==0]= np.nan
del(aux_zonarea)
#___________________________________________________________________________
# smooth bottom line a bit
filt=np.array([1,2,3,2,1])
filt=filt/np.sum(filt)
aux = np.concatenate( (np.ones((filt.size,))*bottom[0],bottom,np.ones((filt.size,))*bottom[-1] ) )
aux = np.convolve(aux,filt,mode='same')
bottom = aux[filt.size:-filt.size]
del aux, filt
#___________________________________________________________________________
    # Create xarray Dataset for moc_basins
# copy global attributes from dataset
global_attr = data.attrs
# copy local attributes from dataset
local_attr = data[vname].attrs
if 'long_name' in local_attr:
local_attr['long_name'] = " zonal mean {}".format(local_attr['long_name'])
else:
local_attr['long_name'] = " zonal mean {}".format(vname)
# create coordinates
if 'time' in list(data.dims):
coords = {'depth' : ([which_ddim], depth),
'lat' : (['ny'], lat ),
'bottom': (['ny'], bottom ),
'time' : (['time'], data['time'].values)}
dims = ['time', which_ddim, 'ny']
else:
coords = {'depth' : ([which_ddim], depth),
'lat' : (['ny'], lat ),
'bottom': (['ny'], bottom )}
dims = [which_ddim,'ny']
    # create data variables
data_vars = {vname : (dims, aux_zonmean, local_attr)}
index_list.append( xr.Dataset(data_vars=data_vars, coords=coords, attrs=global_attr) )
#_______________________________________________________________________
    if box is None or box == 'global':
index_list[cnt][vname].attrs['transect_name'] = 'global zonal mean'
elif isinstance(box, shp.Reader):
str_name = box.shapeName.split('/')[-1].replace('_',' ')
index_list[cnt][vname].attrs['transect_name'] = '{} zonal mean'.format(str_name.lower())
#_______________________________________________________________________
cnt = cnt + 1
#___________________________________________________________________________
if do_outputidx:
return(index_list, idxin_list)
else:
return(index_list)
#
#
#_______________________________________________________________________________
def analyse_transects(input_transect, which_res='res', res=1.0, npts=500):
transect_list = []
    # loop over transects in list
for transec_lon,transec_lat, transec_name in input_transect:
ip, ipm, ipd, ipmd, pm_nvecm, pm_evec, idx_nodes = [],[],[],[],[],[],[]
        # loop over transect points
for ii in range(0,len(transec_lon)-1):
#___________________________________________________________________
P1 = [transec_lon[ii ], transec_lat[ii ]]
P2 = [transec_lon[ii+1], transec_lat[ii+1]]
#___________________________________________________________________
# unit vector of line
evec = np.array([P2[0]-P1[0], P2[1]-P1[1]])
evecn = (evec[0]**2+evec[1]**2)**0.5
evec = evec/evecn
if which_res=='npts':
evecnl = np.linspace(0,evecn,npts)
loop_pts = npts
elif which_res=='res':
evecnl = np.arange(0,evecn,res)
loop_pts=evecnl.size
# normal vector
nvec = np.array([-evec[1],evec[0]])
#___________________________________________________________________
# interpolation points
dum_ip = np.vstack(( P1[0]+evecnl*evec[0], P1[1]+evecnl*evec[1]))
# interpolation mid points
evecnlpm = evecnl[:-1] + (evecnl[1:]-evecnl[:-1])/2.0
dum_ipm = np.vstack(( P1[0]+evecnlpm*evec[0], P1[1]+evecnlpm*evec[1]))
del(evecnlpm)
# compute dr in km
Rearth = 6371.0
x,y,z = grid_cart3d(np.radians(dum_ip[0,:]), np.radians(dum_ip[1,:]), R=Rearth)
dr = Rearth*np.arccos( (x[:-1]*x[1:] + y[:-1]*y[1:] + z[:-1]*z[1:])/(Rearth**2) )
x,y,z = grid_cart3d(np.radians(dum_ipm[0,:]), np.radians(dum_ipm[1,:]), R=Rearth)
drm = Rearth*np.arccos( (x[:-1]*x[1:] + y[:-1]*y[1:] + z[:-1]*z[1:])/(Rearth**2) )
del(x,y,z)
# compute distance from start point for corner and mid points
if ii==0: dstart = 0.0
else : dstart = ipd[-1]
dum_ipd = np.cumsum(np.hstack((dstart, dr)))
dum_ipmd = np.cumsum(np.hstack((dstart+dr[0]/2, drm)))
            # all normal and unit vectors at mid points
dum_pm_nvec = np.vstack( ( np.ones((loop_pts-1,))*nvec[0], np.ones((loop_pts-1,))*nvec[1]) )
dum_pm_evec = np.vstack( ( np.ones((loop_pts-1,))*evec[0], np.ones((loop_pts-1,))*evec[1]) )
#___________________________________________________________________
# collect points from section of transect
if ii==0:
ip, ipm, ipd, ipmd, pm_nvec, pm_evec = dum_ip, dum_ipm, dum_ipd, dum_ipmd, dum_pm_nvec, dum_pm_evec
else:
ip = np.hstack((ip, dum_ip))
ipm = np.hstack((ipm, dum_ipm))
ipd = np.hstack((ipd, dum_ipd))
ipmd = np.hstack((ipmd, dum_ipmd))
pm_nvec = np.hstack((pm_nvec, dum_pm_nvec))
                pm_evec = np.hstack((pm_evec, dum_pm_evec))
del(dum_ip, dum_ipm, dum_ipd, dum_ipmd, dum_pm_nvec, dum_pm_evec)
#_______________________________________________________________________
transect = dict()
transect['lon'] = transec_lon
transect['lat'] = transec_lat
transect['name'] = transec_name
transect['ip'] = ip
transect['ipm'] = ipm
transect['ipd'] = ipd
transect['ipmd'] = ipmd
transect['pm_nvec'] = pm_nvec
transect['pm_evec'] = pm_evec
transect_list.append(transect)
#___________________________________________________________________________
return(transect_list)
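#
#
#_______________________________________________________________________________
# Added sketch (not part of the original toolbox): arc length in km between
# consecutive points of a lon/lat polyline on a sphere, mirroring the
# grid_cart3d + arccos computation used inside analyse_transects above
# (R = 6371.0 km, i.e. a spherical Earth, is assumed for illustration only).
def arc_dist_km_sketch(lon, lat, R=6371.0):
    lonr, latr = np.radians(np.asarray(lon)), np.radians(np.asarray(lat))
    # unit vectors on the sphere
    x = np.cos(latr)*np.cos(lonr)
    y = np.cos(latr)*np.sin(lonr)
    z = np.sin(latr)
    # central angle between consecutive points gives the arc length
    cosang = np.clip(x[:-1]*x[1:] + y[:-1]*y[1:] + z[:-1]*z[1:], -1.0, 1.0)
    return R*np.arccos(cosang)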
#+___PLOT MERIDIONAL OVERTURNING CIRCULATION _________________________________+
#| |
#+_____________________________________________________________________________+
def plot_transects(data, transects, figsize=[12, 6],
n_rc=[1, 1], do_grid=True, cinfo=None, do_rescale=False,
cbar_nl=8, cbar_orient='vertical', cbar_label=None, cbar_unit=None,
do_bottom=True, max_dep=[], color_bot=[0.6, 0.6, 0.6],
pos_fac=1.0, pos_gap=[0.02, 0.02], do_save=None, save_dpi=600,
do_contour=True, do_clabel=True, title='descript',
pos_extend=[0.05, 0.08, 0.95,0.95], do_ylog=True,
):
#____________________________________________________________________________
fontsize = 12
rescale_str = None
#___________________________________________________________________________
    # make matrices with row/column indices to know where to put labels
rowlist = np.zeros((n_rc[0], n_rc[1]))
collist = np.zeros((n_rc[0], n_rc[1]))
for ii in range(0,n_rc[0]): rowlist[ii,:]=ii
for ii in range(0,n_rc[1]): collist[:,ii]=ii
rowlist = rowlist.flatten()
collist = collist.flatten()
#___________________________________________________________________________
# create figure and axes
fig, ax = plt.subplots( n_rc[0],n_rc[1], figsize=figsize,
gridspec_kw=dict(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.05, hspace=0.05,),
constrained_layout=False, sharex=True, sharey=True)
#___________________________________________________________________________
    # flatten axes if there is more than one
if isinstance(ax, np.ndarray): ax = ax.flatten()
else: ax = [ax]
nax = len(ax)
#___________________________________________________________________________
    # data must be a list filled with xarray data
if not isinstance(data , list): data = [data]
ndata = len(data)
#___________________________________________________________________________
# set up color info
cinfo = do_setupcinfo(cinfo, data, do_rescale, do_index=True)
#_______________________________________________________________________
    # set up normalization: log10, symmetric log10, or None
which_norm = do_compute_scalingnorm(cinfo, do_rescale)
#___________________________________________________________________________
# loop over axes
for ii in range(0,ndata):
#_______________________________________________________________________
# limit data to color range
vname= list(data[ii][0].keys())[0]
data_plot = data[ii][0][vname].values.transpose().copy()
#_______________________________________________________________________
# setup x-coord and y-coord
        if np.unique(data[ii][0]['lon'].values)
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
from warnings import warn
import numpy as np
import numpy.fft as fft
from scipy.interpolate import LSQUnivariateSpline, interp1d
from astropy.modeling import fitting, models
from astropy.modeling import models as astropy_models
from scipy.signal import argrelmin
import astropy.units as u
from matplotlib.path import Path
from skimage.measure import find_contours
from scipy.ndimage import map_coordinates
from ..stats_utils import EllipseModel
def WidthEstimate2D(inList, method='contour', noise_ACF=0,
diagnosticplots=False, brunt_beamcorrect=True,
beam_fwhm=None, spatial_cdelt=None, **fit_kwargs):
"""
Estimate spatial widths from a set of autocorrelation images.
.. warning:: Error estimation is not implemented for `interpolate` or
`xinterpolate`.
Parameters
----------
inList: {list of 2D `~numpy.ndarray`s, 3D `~numpy.ndarray}
The list of autocorrelation images.
method: {'contour', 'fit', 'interpolate', 'xinterpolate'}, optional
The width estimation method to use. `contour` fits an ellipse to the
1/e contour about the peak. `fit` fits a 2D Gaussian to the peak.
`interpolate` and `xinterpolate` both estimate the 1/e level from
interpolating the data onto a finer grid near the center.
`xinterpolate` first fits a 2D Gaussian to estimate the radial
distances about the peak.
noise_ACF: {float, 2D `~numpy.ndarray`}, optional
The noise autocorrelation function to subtract from the autocorrelation
images. This is typically produced by the last few eigenimages, whose
        structure should consist of irreducible noise.
diagnosticplots: bool, optional
Show diagnostic plots for the first 9 autocorrelation images showing
the goodness of fit (for the gaussian estimator) or ??? (presently
nothing) for the others.
brunt_beamcorrect : bool, optional
Apply the beam correction. When enabled, the beam size must be given.
beam_fwhm : None or astropy.units.Quantity
The FWHM beam width in angular units. Must be given when using
`brunt_beamcorrect`.
spatial_cdelt : {None, astropy.units.Quantity}, optional
The angular scale of a pixel in the given data. Must be given when
using brunt_beamcorrect.
fit_kwargs : dict, optional
Used when method is 'contour'. Passed to
`turbustat.statistics.stats_utils.EllipseModel.estimate_stderrs`.
Returns
-------
scales : array
The array of estimated scales with length len(inList) or the 0th
dimension size if `inList` is a 3D array.
scale_errors : array
Uncertainty estimations on the scales.
"""
allowed_methods = ['fit', 'interpolate', 'xinterpolate', 'contour']
if method not in allowed_methods:
raise ValueError("Method must be 'fit', 'interpolate', 'xinterpolate'"
" or 'contour'.")
y_scales = np.zeros(len(inList))
x_scales = np.zeros(len(inList))
y_scale_errors = np.zeros(len(inList))
x_scale_errors = np.zeros(len(inList))
# set up the x/y grid just once
z = inList[0]
# NOTE: previous versions were dividing by an extra factor of 2!
x = fft.fftfreq(z.shape[0]) * z.shape[0]
y = fft.fftfreq(z.shape[1]) * z.shape[1]
xmat, ymat = np.meshgrid(x, y, indexing='ij')
xmat = np.fft.fftshift(xmat)
ymat = np.fft.fftshift(ymat)
rmat = (xmat**2 + ymat**2)**0.5
for idx, zraw in enumerate(inList):
z = zraw - noise_ACF
if method == 'fit':
output, cov = fit_2D_gaussian(xmat, ymat, z)
y_scales[idx] = output.y_stddev_0.value
x_scales[idx] = output.x_stddev_0.value
errs = np.sqrt(np.abs(cov.diagonal()))
# Order in the cov matrix is given by the order of parameters in
# model.param_names. But amplitude and the means are fixed, so in
# this case they are the first 2.
y_scale_errors[idx] = errs[1]
x_scale_errors[idx] = errs[0]
if diagnosticplots and idx < 9:
import matplotlib.pyplot as plt
ax = plt.subplot(3, 3, idx + 1)
ax.imshow(z, cmap='afmhot')
ax.contour(output(xmat, ymat),
levels=np.array([0.25, 0.5, 0.75, 1.0]) * z.max(),
colors=['c'] * 3)
# ax.show()
elif method == 'interpolate':
warn("Error estimation not implemented for interpolation!")
rvec = rmat.ravel()
zvec = z.ravel()
zvec /= zvec.max()
sortidx = np.argsort(zvec)
rvec = rvec[sortidx]
zvec = zvec[sortidx]
dz = int(len(zvec) / 100.)
spl = LSQUnivariateSpline(zvec, rvec, zvec[dz:-dz:dz])
x_scales[idx] = spl(np.exp(-1)) / np.sqrt(2)
y_scales[idx] = spl(np.exp(-1)) / np.sqrt(2)
# Need to implement some error estimation
x_scale_errors[idx] = 0.0
y_scale_errors[idx] = 0.0
elif method == 'xinterpolate':
warn("Error estimation not implemented for interpolation!")
output, cov = fit_2D_gaussian(xmat, ymat, z)
try:
aspect = output.y_stddev_0.value[0] / output.x_stddev_0.value[0]
theta = output.theta_0.value[0]
except IndexError: # raised with astropy >v3.3 (current dev.)
aspect = output.y_stddev_0.value / output.x_stddev_0.value
theta = output.theta_0.value
            rmat = ((xmat * np.cos(theta)
"""
Reactor model accounting for a static (single) particle size in one or more
CSTR reactors in series at steady-state conditions. Chemistry in each reactor
based on Liden 1988 kinetic scheme for biomass fast pyrolysis in a bubbling
fluidized bed reactor.
Test rxns- Based on Liden's (1988) kinetics
R1: W => t1*T (wood to tar), k1 = rate coeff. (1/s)
R2: T => g2*G (tar to gas), k2 = rate coeff. (1/s)
R3: W => c3*C + g3*G (wood to char + gas), k3 = rate coeff. (1/s)
Stagewise mass balances for each species:
dyW(i)/dt = -(k1+k3)*yW(i)+yW(i-1)/tau-yW(i)/tau (Wood)
dyT(i)/dt = t1*k1*yW(i)-k2*yT(i)+yT(i-1)/tau-yT(i)/tau (Tar)
dyG(i)/dt = g2*k2*yT(i)+g3*k3*yW(i)+yG(i-1)/tau-yG(i)/tau (Gas)
dyC(i)/dt = c3*k3*yW(i)+yC(i-1)/tau-yC(i)/tau (Carbonized char)
Explicit s.s. solution to mass balances if done in proper sequence
General pattern yi = (i inflow + i gen rate*tau)/(1+i sink ks*tau)
yW = (1 + 0*tau)/(1+(k1+k3)*tau)
yT = (0 + t1*k1*yW*tau)/(1+k2*tau)
yG = (0 + (g2*k2*yT+g3*k3*yW)*tau)/(1+0)
yC = (0 + c3*k3*yW*tau)/(1+0)
"""
import numpy as np
import matplotlib.pyplot as py
# Parameters
# ------------------------------------------------------------------------------
T = 773 # reaction temperature, K
nstages = 10 # number of CSTR stages
taus = 4 # total solids residence time, s
taug = 0.5 # total gas residence time, s
yfw = 1 # normalized mass fraction of initial wood, (-)
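# Example (added sketch, not part of the original script)
# ------------------------------------------------------------------------------
# Explicit steady-state solution for a single stage, following the pattern from
# the docstring above: yi = (inflow + generation*tau)/(1 + sink_rate*tau).
def stage_ss(k1, k2, k3, t1, g2, g3, c3, tau,
             yW_in=1.0, yT_in=0.0, yG_in=0.0, yC_in=0.0):
    yW = yW_in/(1 + (k1 + k3)*tau)              # wood
    yT = (yT_in + t1*k1*yW*tau)/(1 + k2*tau)    # tar
    yG = yG_in + (g2*k2*yT + g3*k3*yW)*tau      # gas
    yC = yC_in + c3*k3*yW*tau                   # carbonized char
    return yW, yT, yG, yC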
# Function
# ------------------------------------------------------------------------------
def cstr(T, nstages, taus, taug, tv, wood):
tsn = taus/nstages # solids residence time in each stage (s)
tgn = taug/nstages # gas residence time in each stage (s)
Rgas = 8.314 # ideal gas constant (J/mole K)
# kinetics parameters
phi = 0.80 # Max tar yield fraction
FC = 0.14 # Wt. fraction fixed C
t1 = 1 # Tar mass formed/mass wood converted in rxn. 1
g2 = 1 # Gas mass formed/mass tar converted in rxn. 2
c3 = FC/(1-phi) # Char mass formed/mass wood converted in rxn. 3
g3 = 1-c3 # Gas mass formed/mass wood converted in rxn. 3
    k2 = 4.28e6*np.exp(-107.5e3/Rgas/T)
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_sum_impl(test_case, device):
    input = flow.tensor(np.random.randn(2, 3)
import colour
import numpy as np
import cv2
from skimage import color
rgb_colors = [
[0, 0, 0], #black
[255, 0, 0], #red
[255, 120, 0], #orange
[255, 255, 0], #yellow
[125, 255, 0], #spring green
[0, 255, 0], #green
[0, 255, 125], #turquoise
[0, 255, 255], #cyan
[0, 125, 255], #ocean
[0, 0, 255], #blue
[125, 0, 255], #violet
[255, 0, 255], #magenta
[255, 0, 125], #raspberry
[127, 127, 127], #gray
[255, 255, 255], #white
]
N_RGBS = len(rgb_colors)
lab_colors = [cv2.cvtColor(np.reshape(np.concatenate([np.reshape(np.repeat(c[0], [64**2]), [64, 64, 1]),
                                                       np.reshape(np.repeat(c[1], [64**2])
import numpy as np
from ukfm import SO2, SE2, SEK2
import matplotlib.pyplot as plt
from scipy.linalg import block_diag
class SLAM2D:
"""
2D SLAM based on robot odometry and unknown landmark position measurements.
See a description of the model in the two references
:cite:`huangObservability2010` , :cite:`HuangA2013`.
:var T: sequence time (s).
:var odo_freq: odometry frequency (Hz).
"""
J = np.array([[0, -1],
[1, 0]])
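    """90-degree rotation (skew-symmetric) matrix."""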
max_range = 5
"""maximal range of observation (m)."""
min_range = 1
"""minimal range of observation (m)."""
N_ldk = 20
"""number of landmarks :math:`L`."""
class STATE:
"""State of the system.
        It represents the orientation and the position of the robot along with
        the landmarks observed so far.
.. math::
\\boldsymbol{\\chi} \in \\mathcal{M} = \\left\\{ \\begin{matrix}
\\mathbf{C} \in SO(2),
\\mathbf{p} \in \\mathbb R^2,
\\mathbf{p}^l_1 \in \\mathbb R^2,
\ldots,
\\mathbf{p}^l_L \in \\mathbb R^2
\\end{matrix} \\right\\}
:ivar Rot: rotation matrix :math:`\mathbf{C}`.
:ivar p: position of the robot :math:`\mathbf{p}`.
:ivar p_l: position of the landmark :math:`\mathbf{p}^l_1, \ldots,
\mathbf{p}^l_L`.
"""
def __init__(self, Rot, p, p_l=np.zeros((2, 0))):
self.Rot = Rot
self.p = p
self.p_l = p_l
class INPUT:
"""Input of the propagation model.
        The inputs are the robot velocities, which can be obtained from a
        differential wheel system.
.. math::
\\boldsymbol{\\omega} \in \\mathcal{U} = \\left\\{ \\begin{matrix}
\\mathbf{v} \in \\mathbb R,
\\omega \in \\mathbb R
\\end{matrix} \\right\\}
:ivar v: robot forward velocity :math:`v`.
:ivar gyro: robot orientation velocity :math:`\\omega`.
"""
def __init__(self, v, gyro):
self.v = v
self.gyro = gyro
def __init__(self, T, odo_freq):
# sequence time (s)
self.T = T
# odometry frequency (Hz)
self.odo_freq = odo_freq
# total number of timestamps
self.N = T*odo_freq
# integration step (s)
self.dt = 1/odo_freq
@classmethod
def f(cls, state, omega, w, dt):
""" Propagation function.
.. math::
\\mathbf{C}_{n+1} &= \\mathbf{C}_{n} \\exp\\left(\\left(\\omega +
\\mathbf{w}^{(1)} \\right) dt\\right) \\\\
\\mathbf{p}_{n+1} &= \\mathbf{p}_{n} + \\left( \\mathbf{v}_{n} +
\\mathbf{w}^{(0)} \\right) dt \\\\
\\mathbf{p}_{1,n+1}^l &= \\mathbf{p}_{1,n}^l \\\\
\\vdots \\\\
\\mathbf{p}_{L,n+1}^l &= \\mathbf{p}_{L,n}^l
:var state: state :math:`\\boldsymbol{\\chi}`.
:var omega: input :math:`\\boldsymbol{\\omega}`.
:var w: noise :math:`\\mathbf{w}`.
:var dt: integration step :math:`dt` (s).
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO2.exp((omega.gyro + w[1])*dt)),
p=state.p + state.Rot.dot(np.hstack([omega.v + w[0], 0]))*dt,
p_l=state.p_l
)
return new_state
@classmethod
def h(cls, state):
"""Observation function for 1 landmark.
.. math::
h\\left(\\boldsymbol{\\chi}\\right) =
\\mathbf{C}^T \\left( \\mathbf{p} - \\mathbf{p}^l\\right)
:var state: state :math:`\\boldsymbol{\\chi}`.
"""
y = state.Rot.T.dot(state.p_l - state.p)
return y
@classmethod
def z(cls, state, y):
"""Augmentation function.
Return a vector of the novel part of the state only.
.. math::
z\\left(\\boldsymbol{\\chi}, \mathbf{y}\\right) =
\\mathbf{C} \\mathbf{y} + \\mathbf{p}
:var state: state :math:`\\boldsymbol{\\chi}`.
:var y: measurement :math:`\\mathbf{y}`.
"""
z = state.Rot.dot(y) + state.p
return z
@classmethod
def aug_z(cls, state, y):
"""Augmentation function. Return the augmented state.
.. math::
\\boldsymbol{\\chi} \\leftarrow \\left(\\boldsymbol{\\chi},
z\\left(\\boldsymbol{\\chi}, \mathbf{y}\\right) \\right)
:var state: state :math:`\\boldsymbol{\\chi}`.
:var y: measurement :math:`\\mathbf{y}`.
"""
new_state = cls.STATE(
Rot=state.Rot,
p=state.p,
p_l=state.Rot.dot(y) + state.p
)
return new_state
@classmethod
def phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\mathbf{C} \\exp\\left(\\boldsymbol{\\xi}^{(0)}\\right) \\\\
\\mathbf{p} + \\boldsymbol{\\xi}^{(1:3)} \\\\
\\mathbf{p}_1^l + \\boldsymbol{\\xi}^{(3:5)} \\\\
\\vdots \\\\
\\mathbf{p}_L^l + \\boldsymbol{\\xi}^{(3+2L:5+2L)}
\\end{matrix} \\right)
        The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(2)
\\times \\mathbb R^{2(L+1)}`.
Its corresponding inverse operation (for robot state only) is
:meth:`~ukfm.SLAM2D.red_phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
k = int(xi[3:].shape[0] / 2)
p_ls = state.p_l + np.reshape(xi[3:], (k, 2))
new_state = cls.STATE(
Rot=state.Rot.dot(SO2.exp(xi[0])),
p=state.p + xi[1:3],
p_l=p_ls
)
return new_state
@classmethod
def red_phi(cls, state, xi):
"""Retraction (reduced).
The retraction :meth:`~ukfm.SLAM2D.phi` applied on the robot state only.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO2.exp(xi[0])),
p=state.p + xi[1:3],
p_l=state.p_l
)
return new_state
@classmethod
def red_phi_inv(cls, state, hat_state):
"""Inverse retraction (reduced).
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\left( \\begin{matrix} \\log\\left(\\mathbf{C}
\\mathbf{\\hat{C}}^T\\right) \\\\
\\mathbf{p} - \\mathbf{\\hat{p}} \\end{matrix} \\right)
        The state is viewed as an element :math:`\\boldsymbol{\chi} \\in SO(2)
\\times \\mathbb R^{2(L+1)}`.
Its corresponding retraction is :meth:`~ukfm.SLAM2D.red_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = np.hstack([SO2.log(hat_state.Rot.dot(state.Rot.T)),
hat_state.p - state.p])
return xi
@classmethod
def aug_phi(cls, state, xi):
"""Retraction used for augmenting state.
The retraction :meth:`~ukfm.SLAM2D.phi` applied on the robot state only.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO2.exp(xi[0])),
p=state.p + xi[1:]
)
return new_state
@classmethod
def aug_phi_inv(cls, state, aug_state):
"""Retraction used for augmenting state.
The inverse retraction :meth:`~ukfm.SLAM2D.phi` applied on the landmark
only.
"""
return aug_state.p_l - state.p_l
@classmethod
def up_phi(cls, state, xi):
"""Retraction used for updating state and infering Jacobian.
The retraction :meth:`~ukfm.SLAM2D.phi` applied on the robot state and
one landmark only.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO2.exp(xi[0])),
p=state.p + xi[1:3],
p_l=state.p_l + xi[3:5]
)
return new_state
@classmethod
def left_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\mathbf{C} \\mathbf{C}_\\mathbf{T} \\\\
\\mathbf{p} + \\mathbf{C} \\mathbf{r}_1 \\\\
\\mathbf{p}_1^l + \\mathbf{C} \\mathbf{r}_2 \\\\
\\vdots \\\\
\\mathbf{p}_L^l + \\mathbf{C} \\mathbf{r}_{1+L} \\\\
\\end{matrix} \\right)
where
.. math::
\\mathbf{T} = \\exp\\left(\\boldsymbol{\\xi}\\right) =
\\begin{bmatrix}
\\mathbf{C}_\\mathbf{T} & \\mathbf{r}_1 & \\cdots &
\\mathbf{r}_{1+L} \\\\
\\mathbf{0}^T & & \\mathbf{I}&
\\end{bmatrix}
        The state is viewed as an element :math:`\\boldsymbol{\chi} \\in
SE_{1+L}(2)` with left multiplication.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
chi = SEK2.exp(xi)
new_state = cls.STATE(
Rot=state.Rot.dot(chi[:2, :2]),
p=state.p + state.Rot.dot(chi[:2, 2]),
p_l=state.p_l + state.Rot.dot(chi[:2, 3:]).T
)
return new_state
@classmethod
def left_red_phi(cls, state, xi):
"""Retraction (reduced).
The retraction :meth:`~ukfm.SLAM2D.left_phi` applied on the robot state
only.
"""
return cls.left_phi(state, xi)
@classmethod
def left_red_phi_inv(cls, state, hat_state):
"""Inverse retraction (reduced).
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}
\\left(\\boldsymbol{\\chi}\\right) =
\\log\\left(\\boldsymbol{\\chi}
\\boldsymbol{\\hat{\\chi}}^{-1}\\right)
        The robot state is viewed as an element :math:`\\boldsymbol{\chi} \\in
SE(2)`.
Its corresponding retraction is :meth:`~ukfm.SLAM2D.left_red_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
chi = cls.state2chi(state)
hat_chi = cls.state2chi(hat_state)
xi = SEK2.log(SEK2.inv(chi).dot(hat_chi))
return xi
@classmethod
def left_aug_phi(cls, state, xi):
"""Retraction used for augmenting state.
The retraction :meth:`~ukfm.SLAM2D.left_phi` applied on the robot state
only.
"""
chi = SE2.exp(xi)
new_state = cls.STATE(
Rot=state.Rot.dot(chi[:2, :2]),
p=state.p + state.Rot.dot(chi[:2, 2])
)
return new_state
@classmethod
def left_aug_phi_inv(cls, state, aug_state):
"""Retraction used for augmenting state.
The inverse retraction :meth:`~ukfm.SLAM2D.left_phi` applied on the
landmark only.
"""
chi = cls.aug_state2chi(state)
aug_chi = cls.aug_state2chi(aug_state)
return SE2.log(SE2.inv(chi).dot(aug_chi))[1:3]
@classmethod
def left_up_phi(cls, state, xi):
"""Retraction used for updating state and infering Jacobian.
The retraction :meth:`~ukfm.SLAM2D.left_phi` applied on the robot state
and one landmark only.
"""
chi = SEK2.exp(xi)
new_state = cls.STATE(
Rot=state.Rot.dot(chi[:2, :2]),
p=state.p + state.Rot.dot(chi[:2, 2]),
p_l=state.p_l + np.squeeze(state.Rot.dot(chi[:2, 3:]))
)
return new_state
@classmethod
def right_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\mathbf{C}_\\mathbf{T} \\mathbf{C} \\\\
\\mathbf{C}_\\mathbf{T}\\mathbf{p} + \\mathbf{r}_1 \\\\
\\mathbf{C}_\\mathbf{T} \\mathbf{p}_1^l + \\mathbf{r}_2 \\\\
\\vdots \\\\
\\mathbf{C}_\\mathbf{T} \\mathbf{p}_L^l + \\mathbf{r}_{1+L} \\\\
\\end{matrix} \\right)
where
.. math::
\\mathbf{T} = \\exp\\left(\\boldsymbol{\\xi}\\right) =
\\begin{bmatrix}
\\mathbf{C}_\\mathbf{T} & \\mathbf{r}_1 & \\cdots &
\\mathbf{r}_{1+L} \\\\
\\mathbf{0}^T & & \\mathbf{I}&
\\end{bmatrix}
        The state is viewed as an element :math:`\\boldsymbol{\chi} \\in
SE_{1+L}(2)` with right multiplication.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
chi = SEK2.exp(xi)
p_l = (chi[:2, 3:] + chi[:2, :2].dot(state.p_l.T)).T
new_state = cls.STATE(
Rot=chi[:2, :2].dot(state.Rot),
p=chi[:2, 2] + chi[:2, :2].dot(state.p),
p_l=p_l
)
return new_state
@classmethod
def right_red_phi(cls, state, xi):
"""Retraction (reduced).
The retraction :meth:`~ukfm.SLAM2D.right_phi`.
"""
return cls.right_phi(state, xi)
@classmethod
def right_red_phi_inv(cls, state, hat_state):
"""Inverse retraction (reduced).
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}
\\left(\\boldsymbol{\\chi}\\right) =
\\log\\left(\\boldsymbol{\\hat{\\chi}}^{-1}
\\boldsymbol{\\chi}\\right)
        The robot state is viewed as an element :math:`\\boldsymbol{\chi} \\in
SE_{L+1}(2)`.
Its corresponding retraction is :meth:`~ukfm.SLAM2D.right_red_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
chi = cls.state2chi(state)
hat_chi = cls.state2chi(hat_state)
xi = SEK2.log(hat_chi.dot(SEK2.inv(chi)))
return xi
@classmethod
def right_aug_phi(cls, state, xi):
"""Retraction used for augmenting state.
The retraction :meth:`~ukfm.SLAM2D.right_phi` applied on the robot state
only.
"""
chi = SE2.exp(xi)
new_state = cls.STATE(
Rot=chi[:2, :2].dot(state.Rot),
p=chi[:2, :2].dot(state.p) + chi[:2, 2]
)
return new_state
@classmethod
def right_aug_phi_inv(cls, state, aug_state):
"""Retraction used for augmenting state.
The inverse retraction :meth:`~ukfm.SLAM2D.right_phi` applied on the
landmark only.
"""
chi = cls.aug_state2chi(state)
aug_chi = cls.aug_state2chi(aug_state)
return SE2.log(aug_chi.dot(SE2.inv(chi)))[1:3]
@classmethod
def right_up_phi(cls, state, xi):
"""Retraction used for updating state and infering Jacobian.
The retraction :meth:`~ukfm.SLAM2D.right_phi` applied on the robot state
and one landmark only.
"""
chi = SEK2.exp(xi)
new_state = cls.STATE(
Rot=chi[:2, :2].dot(state.Rot),
p=chi[:2, 2] + chi[:2, :2].dot(state.p),
p_l=np.squeeze(chi[:2, 3]) + np.squeeze(chi[:2, :2].dot(state.p_l))
)
return new_state
@classmethod
def state2chi(cls, state):
l = state.p_l.shape[0] + 1
chi = np.eye(l + 2)
chi[:2, :2] = state.Rot
chi[:2, 2] = state.p
chi[:2, 3:] = state.p_l.T
return chi
@classmethod
def aug_state2chi(cls, state):
chi = np.eye(3)
chi[:2, :2] = state.Rot
chi[:2, 2] = np.squeeze(state.p_l)
return chi
@classmethod
def get_states(cls, states, N):
Rots = np.zeros((N, 2, 2))
ps = np.zeros((N, 2))
for n in range(N):
Rots[n] = states[n].Rot
ps[n] = states[n].p
return Rots, ps
@classmethod
def get_cov(cls, list_covs, N):
covs = np.zeros((N, 3+2*cls.N_ldk, 3+2*cls.N_ldk))
for n in range(N):
P = list_covs[n]
covs[n, :P.shape[0], :P.shape[0]] = P
return covs
def errors(self, Rots, hat_Rots, ps, hat_ps):
errors = np.zeros((self.N, 3))
for n in range(self.N):
errors[n, 0] = SO2.log(Rots[n].T.dot(hat_Rots[n]))
errors[:, 1:] = ps-hat_ps
return errors
def plot_traj(self, states, ldks):
Rots, ps = self.get_states(states, self.N)
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$x$ (m)', ylabel='$y$ (m)', title="Robot position")
plt.plot(ps[:, 0], ps[:, 1], linewidth=2, c='black')
ax.scatter(ldks[:, 0], ldks[:, 1], c='red')
ax.legend([r'true position',
r'landmarks'])
ax.axis('equal')
def plot_results(self, hat_states, hat_Ps, states, ldks):
Rots, ps = self.get_states(states, self.N)
hat_Rots, hat_ps = self.get_states(hat_states, self.N)
errors = self.errors(Rots, hat_Rots, ps, hat_ps)
fig, ax = plt.subplots(figsize=(9, 6))
ax.set(xlabel='$x$ (m)', ylabel='$y$ (m)', title='Robot position')
plt.plot(ps[:, 0], ps[:, 1], linewidth=2, c='black')
plt.plot(hat_ps[:, 0], hat_ps[:, 1], c='blue')
ax.scatter(ldks[:, 0], ldks[:, 1], c='red')
ax.axis('equal')
ax.legend([r'true position', 'UKF', r'landmarks'])
hat_Ps = self.get_cov(hat_Ps, self.N)
ukf3sigma = 3 * np.sqrt(hat_Ps[:, 0, 0])
fig, ax = plt.subplots(figsize=(9, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (deg)',
title='Orientation error (deg)')
t = np.linspace(0, self.T, self.N)
plt.plot(t, 180/np.pi*errors[:, 0], c='blue')
plt.plot(t, 180/np.pi*ukf3sigma, c='blue', linestyle='dashed')
plt.plot(t, 180/np.pi*(- ukf3sigma), c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
ukf3sigma = 3 * np.sqrt(hat_Ps[:, 1, 1] + hat_Ps[:, 2, 2])
fig, ax = plt.subplots(figsize=(9, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (m)',
title='Robot position error (m)')
plt.plot(t, errors[:, 1], c='blue')
plt.plot(t, ukf3sigma, c='blue', linestyle='dashed')
plt.plot(t, -ukf3sigma, c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
def nees(self, err, Ps, Rots, ps, name):
neess = np.zeros((self.N, 2))
J = np.eye(3)
def err2nees(err, P):
# separate orientation and position
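            # dividing the position term by its dimension (2) means a
            # consistent filter gives NEES values around one for both terms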
nees_Rot = err[0]**2 / P[0, 0]
nees_p = err[1:3].dot(np.linalg.inv(P[1:3, 1:3]).dot(err[1:3]))/2
return np.array([nees_Rot, nees_p])
for n in range(10, self.N):
            # covariance needs to be mapped into a common frame for comparison
if name == 'STD':
P = Ps[n][:3, :3]
elif name == 'LEFT':
J[1:3, 1:3] = Rots[n]
P = J.dot(Ps[n][:3, :3]).dot(J.T)
else:
J[1:3, 0] = self.J.dot(ps[n])
P = J.dot(Ps[n][:3, :3]).dot(J.T)
neess[n] = err2nees(err[n], P)
return neess
def nees_print(self, ukf_nees, left_ukf_nees, right_ukf_nees, iekf_nees,
ekf_nees):
t = np.linspace(0, self.dt * self.N, self.N)
def f(x):
return np.mean(x, axis=0)
ukf_nees = f(ukf_nees)
left_ukf_nees = f(left_ukf_nees)
right_ukf_nees = f(right_ukf_nees)
iekf_nees = f(iekf_nees)
ekf_nees = f(ekf_nees)
# plot orientation nees
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='orientation NEES',
title='Robot orientation NEES', yscale="log")
plt.plot(t, ukf_nees[:, 0], c='magenta')
plt.plot(t, left_ukf_nees[:, 0], c='green')
plt.plot(t, right_ukf_nees[:, 0], c='cyan')
plt.plot(t, ekf_nees[:, 0], c='red')
plt.plot(t, iekf_nees[:, 0], c='blue')
ax.legend([r'$SO(2) \times \mathbb{R}^{2(1+L)}$ UKF',
r'\textbf{$SE_{1+L}(2)$ UKF (left)}',
r'\textbf{$SE_{1+L}(2)$ UKF (right)}', r'EKF',
r'IEKF [BB17]'])
ax.set_xlim(0, t[-1])
# plot position nees
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='position NEES',
title='Robot position NEES', yscale="log")
plt.plot(t, ukf_nees[:, 1], c='magenta')
plt.plot(t, left_ukf_nees[:, 1], c='green')
plt.plot(t, right_ukf_nees[:, 1], c='cyan')
plt.plot(t, ekf_nees[:, 1], c='red')
plt.plot(t, iekf_nees[:, 1], c='blue')
ax.legend([r'$SO(2) \times \mathbb{R}^{2(1+L)}$ UKF',
r'\textbf{$SE_{1+L}(2)$ UKF (left)}',
r'\textbf{$SE_{1+L}(2)$ UKF (right)}', r'EKF',
r'IEKF [BB17]'])
ax.set_xlim(0, t[-1])
def g(x):
return np.mean(x)
print(' ')
print(' Normalized Estimation Error Squared (NEES) w.r.t. orientation')
print(" -SO(2) x R^(2(1+L)) UKF: % .2f " % g(ukf_nees[:, 0]))
print(" -left SE_{1+L}(2) UKF : % .2f " % g(left_ukf_nees[:, 0]))
print(" -right SE_{1+L}(2) UKF : % .2f " % g(right_ukf_nees[:, 0]))
print(" -EKF : % .2f " % g(ekf_nees[:, 0]))
print(" -IEKF : % .2f " % g(iekf_nees[:, 0]))
print(' ')
print(' Normalized Estimation Error Squared (NEES) w.r.t. position')
print(" -SO(2) x R^(2(1+L)) UKF: % .2f " % g(ukf_nees[:, 1]))
print(" -left SE_{1+L}(2) UKF : % .2f " % g(left_ukf_nees[:, 1]))
print(" -right SE_{1+L}(2) UKF : % .2f " % g(right_ukf_nees[:, 1]))
print(" -EKF : % .2f " % g(ekf_nees[:, 1]))
print(" -IEKF : % .2f " % g(iekf_nees[:, 1]))
def simu_f(self, odo_std, v, gyro):
# create the map
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
r = v / gyro # radius
ldks = np.zeros((self.N_ldk, 2))
for i in range(self.N_ldk):
rho = r + self.min_range + 2
th = 2 * np.pi * i / self.N_ldk
[x, y] = pol2cart(rho, th)
# shift y w/ r since robot starts at (0,0)
ldks[i] = np.array([x, y + r])
w = np.zeros(2)
omega = []
state = [self.STATE(
Rot=np.eye(2),
p=np.zeros(2),
p_l=ldks
)]
for n in range(1, self.N):
omega.append(self.INPUT(v, gyro))
state.append(self.f(state[n-1], omega[n-1], w, self.dt))
omega[n-1].v = omega[n-1].v + odo_std[0] * np.random.randn(1)
omega[n-1].gyro = omega[n-1].gyro + odo_std[1] * np.random.randn(1)
return state, omega, ldks
def simu_h(self, states, obs_std, ldks):
ys = np.zeros((self.N, self.N_ldk, 3))
ys[:, :, 2] = -1
for n in range(self.N):
Rot = states[n].Rot
p = states[n].p
for i in range(self.N_ldk):
p_l = ldks[i]
r = np.linalg.norm(p_l - p)
if self.max_range > r > self.min_range:
ys[n, i, :2] = Rot.T.dot(
p_l-p) + obs_std*np.random.randn(2)
ys[n, i, 2] = i
return ys
def benchmark_plot(self, ukf_err, left_ukf_err, right_ukf_err, iekf_err,
ekf_err, ps, ukf_ps, left_ukf_ps, right_ukf_ps,
ekf_ps, iekf_ps):
def rmse(errs):
err = np.zeros((errs.shape[1], 2))
err[:, 0] = np.sqrt(np.mean(errs[:, :, 0]**2, axis=0))
err[:, 1] = np.sqrt(np.mean(errs[:, :, 1]**2
+ errs[:, :, 2]**2, axis=0))
return err
ukf_err = rmse(ukf_err)
left_ukf_err = rmse(left_ukf_err)
right_ukf_err = rmse(right_ukf_err)
iekf_err = rmse(iekf_err)
ekf_err = rmse(ekf_err)
# get orientation error
t = np.linspace(0, self.dt * self.N, self.N)
# plot position
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel='$y$ (m)', ylabel='$x$ (m)',
title='Robot position for a Monte-Carlo run')
plt.plot(ps[:, 0], ps[:, 1], linewidth=2, c='black')
plt.plot(ukf_ps[:, 0], ukf_ps[:, 1], c='magenta')
plt.plot(left_ukf_ps[:, 0], left_ukf_ps[:, 1], c='green')
plt.plot(right_ukf_ps[:, 0], right_ukf_ps[:, 1], c='cyan')
plt.plot(ekf_ps[:, 0], ekf_ps[:, 1], c='red')
plt.plot(iekf_ps[:, 0], iekf_ps[:, 1], c='blue')
ax.axis('equal')
ax.legend([r'true position', r'$SO(2) \times \mathbb{R}^{2(1+L)}$ UKF',
r'\textbf{$SE_{1+L}(2)$ UKF (left)}',
r'\textbf{$SE_{1+L}(2)$ UKF (right)}', r'EKF',
r'IEKF [BB17]'])
# plot attitude error
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (deg)',
title='Robot orientation error (deg)')
# error
plt.plot(t, 180/np.pi*ukf_err[:, 0], c='magenta')
plt.plot(t, 180/np.pi*left_ukf_err[:, 0], c='green')
plt.plot(t, 180/np.pi*right_ukf_err[:, 0], c='cyan')
plt.plot(t, 180/np.pi*ekf_err[:, 0], c='red')
plt.plot(t, 180/np.pi*iekf_err[:, 0], c='blue')
ax.legend([r'$SO(2) \times \mathbb{R}^{2(1+L)}$ UKF',
r'\textbf{$SE_{1+L}(2)$ UKF (left)}',
r'\textbf{$SE_{1+L}(2)$ UKF (right)}', r'EKF', r'IEKF [BB17]'])
ax.set_ylim(bottom=0)
ax.set_xlim(0, t[-1])
# plot position error
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (m)',
title='Robot position error (m)')
# error
plt.plot(t, ukf_err[:, 1], c='magenta')
plt.plot(t, left_ukf_err[:, 1], c='green')
plt.plot(t, right_ukf_err[:, 1], c='cyan')
plt.plot(t, ekf_err[:, 1], c='red')
plt.plot(t, iekf_err[:, 1], c='blue')
ax.legend([r'$SO(2) \times \mathbb{R}^{2(1+L)}$ UKF',
r'\textbf{$SE_{1+L}(2)$ UKF (left)}',
r'\textbf{$SE_{1+L}(2)$ UKF (right)}', r'EKF',
r'IEKF [BB17]'])
ax.set_ylim(bottom=0)
ax.set_xlim(0, t[-1])
return ukf_err, left_ukf_err, right_ukf_err, iekf_err, ekf_err
@staticmethod
def benchmark_print(ukf_err, left_ukf_err, right_ukf_err, iekf_err,
ekf_err):
def rmse(errs):
return np.sqrt(np.mean(errs**2))
ukf_err_p = '{:.2f}'.format(rmse(ukf_err[:, 1]))
left_ukf_err_p = '{:.2f}'.format(rmse(left_ukf_err[:, 1]))
right_ukf_err_p = '{:.2f}'.format(rmse(right_ukf_err[:, 1]))
ekf_err_p = '{:.2f}'.format(rmse(ekf_err[:, 1]))
iekf_err_p = '{:.2f}'.format(rmse(iekf_err[:, 1]))
ukf_err_rot = '{:.2f}'.format(180/np.pi*rmse(ukf_err[:, 0]))
left_ukf_err_rot = '{:.2f}'.format(180/np.pi*rmse(left_ukf_err[:, 0]))
right_ukf_err_rot = '{:.2f}'.format(
180/np.pi*rmse(right_ukf_err[:, 0]))
ekf_err_rot = '{:.2f}'.format(180/np.pi*rmse(ekf_err[:, 0]))
iekf_err_rot = '{:.2f}'.format(180/np.pi*rmse(iekf_err[:, 0]))
print(' ')
print('Root Mean Square Error w.r.t. orientation (deg)')
print(" -SO(2) x R^(2(1+L)) UKF: " + ukf_err_rot)
print(" -left SE_{1+L}(2) UKF : " + left_ukf_err_rot)
print(" -right SE_{1+L}(2) UKF : " + right_ukf_err_rot)
print(" -EKF : " + ekf_err_rot)
print(" -IEKF : " + iekf_err_rot)
print(' ')
print('Root Mean Square Error w.r.t. position (m)')
print(" -SO(2) x R^(2(1+L)) UKF: " + ukf_err_p)
print(" -left SE_{1+L}(2) UKF : " + left_ukf_err_p)
print(" -right SE_{1+L}(2) UKF : " + right_ukf_err_p)
print(" -EKF : " + ekf_err_p)
print(" -IEKF : " + iekf_err_p)
class EKF:
def __init__(self, state0, P0, f, h, Q, phi,
jacobian_propagation=None, H_num=None, aug=None,
z=None, aug_z=None):
self.state = state0
self.P = P0
self.f = f
self.h = h
self.Q = Q
self.jacobian_propagation = jacobian_propagation
self.H_num = H_num
self.phi = phi
self.new_state = self.state
self.F = np.eye(self.P.shape[0])
self.G = np.zeros((self.P.shape[0], self.Q.shape[0]))
self.H = np.zeros((0, self.P.shape[0]))
self.r = np.zeros(0)
self.R = np.zeros((0, 0))
self.TOL = 1e-9
self.q = Q.shape[0]
# for augmenting state
self.z = z
self.aug_z = aug_z
self.aug = aug
self.J = np.array([[0, -1],
[1, 0]])
def propagation(self, omega, dt):
self.state_propagation(omega, dt)
self.F, self.G = self.jacobian_propagation(omega, dt)
self.cov_propagation()
def state_propagation(self, omega, dt):
w = np.zeros(self.q)
self.new_state = self.f(self.state, omega, w, dt)
def cov_propagation(self):
P = self.F.dot(self.P).dot(self.F.T) + self.G.dot(self.Q).dot(self.G.T)
self.P = (P+P.T)/2
self.state = self.new_state
def state_update(self):
S = self.H.dot(self.P).dot(self.H.T) + self.R
# gain matrix
K = np.linalg.solve(S, self.P.dot(self.H.T).T).T
# innovation
xi = K.dot(self.r)
# update state
self.state = self.phi(self.state, xi)
# update covariance
P = (np.eye(self.P.shape[0])-K.dot(self.H)).dot(self.P)
self.P = (P+P.T)/2
# init for next update
self.H = np.zeros((0, self.P.shape[0]))
self.r = np.zeros(0)
self.R = np.zeros((0, 0))
def ekf_jacobian_update(self, y, idxs, R):
H_idx = np.zeros((2, 5))
H_idx[:, 0] = -self.state.Rot.T.dot(
self.J.dot((self.state.p_l - self.state.p)))
H_idx[:, 1:3] = -self.state.Rot.T
H_idx[:, 3:] = self.state.Rot.T
H = np.zeros((y.shape[0], self.P.shape[0]))
H[:, idxs] = H_idx
# compute residual
r = y - self.h(self.state)
self.H = np.vstack((self.H, H))
self.r = np.hstack((self.r, r))
self.R = block_diag(self.R, R)
def iekf_jacobian_update(self, y, idxs, R):
H_idx = np.zeros((2, 5))
H_idx[:, 1:3] = -self.state.Rot.T
H_idx[:, 3:] = self.state.Rot.T
H = np.zeros((y.shape[0], self.P.shape[0]))
H[:, idxs] = H_idx
# compute residual
r = y - self.h(self.state)
self.H = np.vstack((self.H, H))
self.r = np.hstack((self.r, r))
self.R = block_diag(self.R, R)
def ekf_FG_ana(self, omega, dt):
F = np.eye(self.P.shape[0])
F[1:3, 0] = self.state.Rot.dot(self.J).dot(np.hstack([omega.v, 0]))*dt
G = np.zeros((self.P.shape[0], 2))
G[1:3, 0] = self.state.Rot.dot(np.array([1, 0]))*dt
G[0, 1] = dt
return F, G
def iekf_FG_ana(self, omega, dt):
F = np.eye(self.P.shape[0])
G = np.zeros((self.P.shape[0], 2))
G[1:3, 0] = self.state.Rot.dot(np.array([1, 0]))*dt
G[0, 1] = dt
p_temp = -self.J.dot(np.hstack([np.expand_dims(self.state.p, 1),
self.state.p_l.T]))
G[1:, 1] = np.reshape(p_temp, -1, order='F') * dt
return F, G
def ekf_augment(self, y, aug_idxs, R):
self.state.p_l = np.squeeze(self.state.p_l)
HR =
|
np.zeros((2, 3))
|
numpy.zeros
|
import numpy as np
import torch
import matplotlib.pyplot as plt
from data_load import get_data
from eval_skrnn import load_pretrained_congen, plot_dataset, draw_image
from matplotlib import cm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
bi_mode = 2
data_type = 'interpolation'
temperature = 0.1
interpolation_mode = 'slerp' #'slerp' or 'linear'
n_alpha = 10
alpha_list = np.linspace(0,1,n_alpha)
color = cm.get_cmap('brg', n_alpha*2)
data_type_1 = "cat"
data_type_2 = "cake"
im_nbr_1 = 1000
im_nbr_2 = 1000
"""Redefine subfunctions of function skrnn_sample in model.py"""
def adjust_temp(pi_pdf, temp):
pi_pdf = np.log(pi_pdf) / temp
pi_pdf -= pi_pdf.max()
pi_pdf = np.exp(pi_pdf)
pi_pdf /= pi_pdf.sum()
return pi_pdf
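# e.g. adjust_temp(np.array([0.5, 0.3, 0.2]), 0.5) sharpens the distribution
# towards its mode, while temp > 1 flattens it towards uniform.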
def get_pi_id(x, dist, temp=1.0):
# implementing the cumulative index retrieval
dist = adjust_temp(np.copy(dist.detach().cpu().numpy()), temp)
N = dist.shape[0]
accumulate = 0
for i in range(0, N):
accumulate += dist[i]
if (accumulate >= x):
return i
return -1
def sample_gaussian_2d(mu1, mu2, s1, s2, rho, temp=1.0):
s1 *= temp * temp
s2 *= temp * temp
mean = [mu1, mu2]
cov = [[s1 * s1, rho * s1 * s2], [rho * s1 * s2, s2 * s2]]
x = np.random.multivariate_normal(mean, cov, 1)
return x[0][0], x[0][1]
"""Interpolation function: two sorts of interpolation : linear or spherical-linear"""
def interpolation(sketch_1, sketch_2, alpha, temperature, interp_mode = 'linear'):
hidden_enc = (torch.zeros(bi_mode, 1, hid_dim, device=device), torch.zeros(bi_mode, 1, hid_dim, device=device))
hidden_dec = (torch.zeros(1, 1, hid_dim, device=device), torch.zeros(1, 1, hid_dim, device=device))
"""Compute latent variables of each sketch"""
z_1, hidden_dec_1, mu_1, sigma_1 = encoder(sketch_1, hidden_enc)
z_2, hidden_dec_2, mu_2, sigma_2 = encoder(sketch_2, hidden_enc)
"""Compute latent variables of the interpolated sketch"""
if interp_mode == 'linear':
z = alpha * z_1 + (1.0 - alpha) * z_2
hidden_dec = (alpha * hidden_dec_1[0] + (1.0 - alpha) * hidden_dec_2[0],
alpha * hidden_dec_1[1] + (1.0 - alpha) * hidden_dec_2[1])
else :
cos_theta_z = torch.vdot(z_1.squeeze(0), z_2.squeeze(0))/(torch.norm(z_1.squeeze(0))*torch.norm(z_2.squeeze(0)))
theta_z = torch.acos(cos_theta_z)
z = z_1 * torch.sin(alpha*theta_z)/torch.sin(theta_z) + z_2 * torch.sin((1.0-alpha)*theta_z)/torch.sin(theta_z)
cos_theta_h = torch.vdot(torch.cat([hidden_dec_1[0], hidden_dec_1[1]],2).squeeze(0).squeeze(0),
torch.cat([hidden_dec_2[0], hidden_dec_2[1]],2).squeeze(0).squeeze(0))/(torch.norm(torch.cat([hidden_dec_1[0], hidden_dec_1[1]],2).squeeze(0).squeeze(0))*torch.norm(torch.cat([hidden_dec_2[0], hidden_dec_2[1]],2).squeeze(0).squeeze(0)))
theta_h = torch.acos(cos_theta_h)
hidden_dec = (hidden_dec_1[0] * torch.sin(alpha*theta_h)/torch.sin(theta_h) + hidden_dec_2[0] * torch.sin((1.0-alpha)*theta_h)/torch.sin(theta_h),
hidden_dec_1[1] * torch.sin(alpha*theta_h)/torch.sin(theta_h) + hidden_dec_2[1] * torch.sin((1.0-alpha)*theta_h)/torch.sin(theta_h))
time_step = max_seq_len
end_stroke = time_step
"""Compute the decoded interpolated sketch (as done in skrnn_sample in model.py)"""
start=[0,0,1,0,0]
prev_x = torch.tensor(start,dtype=torch.float, device=device)
strokes = np.zeros((time_step, 5), dtype=np.float32)
mixture_params = []
for i in range(time_step):
gmm_params, hidden_dec = decoder(prev_x.unsqueeze(0).unsqueeze(0), z, hidden_dec)
q, pi, mu1, mu2, s1, s2, rho = gmm_params[0][0],gmm_params[1][0],gmm_params[2][0],gmm_params[3][0],gmm_params[4][0],gmm_params[5][0],gmm_params[6][0]
idx = get_pi_id(np.random.random(), pi, temperature)
eos_id = get_pi_id(np.random.random(), q, temperature)
eos = [0, 0, 0]
eos[eos_id] = 1
next_x1, next_x2 = sample_gaussian_2d(mu1[idx].detach().cpu().numpy(), mu2[idx].detach().cpu().numpy(),
s1[idx].detach().cpu().numpy(), s2[idx].detach().cpu().numpy(),
rho[idx].detach().cpu().numpy())
mixture_params.append([float(mu1[idx].detach().cpu()),float(mu2[idx].detach().cpu()), float(s1[idx].detach().cpu()),
float(s2[idx].detach().cpu()), float(rho[idx].detach().cpu()), q])
strokes[i, :] = [next_x1, next_x2, eos[0], eos[1], eos[2]]
if eos[-1] == 1:
end_stroke = i+1
break
prev_x[0], prev_x[1], prev_x[2], prev_x[3], prev_x[4] = next_x1, next_x2, eos[0], eos[1], eos[2]
    mix_params = np.array(mixture_params)
import os
import math
import numpy as np
from PIL import Image
from skimage.draw import polygon, polygon_perimeter
from sklearn.decomposition import FastICA
def img2_coord(img, init=None):
assert np.max(img) <= 1.0
if init is None:
init = np.zeros((img.shape[0] + 200, img.shape[1] + 200))
init[100:-100, 100:-100] = img
img = init
img_size = img.shape[0]
tile_x = np.tile(np.arange(img_size), (img_size, 1))
tile_y = tile_x.T
mean_x = np.sum(img * tile_x) / np.sum(img)
mean_y = np.sum(img * tile_y) / np.sum(img)
dist_mean_x = np.abs(mean_x - tile_x) * img
dist_mean_y = np.abs(mean_y - tile_y) * img
hypo = np.max(((dist_mean_x * dist_mean_x) + (dist_mean_y * dist_mean_y)))
diff_mean_x = tile_x[img > 0].flatten() - mean_x
diff_mean_y = tile_y[img > 0].flatten() - mean_y
m = np.stack([diff_mean_x, diff_mean_y])
decomposer = FastICA(2)
decomposer.fit(m.T)
Uica = decomposer.mixing_
# print('ICA vectors')
norms = np.sqrt((Uica ** 2).sum(axis=0))
Uica = Uica / np.sqrt((Uica ** 2).sum(axis=0))
if norms[0] > norms[1]:
        rotate = -np.arctan2(Uica[0, 0], Uica[1, 0])
'''
Created on Aug 7, 2019
@author: lab
'''
from ScopeFoundry.data_browser import DataBrowserView
from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit
from FoundryDataBrowser.viewers.scalebars import ConfocalScaleBar
from ScopeFoundry.widgets import RegionSlicer
from ScopeFoundry.helper_funcs import sibling_path
from ScopeFoundry.logged_quantity import LQCollection
from scipy.stats import spearmanr
import os
import time
from datetime import datetime
import h5py
import numpy as np
from qtpy import QtCore, QtWidgets, QtGui
import pyqtgraph as pg
import pyqtgraph.dockarea as dockarea
from lxml import includes
import traceback
class HyperSpectralBaseView(DataBrowserView):
name = 'HyperSpectralBaseView'
def setup(self):
self.data_loaded = False
## Dummy data Structures (override in func:self.load_data())
self.hyperspec_data = np.arange(10*10*34).reshape( (10,10,34) )
self.display_image = self.hyperspec_data.sum(-1)# np.random.rand(10,10)
self.spec_x_array = np.arange(34)
# Call :func:set_scalebar_params() during self.load_data() to add a scalebar!
self.scalebar_type = None
# Will be filled derived maps and x_arrays
self.display_images = dict()
self.spec_x_arrays = dict()
## Graphs and Interface
self.line_colors = ['w', 'r', 'b', 'y', 'm', 'c', 'g']
self.plot_n_fit = PlotNFit(Ndata_lines=2, pens=['g']+self.line_colors)
# Docks
self.ui = self.dockarea = dockarea.DockArea()
self.image_dock = self.dockarea.addDock(name='Image')
self.spec_dock = self.dockarea.addDock(self.plot_n_fit.graph_dock)
self.settings_dock = self.dockarea.addDock(name='settings',
position='left', relativeTo=self.image_dock)
self.export_dock = self.dockarea.addDock(name='export & adv. settings',
position='below', relativeTo=self.settings_dock)
self.dockarea.addDock(self.plot_n_fit.settings_dock,
relativeTo=self.settings_dock, position='below')
self.corr_dock = self.dockarea.addDock(name='correlation',
position='right', relativeTo = self.spec_dock)
# Image View
self.imview = pg.ImageView()
self.imview.getView().invertY(False) # lower left origin
self.image_dock.addWidget(self.imview)
self.graph_layout = self.plot_n_fit.graph_layout
# Rectangle ROI
self.rect_roi = pg.RectROI([20, 20], [20, 20], pen=self.line_colors[0])
self.rect_roi.addTranslateHandle((0.5,0.5))
self.imview.getView().addItem(self.rect_roi)
self.rect_roi.sigRegionChanged[object].connect(self.on_change_rect_roi)
# Point ROI
self.circ_roi = pg.CircleROI( (0,0), (2,2) , movable=True, pen=self.line_colors[1])
#self.circ_roi.removeHandle(self.circ_roi.getHandles()[0])
h = self.circ_roi.addTranslateHandle((0.5,.5))
h.pen = pg.mkPen(pen=self.line_colors[1])
h.update()
self.imview.getView().addItem(self.circ_roi)
self.circ_roi.removeHandle(0)
self.circ_roi_plotline = pg.PlotCurveItem([0], pen=self.line_colors[1])
self.imview.getView().addItem(self.circ_roi_plotline)
self.circ_roi.sigRegionChanged[object].connect(self.on_update_circ_roi)
# Spec plot
self.spec_plot = self.plot_n_fit.plot
self.spec_plot.setLabel('left', 'Intensity', units='counts')
self.rect_plotdata = self.plot_n_fit.data_lines[0]
self.point_plotdata = self.plot_n_fit.data_lines[1]
self.point_plotdata.setZValue(-1)
#settings
S = self.settings
self.default_display_image_choices = ['default', 'sum']
S.New('display_image', str, choices = self.default_display_image_choices, initial = 'default')
S.display_image.add_listener(self.on_change_display_image)
self.default_x_axis_choices = ['default', 'index']
self.x_axis = S.New('x_axis', str, initial = 'default', choices = self.default_x_axis_choices)
self.x_axis.add_listener(self.on_change_x_axis)
bg_subtract_choices = ('None', 'bg_slice', 'costum_const')
self.bg_subtract = S.New('bg_subtract', str, initial='None',
choices=bg_subtract_choices)
self.bg_counts = S.New('bg_value', initial=0, unit='cts/bin')
self.bg_counts.add_listener(self.update_display)
self.binning = S.New('binning', int, initial = 1, vmin=1)
self.binning.add_listener(self.update_display)
self.norm_data = S.New('norm_data', bool, initial = False)
self.norm_data.add_listener(self.update_display)
S.New('default_view_on_load', bool, initial=True)
self.spatial_binning = S.New('spatial_binning', int, initial = 1, vmin=1)
self.spatial_binning.add_listener(self.bin_spatially)
self.show_lines = ['show_circ_line','show_rect_line']
for x in self.show_lines:
lq = S.New(x, bool, initial=True)
lq.add_listener(self.on_change_show_lines)
# Settings Widgets
        self.settings_widgets = [] # Hack part 1/2: allows using settings.New_UI() with settings defined in scan_specific_setup()
font = QtGui.QFont("Times", 12)
font.setBold(True)
self.x_slicer = RegionSlicer(self.spec_plot, name='x_slice',
#slicer_updated_func=self.update_display,
brush = QtGui.QColor(0,255,0,50),
ZValue=10, font=font, initial=[100,511],
activated=True)
self.bg_slicer = RegionSlicer(self.spec_plot, name='bg_slice',
#slicer_updated_func=self.update_display,
brush = QtGui.QColor(255,255,255,50),
ZValue=11, font=font, initial=[0,80], label_line=0)
self.x_slicer.region_changed_signal.connect(self.update_display)
self.bg_slicer.region_changed_signal.connect(self.update_display)
self.bg_slicer.activated.add_listener(lambda:self.bg_subtract.update_value('bg_slice') if self.bg_slicer.activated.val else None)
self.settings_widgets.append(self.x_slicer.New_UI())
self.settings_widgets.append(self.bg_slicer.New_UI())
## Setting widgets, (w/o logged quantities)
self.update_display_pushButton = QtWidgets.QPushButton(text = 'update display')
self.settings_widgets.append(self.update_display_pushButton)
self.update_display_pushButton.clicked.connect(self.update_display)
self.default_view_pushButton = QtWidgets.QPushButton(text = 'default img view')
self.settings_widgets.append(self.default_view_pushButton)
self.default_view_pushButton.clicked.connect(self.default_image_view)
self.recalc_median_pushButton = QtWidgets.QPushButton(text = 'recalc median map')
self.settings_widgets.append(self.recalc_median_pushButton)
self.recalc_median_pushButton.clicked.connect(self.recalc_median_map)
self.recalc_sum_pushButton = QtWidgets.QPushButton(text = 'recalc sum map')
self.settings_widgets.append(self.recalc_sum_pushButton)
self.recalc_sum_pushButton.clicked.connect(self.recalc_sum_map)
self.delete_current_display_image_pushButton = QtWidgets.QPushButton(text = 'delete image')
self.settings_widgets.append(self.delete_current_display_image_pushButton)
self.delete_current_display_image_pushButton.clicked.connect(self.delete_current_display_image)
#correlation plot
self.corr_layout = pg.GraphicsLayoutWidget()
self.corr_plot = self.corr_layout.addPlot()
self.corr_plotdata = pg.ScatterPlotItem(x=[0,1,2,3,4], y=[0,2,1,3,2], size=17,
pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 60))
self.corr_plot.addItem(self.corr_plotdata)
self.corr_plotdata.sigClicked.connect(self.corr_plot_clicked)
self.corr_dock.addWidget(self.corr_layout)
self.corr_settings = CS = LQCollection()
self.cor_X_data = self.corr_settings.New('cor_X_data', str, choices = self.default_display_image_choices,
initial = 'default')
self.cor_Y_data = self.corr_settings.New('cor_Y_data', str, choices = self.default_display_image_choices,
initial = 'sum')
self.cor_X_data.add_listener(self.on_change_corr_settings)
self.cor_Y_data.add_listener(self.on_change_corr_settings)
self.corr_ui = self.corr_settings.New_UI()
self.corr_dock.addWidget(self.corr_ui)
# map exporter
self.map_export_settings = MES = LQCollection()
MES.New('include_scale_bar', bool, initial = True)
MES.New('scale_bar_width', float, initial=1, spinbox_decimals = 3)
MES.New('scale_bar_text', str, ro=False)
map_export_ui = MES.New_UI()
self.export_dock.addWidget( map_export_ui )
self.export_maps_as_jpegs_pushButton = QtWidgets.QPushButton('export maps as jpegs')
self.export_maps_as_jpegs_pushButton.clicked.connect(self.export_maps_as_jpegs)
self.export_dock.addWidget( self.export_maps_as_jpegs_pushButton )
self.save_state_pushButton = QtWidgets.QPushButton(text = 'save state')
self.export_dock.addWidget(self.save_state_pushButton)
self.save_state_pushButton.clicked.connect(self.save_state)
# finalize settings widgets
        self.scan_specific_setup() # more settings_widgets could be generated here (part 2/2)
hide_settings = ['norm_data', 'show_circ_line','show_rect_line',
'default_view_on_load', 'spatial_binning',
'x_axis']
self.settings_ui = self.settings.New_UI(exclude=hide_settings)
self.settings_dock.addWidget(self.settings_ui)
self.hidden_settings_ui = self.settings.New_UI(include=hide_settings)
self.export_dock.addWidget(self.hidden_settings_ui)
ui_widget = QtWidgets.QWidget()
gridLayout = QtWidgets.QGridLayout()
gridLayout.setSpacing(0)
gridLayout.setContentsMargins(0, 0, 0, 0)
ui_widget.setLayout(gridLayout)
for i,w in enumerate(self.settings_widgets):
gridLayout.addWidget(w, int(i/2), i%2)
self.settings_dock.addWidget(ui_widget)
self.plot_n_fit.add_button('fit_map', self.fit_map)
self.settings_dock.raiseDock()
self.plot_n_fit.settings_dock.setStretch(1, 1)
self.export_dock.setStretch(1,1)
self.settings_dock.setStretch(1, 1)
for layout in [self.settings_ui.layout(), self.export_dock.layout, ]:
VSpacerItem = QtWidgets.QSpacerItem(0, 0,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
layout.addItem(VSpacerItem)
def fit_map(self):
x, hyperspec = self.get_xhyperspec_data(apply_use_x_slice=True)
keys,images = self.plot_n_fit.fit_hyperspec(x, hyperspec)
if len(keys) == 1:
self.add_display_image(keys[0], images)
else:
for key, image in zip(keys, images):
self.add_display_image(key, image)
def add_spec_x_array(self, key, array):
self.spec_x_arrays[key] = array
self.settings.x_axis.add_choices(key, allow_duplicates=False)
def add_display_image(self, key, image):
key = self.add_descriptor_suffixes(key)
self.display_images[key] = image
self.settings.display_image.add_choices(key, allow_duplicates=False)
self.cor_X_data.change_choice_list(self.display_images.keys())
self.cor_Y_data.change_choice_list(self.display_images.keys())
self.cor_X_data.update_value(self.cor_Y_data.val)
self.cor_Y_data.update_value(key)
self.on_change_corr_settings()
print('added', key, image.shape)
def add_descriptor_suffixes(self, key):
if self.x_slicer.activated.val:
key += '_x{}-{}'.format(self.x_slicer.start.val, self.x_slicer.stop.val)
if self.settings['bg_subtract'] == 'bg_slice' and self.bg_slicer.activated.val:
key += '_bg{}-{}'.format(self.bg_slicer.start.val, self.bg_slicer.stop.val)
if self.settings['bg_subtract'] == 'costum_const':
key += '_bg{:1.2f}'.format(self.bg_counts.val)
return key
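# Illustrative examples of the resulting keys (slicer/settings values are assumptions):
#   x_slicer active with start=400, stop=700              -> 'median_map_x400-700'
#   bg_subtract == 'bg_slice', bg_slicer 100..150 active  -> 'median_map_x400-700_bg100-150'
#   bg_subtract == 'costum_const', bg_counts = 12.5       -> '..._bg12.50'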
def delete_current_display_image(self):
key = self.settings.display_image.val
del self.display_images[key]
self.settings.display_image.remove_choices(key)
self.cor_X_data.remove_choices(key)
self.cor_Y_data.remove_choices(key)
def get_xy(self, ji_slice, apply_use_x_slice=False):
'''
returns processed hyperspec_data averaged over a given spatial slice.
'''
x,hyperspec_dat = self.get_xhyperspec_data(apply_use_x_slice)
y = hyperspec_dat[ji_slice].mean(axis=(0,1))
#self.databrowser.ui.statusbar.showMessage('get_xy(), counts in slice: {}'.format( y.sum() ) )
if self.settings['norm_data']:
y = norm(y)
return (x,y)
def get_bg(self):
bg_subtract_mode = self.bg_subtract.val
if bg_subtract_mode == 'bg_slice' and hasattr(self, 'bg_slicer'):
if not self.bg_slicer.activated.val:
self.bg_slicer.activated.update_value(True)
bg_slice = self.bg_slicer.slice
bg = self.hyperspec_data[:,:,bg_slice].mean()
self.bg_slicer.set_label(title=bg_subtract_mode,
text='{:1.1f} cts<br>{} bins'.format(bg,bg_slice.stop-bg_slice.start))
elif bg_subtract_mode == 'costum_const':
bg = self.bg_counts.val
self.bg_slicer.set_label('', title=bg_subtract_mode)
else:
bg = 0
#self.bg_slicer.set_label('', title=bg_subtract_mode)
return bg
def get_xhyperspec_data(self, apply_use_x_slice=True):
'''
returns processed hyperspec_data
'''
bg = self.get_bg()
hyperspec_data = self.hyperspec_data
x = self.spec_x_array
if apply_use_x_slice and self.x_slicer.activated.val:
x = x[self.x_slicer.slice]
hyperspec_data = hyperspec_data[:,:,self.x_slicer.slice]
binning = self.settings['binning']
if binning != 1:
x,hyperspec_data = bin_y_average_x(x, hyperspec_data, binning, -1, datapoints_lost_warning=False)
bg *= binning
msg = 'effective subtracted bg value is binning*bg ={:0.1f} which is up to {:2.1f}% of max value.'.format(bg, bg/np.max(hyperspec_data)*100 )
self.databrowser.ui.statusbar.showMessage(msg)
if self.settings['norm_data']:
return (x,norm_map(hyperspec_data-bg))
else:
return (x,hyperspec_data-bg)
def on_change_x_axis(self):
key = self.settings['x_axis']
if key in self.spec_x_arrays:
self.spec_x_array = self.spec_x_arrays[key]
self.x_slicer.set_x_array(self.spec_x_array)
self.bg_slicer.set_x_array(self.spec_x_array)
self.spec_plot.setLabel('bottom', key)
self.update_display()
def on_change_display_image(self):
key = self.settings['display_image']
if key in self.display_images:
self.display_image = self.display_images[key]
self.update_display_image()
if self.display_image.shape == (1,1):
self.databrowser.ui.statusbar.showMessage('Can not display single pixel image!')
def scan_specific_setup(self):
#add settings and export_settings. Append widgets to self.settings_widgets and self.export_widgets
pass
def is_file_supported(self, fname):
# override this!
return False
def post_load(self):
# override this!
pass
def on_change_data_filename(self, fname):
self.data_loaded = False
self.reset()
if fname == "0":
return
try:
self.scalebar_type = None
self.load_data(fname)
self.data_loaded = True
if self.settings['spatial_binning'] != 1:
self.hyperspec_data = bin_2D(self.hyperspec_data, self.settings['spatial_binning'])
self.display_image = bin_2D(self.display_image, self.settings['spatial_binning'])
print('on_change_data_filename', self.display_image.sum())
except Exception as err:
HyperSpectralBaseView.load_data(self, fname) # load default dummy data
self.databrowser.ui.statusbar.showMessage("failed to load {}: {}".format(fname, err))
raise(err)
finally:
self.display_images['default'] = self.display_image
self.display_images['sum'] = self.hyperspec_data.sum(axis=-1)
self.spec_x_arrays['default'] = self.spec_x_array
self.spec_x_arrays['index'] = np.arange(self.hyperspec_data.shape[-1])
self.databrowser.ui.statusbar.clearMessage()
self.post_load()
self.add_scalebar()
self.on_change_display_image()
self.on_change_corr_settings()
self.update_display()
self.on_change_x_axis()
print('loaded new file')
if self.settings['default_view_on_load']:
self.default_image_view()
def add_scalebar(self):
''' Not intended to be called directly: call set_scalebar_params() during load_data() instead.'''
if hasattr(self, 'scalebar'):
self.imview.getView().removeItem(self.scalebar)
del self.scalebar
num_px = self.display_image.shape[1] #horizontal dimension!
if self.scalebar_type is None:
#matplotlib export
self.unit_per_px = 1
self.map_export_settings['scale_bar_width'] = int(num_px/4)
self.map_export_settings['scale_bar_text'] = '{} pixels'.format(int(num_px/4))
if self.scalebar_type is not None:
kwargs = self.scalebar_kwargs
span = self.scalebar_kwargs['span'] # this is in meters; convert according to its magnitude
w_meter = span / 4
mag = int(np.log10(w_meter))
conv_fac, unit = {0: (1,'m'),
-1:(1e2,'cm'),-2:(1e3,'mm'), -3:(1e3,'mm'),
-4:(1e6,'\u03bcm'),-5:(1e6,'\u03bcm'), -6:(1e6,'\u03bcm'), #\mu
-7:(1e9,'nm'),-8:(1e9,'nm'), -9:(1e9,'nm'),
-10:(1e10,'\u212b'),
-11:(1e12,'pm'), -12:(1e12,'pm')}[mag]
#matplotlib export
self.unit_per_px = span * conv_fac / num_px
self.map_export_settings['scale_bar_width'] = int(w_meter * conv_fac)
self.map_export_settings['scale_bar_text'] = f'{int(w_meter * conv_fac)} {(unit)}'
if self.scalebar_type == 'ConfocalScaleBar':
self.scalebar = ConfocalScaleBar(num_px=num_px,
**kwargs)
self.scalebar.setParentItem(self.imview.getView())
self.scalebar.anchor((1, 1), (1, 1), offset=kwargs['offset'])
elif self.scalebar_type is None:
self.scalebar = None
def set_scalebar_params(self, h_span, units='m', scalebar_type='ConfocalScaleBar',
stroke_width=10, brush='w', pen='k', offset=(-20, -20)):
'''
call this function during load_data() to add a scalebar!
*h_span* horizontal length of image in units of *units* if positive.
Else, scalebar is in units of pixels (*units* ignored).
*units* SI length unit of *h_span*.
*scalebar_type* is either `None` (no scalebar will be added)
or `"ConfocalScaleBar"` (default).
*stroke_width*, *brush*, *pen* and *offset* affect appearance and
positioning of the scalebar.
'''
assert scalebar_type in [None, 'ConfocalScaleBar']
self.scalebar_type = scalebar_type
span_meter = {'m':1, 'cm':1e-2, 'mm':1e-3, 'um':1e-6,
'nm':1e-9, 'pm':1e-12, 'fm':1e-15}[units] * h_span
self.scalebar_kwargs = {'span':span_meter, 'brush':brush, 'pen':pen,
'width':stroke_width, 'offset':offset}
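# Usage sketch (hypothetical subclass): inside its load_data(), a view showing a 50 um wide
# scan could call
#   self.set_scalebar_params(h_span=50, units='um')
# which makes add_scalebar() draw a ConfocalScaleBar spanning roughly a quarter of the image width.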
@QtCore.Slot()
def update_display(self):
# pyqtgraph axes are (x,y), but display_images are in (y,x) so we need to transpose
if self.display_image is not None:
self.update_display_image()
self.on_change_rect_roi()
self.on_update_circ_roi()
def update_display_image(self):
if self.display_image is not None:
self.imview.setImage(self.display_image.T)
def reset(self):
'''
resets the dictionaries
'''
keys_to_delete = list( set(self.display_images.keys()) - set(self.default_display_image_choices) )
for key in keys_to_delete:
del self.display_images[key]
keys_to_delete = list( set(self.spec_x_arrays.keys()) - set(self.default_x_axis_choices) )
for key in keys_to_delete:
del self.spec_x_arrays[key]
self.settings.display_image.change_choice_list(self.default_display_image_choices)
self.settings.x_axis.change_choice_list(self.default_x_axis_choices)
def load_data(self, fname):
"""
override to set hyperspectral dataset and the display image
need to define:
* self.hyperspec_data (shape Ny, Nx, Nspec)
* self.display_image (shape Ny, Nx)
* self.spec_x_array (shape Nspec)
"""
self.hyperspec_data = np.arange(10*10*34).reshape( (10,10,34) )
self.display_image = self.hyperspec_data.sum(-1)
self.spec_x_array = np.arange(34)
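# Minimal sketch of a concrete override (hypothetical HDF5 layout; the dataset names are assumptions):
#   def load_data(self, fname):
#       with h5py.File(fname, 'r') as f:
#           self.hyperspec_data = f['spec_map'][:]       # (Ny, Nx, Nspec)
#           self.spec_x_array = f['wavelengths'][:]      # (Nspec,)
#       self.display_image = self.hyperspec_data.sum(axis=-1)   # (Ny, Nx)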
@QtCore.Slot(object)
def on_change_rect_roi(self, roi=None):
# pyqtgraph axes are (x,y), but hyperspec is in (y,x,spec) hence axes=(1,0)
roi_slice, roi_tr = self.rect_roi.getArraySlice(self.hyperspec_data, self.imview.getImageItem(), axes=(1,0))
self.rect_roi_slice = roi_slice
x,y = self.get_xy(self.rect_roi_slice, apply_use_x_slice=False)
self.plot_n_fit.update_data(x, y, 0, is_fit_data=False)
x_fit_data, y_fit_data = self.get_xy(self.rect_roi_slice, apply_use_x_slice=True)
self.plot_n_fit.update_fit_data(x_fit_data, y_fit_data)
text = self.plot_n_fit.result_message
title = self.plot_n_fit.state_info + ' rect'
self.x_slicer.set_label(text, title, color = self.line_colors[0])
self.on_change_corr_settings()
@QtCore.Slot(object)
def on_update_circ_roi(self, roi=None):
if roi is None:
roi = self.circ_roi
roi_state = roi.saveState()
x0, y0 = roi_state['pos']
xc = x0 + 1
yc = y0 + 1
Ny, Nx, Nspec = self.hyperspec_data.shape
i = max(0, min(int(xc), Nx-1))
j = max(0, min(int(yc), Ny-1))
self.circ_roi_plotline.setData([xc, i+0.5], [yc, j + 0.5])
self.circ_roi_ji = (j,i)
self.circ_roi_slice = np.s_[j:j+1,i:i+1]
x,y = self.get_xy(self.circ_roi_slice, apply_use_x_slice=False)
self.plot_n_fit.update_data(x, y, 1, is_fit_data=False)
x_fit_data, y_fit_data = self.get_xy(self.circ_roi_slice, apply_use_x_slice=True)
self.plot_n_fit.update_fit_data(x_fit_data, y_fit_data)
text = self.plot_n_fit.result_message
title = self.plot_n_fit.state_info + ' circ'
self.x_slicer.set_label(text, title, color = self.line_colors[1])
self.on_change_corr_settings()
def on_change_show_lines(self):
self.point_plotdata.setVisible(self.settings['show_circ_line'])
self.rect_plotdata.setVisible(self.settings['show_rect_line'])
def default_image_view(self):
'sets rect_roi congruent to imageItem and optimizes size of imageItem to fit the ViewBox'
iI = self.imview.imageItem
h,w = iI.height(), iI.width()
self.rect_roi.setSize((w,h))
self.rect_roi.setPos((0,0))
self.imview.getView().enableAutoRange()
self.spec_plot.enableAutoRange()
def recalc_median_map(self):
x,hyperspec_data = self.get_xhyperspec_data(apply_use_x_slice=True)
median_map = spectral_median_map(hyperspec_data,x)
self.add_display_image('median_map', median_map)
def recalc_sum_map(self):
_,hyperspec_data = self.get_xhyperspec_data(apply_use_x_slice=True)
_sum = hyperspec_data.sum(-1)
self.add_display_image('sum', _sum)
def on_change_corr_settings(self):
if not self.data_loaded:
return
try:
xname = self.corr_settings['cor_X_data']
yname = self.corr_settings['cor_Y_data']
X = self.display_images[xname]
Y = self.display_images[yname]
#Note, the correlation plot is a dimensionality reduction
# (i,j,X,Y) --> (X,Y). To map the scatter points back to the image
# we need to associate every (X,Y) on the correlation plot with
# their indices (i,j); in particular
# indices = [(j0,i0), (j0,i1), ...]
indices = list( np.indices((X.shape)).reshape(2,-1).T )
self.corr_plotdata.setData(X.flat, Y.flat, brush=pg.mkBrush(255, 255, 255, 50),
pen=None, data=indices)
# mark points within rect_roi
mask = np.zeros_like(X, dtype=bool)
mask[self.rect_roi_slice[0:2]] = True
cor_x = X[mask].flatten()
cor_y = Y[mask].flatten()
self.corr_plotdata.addPoints(cor_x, cor_y, brush=pg.mkBrush(255, 255, 204, 60),
pen=pg.mkPen(self.line_colors[0], width=0.5))
# mark circ_roi point
j,i = self.circ_roi_ji
x_circ, y_circ =
|
np.atleast_1d(X[j,i])
|
numpy.atleast_1d
|
import numpy as np
from bertserini_on_telegram.utils.io import print_ts
from transformers.data.processors.squad import SquadResult
from typing import Dict, List, Tuple
from pytorch_lightning import LightningModule
from transformers import BertTokenizer, BertForQuestionAnswering
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
from bertserini_on_telegram.utils.utils_squad import compute_predictions_logits
from transformers.data.metrics.squad_metrics import apply_no_ans_threshold, find_all_best_thresh, get_raw_scores, make_eval_dict, merge_eval
from bertserini_on_telegram.utils.utils_squad import compute_predictions
from transformers.data.metrics.squad_metrics import squad_evaluate
import json
from pprint import pprint
from bertserini_on_telegram.utils.utils_squad import compute_recall, compute_em_k
@MODEL_REGISTRY
class BERTModule(LightningModule):
"""A LightningModule is a neat way to organize the code necessary to train/evaluate/inference a Torch.nn.Module.
Args:
model_name (str, optional): The name of the pretrained model to use.
mu (float): Weights used to compute the aggregated score. Defaults to 0.5.
n_best (int): Number of best results to choose from when computing predictions. Defaults to 10.
results_file (str): Name of the file where to store the optimal F1 threshold to use at inference time. Defaults to "./tmp/results_.json".
Attributes:
model (Torch.nn.Module): The effective Torch module, ready for validation/inference.
tokenizer (BertTokenizer): The tokenizer used to tokenize all texts coming in or out of the model.
"""
def __init__(self,
model_name: str,
results_file: str,
mu: float = 0.5,
n_best: int = 10,):
super().__init__()
self.save_hyperparameters()
print_ts(f'Initializing {" ".join(self.hparams.model_name.split("-"))} for Inference')
self.model = BertForQuestionAnswering.from_pretrained(self.hparams.model_name).cuda()
self.tokenizer = BertTokenizer.from_pretrained(self.hparams.model_name)
self.all_results = []
def get_best_f1_threshold(self):
"""Read the optimal F1 threshold from the filesystem.
Returns:
float: The optimal F1 threshold.
"""
try:
file = open(self.hparams.results_file, 'rb')
except FileNotFoundError:
print(f'Could not find file {self.hparams.results_file}. Remember to run a validation '
'loop first, before doing inference')
exit()
return json.load(file)['best_f1_thresh']
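# Assumed on-disk format of results_file (produced by a prior validation run); only the
# 'best_f1_thresh' key is read here, e.g.:
#   {"best_f1_thresh": 1.25}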
def gradient_step(self, batch, batch_idx):
"""A simple training step (not used yet)
"""
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
outputs = self.model(**inputs)
loss = outputs['loss']
return loss
def non_gradient_step(self, batch, batch_idx, dataloader_idx=0):
"""The common step for non_gradient loops (validation/test/inference)
"""
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
features = self.trainer.datamodule.features
feature_indices = batch[3]
outputs = self.model(**inputs)
for feature_index in feature_indices:
eval_feature = features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
start_logits = outputs['start_logits'].detach().cpu()
end_logits = outputs['end_logits'].detach().cpu()
result = SquadResult(unique_id, start_logits, end_logits)
self.all_results.append(result)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""The validation step
"""
self.non_gradient_step(batch, batch_idx, dataloader_idx)
def on_validation_epoch_end(self):
"""This hook is called after the last validation step.
This is convenient if we want to gather the results of the validation.
"""
print('Computing predictions logits')
predictions = compute_predictions_logits(
np.array(self.trainer.datamodule.new_examples),
np.array(self.trainer.datamodule.features),
np.array(self.all_results),
n_best_size=10,
max_answer_length=378,
do_lower_case=True,
null_score_diff_threshold=0.0,
tokenizer=self.tokenizer,
version_2_with_negative=True,
output_prediction_file="./tmp/out_pred",
output_nbest_file="./tmp/out_nbest",
output_null_log_odds_file="./tmp/out_null_log_odds",
verbose_logging=False,
)
# aggregate bert scores with pyserini
# iterate over all the questions
for i, qid in enumerate(predictions.keys()):
# iterate over all the contexts for a given question
for ctxid, _ in enumerate(predictions[qid]):
# copy pyserini score from datamodule class
predictions[qid][ctxid]['pyserini_score'] = self.trainer.datamodule.pyserini_scores[i][ctxid]
# aggregate bert score with pyserini score with parameter mu
predictions[qid][ctxid]['total_score'] = \
(1 - self.hparams.mu) * predictions[qid][ctxid]['bert_score'] + \
(self.hparams.mu) * predictions[qid][ctxid]['pyserini_score']
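# Worked example with illustrative numbers: for mu = 0.5, bert_score = 4.0 and
# pyserini_score = 10.0 give total_score = 0.5*4.0 + 0.5*10.0 = 7.0;
# mu = 0 ranks contexts purely by the BERT answer score, mu = 1 purely by the retriever score.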
em_k = compute_em_k(self.trainer.datamodule.new_examples, predictions)
# sort answers for the different contexts by the total score
# & transform prediction to feed them to squad_evaluate
predictions = {k: sorted(v, key=lambda x: -x['total_score'])[0]['text'] for k, v in predictions.items()}
result = squad_evaluate(self.trainer.datamodule.examples, predictions)
recall = compute_recall(self.trainer.datamodule.new_examples, self.trainer.datamodule.hparams.num_contexts)
pprint(f"em_k: {em_k}")
pprint(f"recall: {recall}")
print(f"other metrics: ")
pprint(result)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
"""The prediction step
"""
self.non_gradient_step(batch, batch_idx, dataloader_idx)
def on_predict_epoch_end(self, results):
"""This hook is called after the last prediction step. This is convenient if we want to gather the results of the prediction.
"""
# Answers contains the n_best candidate answers for all possible question-context (q1, c1), ..., (q1, cn) pairs
print('Computing predictions logits')
predictions = compute_predictions_logits(
np.array(self.trainer.datamodule.examples),
|
np.array(self.trainer.datamodule.features)
|
numpy.array
|
import os
import numpy as np
from keras.utils.data_utils import Sequence
DOWNGRADES = ['bicubic', 'bicubic_jpeg_75', 'bicubic_jpeg_90', 'unknown']
class DIV2KSequence(Sequence):
def __init__(self,
path,
scale=2,
subset='train',
downgrade='bicubic',
image_ids=None,
random_rotate=True,
random_flip=True,
random_crop=True,
crop_size=96,
batch_size=16):
"""
Sequence over a DIV2K subset.
Reads DIV2K images that have been converted to numpy arrays with convert.py.
:param path: path to DIV2K dataset with images stored as numpy arrays.
:param scale: super resolution scale, either 2, 3 or 4.
:param subset: either 'train' or 'valid', referring to training and validation subset, respectively.
:param downgrade: downgrade operator, see DOWNGRADES.
:param image_ids: list of image ids to use from the specified subset. Default is None which means
all image ids from the specified subset.
:param random_rotate: if True images are randomly rotated by 0, 90, 180 or 270 degrees.
:param random_flip: if True images are randomly flipped horizontally.
:param random_crop: if True images are randomly cropped.
:param crop_size: size of crop window in HR image. Only used if random_crop=True.
:param batch_size: size of generated batches.
"""
if not os.path.exists(path):
raise FileNotFoundError(f"Path {path} doesn't exist")
if scale not in [2, 3, 4]:
raise ValueError('scale must be 2, 3 or 4')
if subset not in ['train', 'valid']:
raise ValueError("subset must be 'train' or 'valid'")
if downgrade not in DOWNGRADES:
raise ValueError(f"downgrade must be in {DOWNGRADES}")
if not random_crop and batch_size != 1:
raise ValueError('batch_size must be 1 if random_crop=False')
self.path = path
self.scale = scale
self.subset = subset
self.downgrade = downgrade
if image_ids is None:
if subset == 'train':
self.image_ids = range(1, 801)
else:
self.image_ids = range(801, 901)
else:
self.image_ids = image_ids
self.random_rotate = random_rotate
self.random_flip = random_flip
self.random_crop = random_crop
self.crop_size = crop_size
self.batch_size = batch_size
def __getitem__(self, index):
if self.batch_size == 1:
return self._batch_1(self.image_ids[index])
else:
beg = index * self.batch_size
end = (index + 1) * self.batch_size
return self._batch_n(self.image_ids[beg:end])
def __len__(self):
return int(np.ceil(len(self.image_ids) / self.batch_size))
def _batch_1(self, id):
lr, hr = self._pair(id)
return np.expand_dims(np.array(lr, dtype='uint8'), axis=0), \
np.expand_dims(np.array(hr, dtype='uint8'), axis=0)
def _batch_n(self, ids):
lr_crop_size = self.crop_size // self.scale
hr_crop_size = self.crop_size
lr_batch = np.zeros((len(ids), lr_crop_size, lr_crop_size, 3), dtype='uint8')
hr_batch = np.zeros((len(ids), hr_crop_size, hr_crop_size, 3), dtype='uint8')
for i, id in enumerate(ids):
lr, hr = self._pair(id)
lr_batch[i] = lr
hr_batch[i] = hr
return lr_batch, hr_batch
def _pair(self, id):
lr_path = self._lr_image_path(id)
hr_path = self._hr_image_path(id)
lr = np.load(lr_path)
hr = np.load(hr_path)
if self.random_crop:
lr, hr = _random_crop(lr, hr, self.crop_size, self.scale)
if self.random_flip:
lr, hr = _random_flip(lr, hr)
if self.random_rotate:
lr, hr = _random_rotate(lr, hr)
return lr, hr
def _hr_image_path(self, id):
return os.path.join(self.path, f'DIV2K_{self.subset}_HR', f'{id:04}.npy')
def _lr_image_path(self, id):
return os.path.join(self.path, f'DIV2K_{self.subset}_LR_{self.downgrade}', f'X{self.scale}', f'{id:04}x{self.scale}.npy')
def cropped_sequence(path, scale, subset, downgrade, image_ids=None, batch_size=16):
return DIV2KSequence(path=path, scale=scale, subset=subset, downgrade=downgrade, image_ids=image_ids,
batch_size=batch_size, crop_size=48 * scale)
def fullsize_sequence(path, scale, subset, downgrade, image_ids=None):
return DIV2KSequence(path=path, scale=scale, subset=subset, downgrade=downgrade, image_ids=image_ids,
batch_size=1, random_rotate=False, random_flip=False, random_crop=False)
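# Usage sketch (the path is a placeholder): a training generator of random crops for x2
# super-resolution, assuming the DIV2K .npy files were produced by convert.py:
#   seq = cropped_sequence('/data/div2k_npy', scale=2, subset='train', downgrade='bicubic')
#   lr_batch, hr_batch = seq[0]   # shapes (16, 48, 48, 3) and (16, 96, 96, 3)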
def _random_crop(lr_img, hr_img, hr_crop_size, scale):
lr_crop_size = hr_crop_size // scale
lr_w = np.random.randint(lr_img.shape[1] - lr_crop_size + 1)
lr_h = np.random.randint(lr_img.shape[0] - lr_crop_size + 1)
hr_w = lr_w * scale
hr_h = lr_h * scale
lr_img_cropped = lr_img[lr_h:lr_h + lr_crop_size, lr_w:lr_w + lr_crop_size]
hr_img_cropped = hr_img[hr_h:hr_h + hr_crop_size, hr_w:hr_w + hr_crop_size]
return lr_img_cropped, hr_img_cropped
def _random_flip(lr_img, hr_img):
if np.random.rand() > 0.5:
return
|
np.fliplr(lr_img)
|
numpy.fliplr
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 15:56:32 2020
@author: nolanlem
"""
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
import os
import librosa
import glob
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d  # needed by getKDE() below
os.chdir('/Users/nolanlem/Documents/kura/kura-git/swarm-tapping-study/generators')
#from util.utils import makeDir
import seaborn as sns
sns.set()
sns.set_palette('tab10')
# load mono metronome click, non-spatialized
thesample = './sampleaudio/woodblock_lower.wav'
sr_audio = 22050 # audio sample rate; also set in the global params section below, but needed here
y, _ = librosa.load(thesample, sr=sr_audio)
y = y*0.5 # reduce amp audiofile
###### load binaural metronome clicks
samples = []
for fi in glob.glob('./sampleaudio/binaural-metros/*.wav'):
y_, _ = librosa.load(fi, mono=False)
samples.append(y_)
# get largest sample in samples array
tmpmax = samples[0][0].shape[0]
for deg in range(len(samples)):
maxnum = samples[deg][0].shape[0]
if maxnum > tmpmax:
tmpmax = maxnum
largestsampnum = tmpmax
# PER STIMULUS TIME HISTOGRAM
def PSTH(x, N, len_y):
# x = spikes/taps matrix
# N = 0.1 seconds = 0.1*22050 = 2205 samples
spike_blocks = np.linspace(0, x.shape[1], int(len_y/N))
spike_blocks_int = [int(elem) for elem in spike_blocks]
mx = []
for i in range(1, len(spike_blocks_int)):
tapblock_mx = np.nanmean(x[:, spike_blocks_int[i-1]:spike_blocks_int[i]])
block_mx = tapblock_mx*np.ones(spike_blocks_int[i] - spike_blocks_int[i-1])
mx.extend(block_mx)
return mx
def calculateCOP(window, period_samps, dist_type = 'uniform'):
if dist_type == 'uniform':
window = [elem for elem in window if elem <= period_samps]
if dist_type == 'gaussian':
window = np.array(window) + int(period_samps/2)
window = [elem for elem in window if elem <= period_samps and elem >= 0]
bininterp = interp1d([0, period_samps], [0, 2*np.pi])
phases = bininterp(window)
R = np.nansum(np.exp(phases*1j))/N
R_mag = np.abs(R)
R_ang = np.angle(R)
R_mag_traj.append(R_mag)
R_ang_traj.append(R_ang)
return R_mag, R_ang
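# Interpretation note (standard Kuramoto order parameter): R_mag lies in [0, 1], approaching 1
# when the events in the window are tightly phase-aligned and 0 when they are spread uniformly
# over the period; R_ang is the mean phase. Because the sum is divided by the global N, a window
# containing fewer than N events cannot reach R_mag = 1.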
def makeAudio(events, iteration, stimdir, spatial_flag=False):
eventsinsamples = librosa.time_to_samples(events,sr=sr_audio)
# audiobufffers for spatial and mono audio
audiobuffer_L = np.zeros(max(eventsinsamples) + largestsampnum)
audiobuffer_R = np.zeros(max(eventsinsamples) + largestsampnum)
y_mono = y
for startpos in eventsinsamples:
random_deg = np.random.randint(100)
y_l = samples[random_deg][0]
y_r = samples[random_deg][1]
if spatial_flag == True:
audiobuffer_L[startpos:(startpos + len(y_l))] = audiobuffer_L[startpos:(startpos + len(y_l))] + y_l
audiobuffer_R[startpos:(startpos + len(y_r))] = audiobuffer_R[startpos:(startpos + len(y_r))] + y_r
if spatial_flag == False:
audiobuffer_L[startpos:(startpos + len(y_mono))] = audiobuffer_L[startpos:(startpos + len(y_mono))] + y_mono
audiobuffer_R[startpos:(startpos + len(y_mono))] = audiobuffer_R[startpos:(startpos + len(y_mono))] + y_mono
#audio_l = np.sum(audiobuffer_L, axis=0)
#audio_r = np.sum(audiobuffer_R, axis=0)
audio_l = 0.8*audiobuffer_L/max(audiobuffer_L)
audio_r = 0.8*audiobuffer_R/max(audiobuffer_R)
audio = np.array([audio_l, audio_r])
audiofi = os.path.join(stimdir, dist_type[0] + '_' + binaural_str[0] + '_' + str(N) + '_' + str(iteration) + '.wav')
sf.write(audiofi, audio.T, samplerate=sr_audio)
print('creating', audiofi)
return audio
def getKDE(events):
eventsinsamples = librosa.time_to_samples(events)
taps = np.zeros(max(eventsinsamples)+1)
for spike in eventsinsamples:
np.put(taps, spike, 1)
blocksize = int(sr_audio/10)
blocks = np.arange(0, len(taps), blocksize)
mx = []
for j in range(1, len(blocks)):
tapblock_mx = np.mean(taps[blocks[j-1]:blocks[j]])
block_mx = tapblock_mx*np.ones(blocksize)
mx.extend(block_mx)
gmx = gaussian_filter1d(mx, 1000)
gmx = gmx/max(gmx)
gmx -= 2 # move it down below wf amplitude space
return gmx
def removeAudioStims(thestimdir):
for folder in thestimdir:
if os.path.exists(folder):
for fi in glob.glob(folder + "/*.wav"):
os.remove(fi)
for png in glob.glob(folder + '/*.png'):
os.remove(png)
# util function for rounding to 2 decimal places
def round2dec(num2round):
roundednum = np.round(num2round,2)
return roundednum
#%%##########################################
########## INITIALIZE GLOBAL PARAMS #########
###########################################
N = 40
sr_model = 20
sr_audio = 22050
####### set the binaural flag and DISTRIBUTION TYPE (uniform, gaussian) #######
binaural_flag = True # binaural audio or no?
dist_type = 'gaussian'# which probability density function (uniform, or gaussian)
if binaural_flag == True:
binaural_str = 'binaural'
else:
binaural_str = 'mono'
#### LOOP PARAMS
target_tempos = np.geomspace(start=60, stop=100, num=5)
freq_conds = target_tempos/60. # tempo to distribute events around
period_conds = 1/freq_conds
num_beats = 22 # number of beats
seconds = (1./freq_conds)*num_beats # length of audio to generate
beg_delay = 0.5 # time (secs) to insert in beginning of stim audio
end_delay = 0.5 # time to insert at end of audio
period_samps = np.array(sr_audio*1./freq_conds, dtype=int) # num of samples in one isochronous beat period for each tempo; events are distributed around these beats
totalsamps = beg_delay*sr_audio + seconds*sr_audio + end_delay*sr_audio
totalsamps = totalsamps.astype(int)
totalsecs = totalsamps/sr_audio
stimdirs = []
for tmp in target_tempos:
rootdir = os.path.join('stim-ramp/' + str(N) + '_' + binaural_str, str(int(tmp))) # hold R, or ramps?
stimdirs.append(rootdir)
os.makedirs(rootdir, exist_ok=True) # make dir for storing stims (the makeDir helper import is commented out above)
########## RANGE of UNIFORM low and high (l,r) range for uniform distribution
l = np.linspace(-0.1, -1, num_grads)
r =
|
np.linspace(0.1, 1, num_grads)
|
numpy.linspace
|
import pickle
import cv2
from skimage.filters import threshold_otsu, threshold_local
from skimage import measure
from scipy import ndimage
import sys
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
import imageio
import scipy
from scipy import signal
from skimage.feature import peak_local_max
from scipy.signal import find_peaks
from skimage.segmentation import watershed
from skimage.measure import label, regionprops
import csv
import pandas as pd
import random
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster, leaves_list
import networkx as nx
import scipy
from scipy.linalg import polar
from numpy import linalg as LA
import time_series as ts
import moviepy.editor as mp
##########################################################################################
# visualization on the images
##########################################################################################
##########################################################################################
def visualize_segmentation(folder_name, gaussian_filter_size=1,frame_num=0,include_eps=False):
"""Visualize the results of z-disk and sarcomere segmentation."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# --> visualize segmentation
raw_img = np.load('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/frame-%04d.npy'%(frame_num))
# plot of segmented z disks
box = -1
laplacian = cv2.Laplacian(raw_img,cv2.CV_64F)
laplacian = ndimage.gaussian_filter(laplacian, gaussian_filter_size)
contour_thresh = threshold_otsu(laplacian)
contour_image = laplacian
contours = measure.find_contours(contour_image,contour_thresh)
total = 0
contour_list = []
for n, contour in enumerate(contours):
total += 1
if contour.shape[0] >= 8:
contour_list.append(contour)
band_data = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/segmented_bands/frame-%04d_bands.txt'%(frame_num))
z_disc_x = band_data[:,0]
z_disc_y = band_data[:,1]
# --> import sarcomeres
sarc_data = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/segmented_sarc/frame-%04d_sarc_data.txt'%(frame_num))
sarc_x = sarc_data[:,2]
sarc_y = sarc_data[:,3]
fig, axs = plt.subplots(1,2,figsize=(10,5))
axs[0].imshow(raw_img, cmap=plt.cm.gray); axs[0].set_title('z-disks -- frame %i, %i found'%(frame_num,len(contour_list)))
for kk in range(0,len(contour_list)):
cont = contour_list[kk]
axs[0].plot(cont[:,1],cont[:,0])
axs[0].set_xticks([]); axs[0].set_yticks([])
axs[1].imshow(raw_img, cmap=plt.cm.gray); axs[1].set_title('sarcomeres -- frame %i, %i found'%(frame_num,sarc_x.shape[0]))
axs[1].plot(sarc_y,sarc_x,'r*',markersize=3)
axs[1].set_xticks([]); axs[1].set_yticks([])
plt.savefig(out_analysis + '/visualize_segmentation_%04d'%(frame_num))
if include_eps:
plt.savefig(out_analysis + '/visualize_segmentation_%04d.eps'%(frame_num))
return
##########################################################################################
def get_frame_matrix(folder_name, frame):
"""Get the npy matrix for a frame of the movie."""
if frame < 10: file_root = '_matrices/frame-000%i'%(frame)
elif frame < 100: file_root = '_matrices/frame-00%i'%(frame)
else: file_root = '_matrices/frame-0%i'%(frame)
root = 'ALL_MOVIES_MATRICES/' + folder_name + file_root + '.npy'
raw_img = np.load(root)
return raw_img
##########################################################################################
def visualize_contract_anim_movie(folder_name,re_run_timeseries=False, use_re_run_timeseries=False, keep_thresh=0.75,include_eps=False,single_frame=False):
"""Visualize the results of tracking."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
if single_frame:
num_frames = 1
if use_re_run_timeseries:
tag_vis = 'for_plotting_'
if re_run_timeseries:
ts.timeseries_all(folder_name, keep_thresh, True)
else:
tag_vis = ''
plot_info_frames_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_frames.pkl'
ALL_frames_above_thresh = pickle.load( open( plot_info_frames_fname , "rb" ) )
plot_info_x_pos_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_x.pkl'
ALL_x_pos_above_thresh = pickle.load( open( plot_info_x_pos_fname , "rb" ) )
plot_info_y_pos_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_y.pkl'
ALL_y_pos_above_thresh = pickle.load( open( plot_info_y_pos_fname , "rb" ) )
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'tracking_results_leng.txt'
all_normalized = np.loadtxt(sarc_data_normalized_fname)
if use_re_run_timeseries:
out_plots = out_analysis + '/for_plotting_contract_anim'
else:
out_plots = out_analysis + '/contract_anim'
if not os.path.exists(out_plots): os.makedirs(out_plots)
# --> plot every frame, plot every sarcomere according to normalized fraction length
color_matrix = np.zeros(all_normalized.shape)
for kk in range(0,all_normalized.shape[0]):
for jj in range(0,all_normalized.shape[1]):
of = all_normalized[kk,jj]
if of < -.2: color_matrix[kk,jj] = 0
elif of > .2: color_matrix[kk,jj] = 1
else: color_matrix[kk,jj] = of*2.5 + .5
img_list = []
for t in range(0,num_frames):
if t < 10: file_root = '/frame-000%i'%(t)
elif t < 100: file_root = '/frame-00%i'%(t)
else: file_root = '/frame-0%i'%(t)
img = get_frame_matrix(folder_name,t)
plt.figure()
plt.imshow(img, cmap=plt.cm.gray)
for kk in range(0,all_normalized.shape[0]):
if t in ALL_frames_above_thresh[kk]:
ix = np.argwhere(np.asarray(ALL_frames_above_thresh[kk]) == t)[0][0]
col = (1-color_matrix[kk,t], 0 , color_matrix[kk,t])
yy = ALL_y_pos_above_thresh[kk][ix]
xx = ALL_x_pos_above_thresh[kk][ix]
plt.scatter(yy,xx,s=15,color=col,marker='o')
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.savefig(out_plots + '/' + file_root + '_length')
if include_eps:
plt.savefig(out_plots + '/' + file_root + '_length.eps')
plt.close()
img_list.append(imageio.imread(out_plots + '/' + file_root + '_length.png'))
if num_frames > 1:
imageio.mimsave(out_plots + '/contract_anim.gif', img_list)
# clip = mp.VideoFileClip(out_plots + '/contract_anim.gif')
# clip.write_videofile( 'Kehan_Tracked_Movies/' + folder_name + '.mp4') # put all movies in one folder
return
##########################################################################################
# plot the spatial graph
##########################################################################################
##########################################################################################
def visualize_spatial_graph(folder_name,include_eps=False):
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
######################################################################################
out_graph = 'ALL_MOVIES_PROCESSED' + '/' + folder_name + '/graph'
with open(out_graph + '/graph.pkl', 'rb') as f: G = pickle.load(f)
with open(out_graph + '/pos.pkl', 'rb') as f: pos = pickle.load(f)
# plot spatial graph
G2 = nx.Graph()
nodes_list = list(G.nodes())
for kk in range(0,len(nodes_list)): G2.add_node(nodes_list[kk])
edges_list = list(G.edges())
orient_list = []
for kk in range(0,len(edges_list)):
# define the angle of the edge
node_1_ix = edges_list[kk][0]
node_2_ix = edges_list[kk][1]
x1 = pos[node_1_ix][0]
x2 = pos[node_2_ix][0]
y1 = pos[node_1_ix][1]
y2 = pos[node_2_ix][1]
rad = ((x1-x2)**2.0 + (y1-y2)**2.0)**0.5
x_val = (x2-x1)/rad
y_val = (y2-y1)/rad
ang = np.abs(np.dot([1,0],[x_val,y_val]))
orient_list.append(ang)
G2.add_edge(node_1_ix,node_2_ix,weight=ang)
# for each node, determine local alignment --
node_val_list = []
for kk in range(0,len(nodes_list)):
ix = nodes_list[kk]
ed_li = list(G.edges(ix))
val = 0
num = 0
for jj in range(0,len(ed_li)):
for ii in range(jj+1,len(ed_li)):
node_1a_ix = ed_li[jj][0]
node_1b_ix = ed_li[jj][1]
node_2a_ix = ed_li[ii][0]
node_2b_ix = ed_li[ii][1]
x1a = pos[node_1a_ix][0]
x1b = pos[node_1b_ix][0]
y1a = pos[node_1a_ix][1]
y1b = pos[node_1b_ix][1]
x2a = pos[node_2a_ix][0]
x2b = pos[node_2b_ix][0]
y2a = pos[node_2a_ix][1]
y2b = pos[node_2b_ix][1]
rad1 = ((x1a-x1b)**2.0 + (y1a-y1b)**2.0)**0.5
rad2 = ((x2a-x2b)**2.0 + (y2a-y2b)**2.0)**0.5
vec1 = [(x1a-x1b)/rad1,(y1a-y1b)/rad1]
vec2 = [(x2a-x2b)/rad2,(y2a-y2b)/rad2]
val += np.abs(np.dot( vec1 , vec2 ))
num += 1
if num > 0:
node_val_list.append(val/num)
else:
node_val_list.append(0)
plt.figure(figsize=(5,5))
edges,weights = zip(*nx.get_edge_attributes(G2,'weight').items())
nx.draw(G2,pos,node_color='k',node_size=10, width=2, edge_color=weights, edge_cmap = plt.cm.rainbow)
x_list = []; y_list = []
mi = np.min(node_val_list); ma = np.max(node_val_list)
for kk in range(0,len(nodes_list)):
ix = nodes_list[kk]
x = pos[ix][0]
y = pos[ix][1]
val = 1 - ((node_val_list[kk] - mi) /(ma - mi)*0.75 + 0.25)
if node_val_list[kk] > .9:
plt.plot(x,y,'.',color=(val,val,val),ms=10)
elif node_val_list[kk] > .75:
plt.plot(x,y,'.',color=(val,val,val),ms=7.5)
else:
plt.plot(x,y,'.',color=(val,val,val),ms=5)
######################################################################################
plt.savefig(out_analysis + '/' + folder_name + '_spatial_graph')
if include_eps:
plt.savefig(out_analysis + '/' + folder_name + '_spatial_graph.eps')
plt.close()
return
##########################################################################################
# time series plots and analysis
##########################################################################################
##########################################################################################
def DTWDistance(s1, s2):
"""Compute distance based on dynamic time warping (DTW)"""
DTW={}
for i in range(len(s1)):
DTW[(i, -1)] = float('inf')
for i in range(len(s2)):
DTW[(-1, i)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(len(s1)):
for j in range(len(s2)):
dist= (s1[i]-s2[j])**2
DTW[(i, j)] = dist + min(DTW[(i-1, j)],DTW[(i, j-1)], DTW[(i-1, j-1)])
return np.sqrt(DTW[len(s1)-1, len(s2)-1])
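# Illustrative sanity check: DTWDistance(s, s) == 0 for any sequence s, and a time-shifted copy
# of a waveform scores much closer to the original under DTW than under a point-wise Euclidean
# distance, which is why DTW is offered below as a metric for clustering contraction time series.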
##########################################################################################
def cluster_timeseries_plot_dendrogram(folder_name,compute_dist_DTW,compute_dist_euclidean=False):
"""Cluster timeseries and plot a dendrogram that shows the clustering."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
if compute_dist_DTW == False and compute_dist_euclidean == False: load_dist_DTW = True
else: load_dist_DTW = False
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
X = arr_leng
if compute_dist_DTW:
num_sarc = X.shape[0]
dist_mat = np.zeros((num_sarc,num_sarc))
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
dist_mat[kk,jj] = DTWDistance(X[kk,:],X[jj,:])
np.savetxt( 'ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dist_mat_DTW.txt',dist_mat)
dist_mat = dist_mat + dist_mat.T
elif load_dist_DTW:
dist_mat = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dist_mat_DTW.txt')
dist_mat = dist_mat + dist_mat.T
elif compute_dist_euclidean:
Y = pdist(X, 'euclidean')
dist_mat = squareform(Y)
dist_v = squareform(dist_mat)
Z = linkage(dist_v , method='ward', metric='euclidean')
ll = leaves_list(Z)
# --> plot dendrogram
plt.figure(figsize=(9,30),frameon=False)
plt.subplot(1,2,1)
# dendrogram
dn1 = dendrogram(Z,orientation='left',color_threshold=0, above_threshold_color='k') #,truncate_mode='lastp')
ordered = dn1['leaves'] #from bottom to top
if compute_dist_DTW or load_dist_DTW:
np.savetxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_order_DTW.txt',np.asarray(ordered))
else:
np.savetxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_order_euc.txt',np.asarray(ordered))
ax = plt.gca()
ax.xaxis.set_visible(False)
plt.subplot(1,2,2)
ax = plt.gca()
for kk in range(0,len(ordered)):
ix = ordered[kk]
col = (1-kk/len(ordered), kk/len(ordered) , 1- kk/len(ordered))
plt.plot(X[ix,:] + kk*.3,c=col)
plt.tight_layout()
plt.ylim((-.4,kk*.3+.35))
plt.axis('off')
if compute_dist_DTW or load_dist_DTW:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_DTW.pdf')
else:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_euclidean.pdf')
return
##########################################################################################
def plot_normalized_tracked_timeseries(folder_name,include_eps=False):
"""Create a plot of the normalized tracked time series."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_leng.txt'
all_normalized = np.loadtxt(sarc_data_normalized_fname)
plt.figure()
plt.plot(all_normalized.T,linewidth=.25)
plt.plot(np.median(all_normalized.T,axis=1),'k-',linewidth=3,label='median curve')
plt.plot(np.mean(all_normalized.T,axis=1),'--',color=(.5,.5,.5),linewidth=3,label='mean curve')
plt.legend()
plt.xlabel('frame')
plt.ylabel('normalized length')
plt.title('timeseries data, tracked and normalized, %i sarcomeres'%(all_normalized.shape[0]))
plt.ylim((-.1,.1))
plt.tight_layout()
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/timeseries_tracked_normalized')
if include_eps:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/timeseries_tracked_normalized.eps')
return
##########################################################################################
def plot_untracked_absolute_timeseries(folder_name,include_eps=False):
"""Create a plot of the un-tracked absolute sarcomere lengths."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
ALL_PIX_LEN = []; med = []; ix = []; num_sarc = []
for frame in range(0,num_frames):
if frame < 10: file_root = '/frame-000%i'%(frame)
elif frame < 100: file_root = '/frame-00%i'%(frame)
else: file_root = '/frame-0%i'%(frame)
fname = external_folder_name + '/' + folder_name + '/segmented_sarc/' + file_root + '_sarc_data.txt'
data = np.loadtxt(fname)
pix_len = data[:,4]
ALL_PIX_LEN.append(pix_len)
med.append(np.median(pix_len))
ix.append(frame+1)
num_sarc.append(len(pix_len))
# --> create a violin plot of everything
plt.figure(figsize=(12,6))
plt.subplot(3,1,1)
ax = plt.gca()
ax.violinplot(ALL_PIX_LEN)
plt.plot(ix,med,'ro',label='median')
plt.legend()
plt.xlabel('frame')
plt.ylabel('sarc len in pixels')
plt.title(folder_name + ' absolute sarcomere length untracked')
plt.subplot(3,1,2)
plt.plot(ix,med,'k-')
plt.plot(ix,med,'ro',label='median')
plt.legend()
plt.xlabel('frame')
plt.ylabel('sarc len in pixels')
plt.subplot(3,1,3)
plt.plot(ix,num_sarc,'k-')
plt.plot(ix,num_sarc,'go')
plt.xlabel('frame')
plt.ylabel('# sarc segmented')
plt.savefig( external_folder_name + '/' + folder_name + '/analysis/absolute_sarc_length_untracked')
if include_eps:
plt.savefig( external_folder_name + '/' + folder_name + '/analysis/absolute_sarc_length_untracked.eps')
return
##########################################################################################
def compute_timeseries_individual_parameters(folder_name,include_eps=False):
"""Compute and save timeseries time constants (contraction time, relaxation time, flat time, period, offset, etc.)."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
input_distance = 10; input_width = 5 # <-- might need to adjust?
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
arr_leng_not_normalized = np.loadtxt(sarc_data_fname)
pix_leng_median = []; pix_leng_mean = []; pix_leng_min = []; pix_leng_max = []; perc_sarc_short = []
fra_mean_contract_time = []; fra_mean_relax_time = []; fra_mean_flat_time = []; fra_mean_period = []; fra_to_first = []
idx_sarc = []; num_peak_all = []
for zz in range(0,arr_frames.shape[0]):
idx_sarc.append(zz)
x = arr_frames[zz,:]
data_pixels = arr_leng_not_normalized[zz,:]
data = arr_leng[zz,:]
data_med = signal.medfilt(data,5) # optional median filter
deriv = np.gradient(data,x)
# go through and group into category by derivative
count_C = 0; count_R = 0; count_F = 0
thresh_flat = 0.005*(np.max(data_med) - np.min(data_med))/0.2
for kk in range(0,x.shape[0]):
if deriv[kk] > thresh_flat: count_R += 1
elif deriv[kk] < -1.0*thresh_flat: count_C += 1
else: count_F += 1
# detect peaks and valleys
th = .00; di = input_distance; wi = input_width # parameters
# distance: required minimal horizontal distance (>= 1, in samples) between neighbouring peaks;
#           smaller peaks are removed first until the condition is fulfilled for all remaining peaks.
# width: required width of peaks in samples; either a number, None, an array matching x,
#        or a 2-element sequence giving the minimal and maximal required width.
peaks_U, _ = find_peaks(data_med,threshold=th,distance=di,width=wi)
peaks_L, _ = find_peaks(-1.0*data_med,threshold=th,distance=di,width=wi)
#num_peaks = 0.5 * peaks_U.shape[0] + 0.5 * peaks_L.shape[0]
#num_peaks = peaks_L.shape[0]
num_peaks = 0
for kk in range(0,peaks_L.shape[0]):
if data_med[peaks_L[kk]] < np.mean(data_med) - thresh_flat:
num_peaks += 1
if num_peaks == 0: num_peaks = 999999
mean_C = count_C / num_peaks
mean_R = count_R / num_peaks
mean_F = count_F / num_peaks
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# save everything #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
pix_leng_median.append(np.median(data_pixels))
pix_leng_mean.append(np.mean(data_pixels))
mi = np.min(data_pixels); pix_leng_min.append(mi)
ma = np.max(data_pixels); pix_leng_max.append(ma)
perc_sarc_short.append( (ma - mi)/(ma) * 100 )
fra_mean_contract_time.append(mean_C)
fra_mean_relax_time.append(mean_R)
fra_mean_flat_time.append(mean_F)
fra_mean_period.append(x.shape[0] / num_peaks)
if peaks_L.shape[0] > 0:
fra_to_first.append(peaks_L[0])
else:
fra_to_first.append(0)
num_peak_all.append(num_peaks)
### --> plot parameters
plt.figure(figsize=(7,7))
plt.subplot(2,2,1)
plt.hist(fra_mean_contract_time)
plt.plot([np.median(fra_mean_contract_time),np.median(fra_mean_contract_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_contract: %.2f'%(np.median(fra_mean_contract_time)))
plt.tight_layout()
plt.subplot(2,2,2)
plt.hist(fra_mean_relax_time)
plt.plot([np.median(fra_mean_relax_time),np.median(fra_mean_relax_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_relax: %.2f'%(np.median(fra_mean_relax_time)))
plt.tight_layout()
plt.subplot(2,2,3)
plt.hist(fra_mean_flat_time)
plt.plot([np.median(fra_mean_flat_time),np.median(fra_mean_flat_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_flat: %.2f'%(np.median(fra_mean_flat_time)))
plt.tight_layout()
plt.subplot(2,2,4)
plt.hist(fra_mean_period)
plt.plot([np.median(fra_mean_period),np.median(fra_mean_period)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_period: %.2f'%(np.median(fra_mean_period)))
plt.tight_layout()
plt.savefig(out_analysis + '/histogram_time_constants')
if include_eps:
plt.savefig(out_analysis + '/histogram_time_constants.eps')
num_sarc = len(idx_sarc)
arr = np.zeros((num_sarc,12))
arr[:,0] = np.asarray(idx_sarc)
arr[:,1] = np.asarray(pix_leng_median)
arr[:,2] = np.asarray(pix_leng_mean)
arr[:,3] = np.asarray(pix_leng_min)
arr[:,4] = np.asarray(pix_leng_max)
arr[:,5] = np.asarray(perc_sarc_short)
arr[:,6] = np.asarray(fra_mean_contract_time)
arr[:,7] = np.asarray(fra_mean_relax_time)
arr[:,8] = np.asarray(fra_mean_flat_time)
arr[:,9] = np.asarray(fra_mean_period)
arr[:,10] = np.asarray(fra_to_first)
arr[:,11] = np.asarray(num_peak_all)
np.savetxt(out_analysis + '/timeseries_parameters_info.txt', arr)
# --> save as excel spreadsheet
writer = pd.ExcelWriter(out_analysis + '/timeseries_parameters_info.xlsx', engine='xlsxwriter')
all_col = ['idx', 'pix_leng_median', 'pix_leng_mean', 'pix_leng_min', 'pix_leng_max', 'perc_sarc_short', 'frames_mean_contract', 'frames_mean_relax', 'frames_mean_flat', 'frames_mean_period', 'frames_to_first', 'num_peaks']
df = pd.DataFrame(np.asarray(arr), columns=all_col)
df.to_excel(writer, sheet_name='summary_stats')
arr = arr_leng
df2 = pd.DataFrame(np.asarray(arr))
df2.to_excel(writer, sheet_name='full_time_series', columns = arr_frames[0,:])
writer.save()
return
##########################################################################################
def sample(mu_track,num_track,vals_all):
"""Sample mu from the total population -- match #tracked."""
num_run = 1000
mu_samp = []
for jj in range(0,num_run):
ix = []
for kk in range(0,num_track):
ix.append(random.randint(0,len(vals_all)-1))
samp = vals_all[ix]
mu_samp.append(mu_track - np.mean(samp))
return mu_samp
##########################################################################################
def compute_mu_ang(ang_list):
"""Compute the mean of an angle."""
x_total = 0
y_total = 0
for kk in range(0,len(ang_list)):
ang = ang_list[kk]
x_total += np.cos(ang)
y_total += np.sin(ang)
x_mean = x_total / len(ang_list)
y_mean = y_total / len(ang_list)
ang = np.arctan2(y_mean, x_mean)
r = np.sqrt(x_mean**2.0 + y_mean**2.0)
return ang, r
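# Illustrative check: compute_mu_ang([np.pi - 0.1, -np.pi + 0.1]) returns an angle of ~pi with
# r ~ 0.995 (the naive arithmetic mean of the two angles would wrongly be 0), while angles spread
# uniformly around the circle give r ~ 0.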
##########################################################################################
def sample_ang(mu_track_ang, mu_track_r,num_track,vals_all):
"""Sample angle from the total population -- match #tracked."""
num_run = 1000
mu_samp_ang = []
mu_samp_r = []
for jj in range(0,num_run):
ix = []
for kk in range(0,num_track):
ix.append(random.randint(0,len(vals_all)-1))
samp = vals_all[ix]
ang, r = compute_mu_ang(samp)
mu_samp_ang.append(mu_track_ang - ang)
mu_samp_r.append(mu_track_r - r)
return mu_samp_ang, mu_samp_r
##########################################################################################
def compare_tracked_untracked(folder_name,include_eps=False):
"""Compare the tracked and untracked populations by random sampling the untracked population."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
ALL_PIX_LEN = []; ALL_PIX_WID = []; ALL_PIX_ANG = []
med = []; ix = []; num_sarc = []
for frame in range(0,num_frames):
if frame < 10: file_root = '/frame-000%i'%(frame)
elif frame < 100: file_root = '/frame-00%i'%(frame)
else: file_root = '/frame-0%i'%(frame)
fname = external_folder_name + '/' + folder_name + '/segmented_sarc/' + file_root + '_sarc_data.txt'
data = np.loadtxt(fname)
pix_len = data[:,4]; pix_wid = data[:,5]; pix_ang = data[:,6]
ALL_PIX_LEN.append(pix_len); ALL_PIX_WID.append(pix_wid); ALL_PIX_ANG.append(pix_ang)
med.append(np.median(pix_len)); ix.append(frame+1); num_sarc.append(len(pix_len))
# --> import data
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
tracked_leng = np.loadtxt(sarc_data_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_wid.txt'
tracked_wid = np.loadtxt(sarc_data_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_ang.txt'
tracked_ang = np.loadtxt(sarc_data_fname)
# --> compute the mean number NOT tracked
num_not = 0
for kk in range(0,len(ALL_PIX_LEN)): num_not += len(ALL_PIX_LEN[kk])
num_not = num_not / len(ALL_PIX_LEN); num_tracked = tracked_leng.shape[0]
# --> sample the length from the tracked population
mu_samp_ALL_len = []
for frame_num in range(0,num_frames):
len_all = ALL_PIX_LEN[frame_num]
len_tracked = list(tracked_leng[:,frame_num])
mu_track = np.mean(len_tracked)
num_track = len(len_tracked)
vals_all = len_all
mu_samp = sample(mu_track, num_track, vals_all)
mu_samp_ALL_len.append(mu_samp)
plt.figure(figsize=(25,5))
plt.boxplot(mu_samp_ALL_len)
plt.plot([0,num_frames],[-.5,-.5],'k--')
plt.plot([0,num_frames],[.5,.5],'k--')
plt.title('comparison of length in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.savefig(out_analysis + '/length_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/length_compare_box_plots.eps')
# --> sample the width from the tracked population
mu_samp_ALL_wid = []
for frame_num in range(0,num_frames):
wid_all = ALL_PIX_WID[frame_num]
wid_tracked = list(tracked_wid[:,frame_num])
mu_track = np.mean(wid_tracked)
num_track = len(wid_tracked)
vals_all = wid_all
mu_samp = sample(mu_track, num_track, vals_all)
mu_samp_ALL_wid.append(mu_samp)
plt.figure(figsize=(25,5))
plt.boxplot(mu_samp_ALL_wid)
plt.plot([0,num_frames],[-.5,-.5],'k--')
plt.plot([0,num_frames],[.5,.5],'k--')
plt.title('comparison of width in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.savefig(out_analysis + '/width_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/width_compare_box_plots.eps')
# --> sample the angle from the tracked population
mu_samp_ALL_ang = []; mu_samp_ALL_rad = []
for frame_num in range(0,num_frames):
ang_all = ALL_PIX_ANG[frame_num]
ang_tracked = list(tracked_ang[:,frame_num])
mu_track_ang, mu_track_r = compute_mu_ang(ang_tracked)
num_track = len(ang_tracked)
vals_all = ang_all
mu_samp_ang, mu_samp_r = sample_ang(mu_track_ang, mu_track_r,num_track,vals_all)
mu_samp_ALL_ang.append(mu_samp_ang)
mu_samp_ALL_rad.append(mu_samp_r)
plt.figure(figsize=(25,10))
plt.subplot(2,1,1)
plt.boxplot(mu_samp_ALL_ang)
plt.plot([0,num_frames],[-1*np.pi/8,-1*np.pi/8],'k--')
plt.plot([0,num_frames],[np.pi/8,np.pi/8],'k--')
plt.title('comparison of angle in radians, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.subplot(2,1,2)
plt.boxplot(mu_samp_ALL_rad)
plt.plot([0,num_frames],[0,0],'r--',label='uniform')
plt.plot([0,num_frames],[1,1],'k--',label='oriented')
plt.title('comparison of angle radius in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.legend()
plt.savefig(out_analysis + '/angle_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/angle_compare_box_plots.eps')
return
##########################################################################################
##########################################################################################
# compute time series correlations -- on graph distance and euclidean distance
##########################################################################################
##########################################################################################
def compute_cross_correlation(sig1, sig2):
"""Compute the normalized cross correlation between two signals."""
sig1_norm = (sig1 - np.mean(sig1)) / (np.std(sig1) * sig1.shape[0])
sig2_norm = (sig2 - np.mean(sig2)) / (np.std(sig2))
val = np.correlate(sig1_norm,sig2_norm)
return val
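# Added illustrative check (not part of the original pipeline): with the
# normalization above, the single zero-lag value returned by np.correlate equals
# the Pearson correlation coefficient, so np.corrcoef can be used as a sanity check.
def _example_cross_correlation_check():
    """Hypothetical helper comparing compute_cross_correlation to np.corrcoef."""
    t = np.linspace(0, 2.0 * np.pi, 50)
    sig1 = np.sin(t)
    sig2 = np.sin(t + 0.5)
    val = compute_cross_correlation(sig1, sig2)[0]
    return np.isclose(val, np.corrcoef(sig1, sig2)[0, 1])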
##########################################################################################
def dist_val2(subgraphs,node_1,node_2):
"""Compute the network distance between two nodes."""
for sg in subgraphs:
node_1_in = sg.has_node(node_1)
node_2_in = sg.has_node(node_2)
if node_1_in and node_2_in:
dist = nx.shortest_path_length(sg,source=node_1,target=node_2)
return dist
return 99999
##########################################################################################
def get_euclid_dist_from_avg_pos(x_vec_1,y_vec_1,x_vec_2,y_vec_2):
"""Return the average euclidian distance between two sarcomeres."""
dist_vec = (( x_vec_1 - x_vec_2 )**2.0 + ( y_vec_1 - y_vec_2 )**2.0)**(1.0/2.0)
return np.mean(dist_vec)
##########################################################################################
def preliminary_spatial_temporal_correlation_info(folder_name,compute_network_distances=True,include_eps=False):
"""Perform a preliminary analysis of spatial/temporal correlation."""
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# --> import sarcomere
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
arr_leng_not_normalized = np.loadtxt(sarc_data_fname)
# --> import raw image
raw_img = np.load('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/frame-0000.npy')
# --> import graph
out_graph = external_folder_name + '/' + folder_name + '/graph'
with open(out_graph + '/graph.pkl', 'rb') as f: G = pickle.load(f)
out_graph = folder_name + '/graph/basic_graph.png'
graph = plt.imread(external_folder_name + '/' + folder_name + '/graph/basic_graph.png')
# --> import sarcomere info
sarc_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/tracking_results/tracking_results_sarcomeres.txt'
sarc_data = np.loadtxt(sarc_data_fname)
# --> import sarcomere position info
sarc_x_pos_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt'
sarc_x_pos_data = np.loadtxt(sarc_x_pos_data_fname )
sarc_y_pos_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt'
sarc_y_pos_data = np.loadtxt(sarc_y_pos_data_fname )
# --> import z-disc data
zdisc_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/tracking_results/tracking_results_zdisks.txt'
zdisc_data = np.loadtxt(zdisc_data_fname)
particle = zdisc_data[:,2]
# --> import index information
sarc_idx_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_sarc_idx_above_thresh.txt'
sarc_idx = np.loadtxt(sarc_idx_fname)
all_frames = sarc_data[:,0]; all_particles = sarc_data[:,2]
all_z_1 = sarc_data[:,5]; all_z_2 = sarc_data[:,6]
unique_particles = np.unique(all_particles).astype('int')
organized_data_z1 = np.zeros((unique_particles.shape[0],num_frames))
organized_data_z2 = np.zeros((unique_particles.shape[0],num_frames))
for kk in range(0,sarc_data.shape[0]):
part = int(all_particles[kk])
frame = int(all_frames[kk])
idx_in_frame = np.where(zdisc_data[:,0] == frame)
disc_data = zdisc_data[idx_in_frame[0],:]
part_idx = np.argmin(np.abs(unique_particles - part))
ZLID1 = int(all_z_1[kk])
ZLID2 = int(all_z_2[kk])
orig_disc_idx = disc_data[:,1].astype(int)
check = np.where(orig_disc_idx == ZLID1)[0]
if check.shape[0] == 0:
continue
else:
ZGID1_idx = check[0]
ZGID1 = int(disc_data[ZGID1_idx,2])
check = np.where(orig_disc_idx == ZLID2)[0]
if check.shape[0] == 0:
continue
else:
ZGID2_idx = check[0]
ZGID2 = int(disc_data[ZGID2_idx,2])
organized_data_z1[part_idx,frame] = ZGID1
organized_data_z2[part_idx,frame] = ZGID2
# --> for each sarcomere identify which z discs it belongs to
Z_disc_1 = []; Z_disc_2 = []
for kk in range(0,sarc_idx.shape[0]):
idx = int(sarc_idx[kk])
z_idx_1 = organized_data_z1[idx,:]
if np.sum(z_idx_1) == 0:
z_idx_1 = z_idx_1
else:
z_idx_1 = z_idx_1[z_idx_1>0]
z_idx_2 = organized_data_z2[idx,:]
if np.sum(z_idx_2) == 0:
z_idx_2 = z_idx_2
else:
z_idx_2 = z_idx_2[z_idx_2>0]
Z_disc_1.append(int(scipy.stats.mode(z_idx_1)[0][0]))
Z_disc_2.append(int(scipy.stats.mode(z_idx_2)[0][0]))
# get graph distances and correlation scores
graph_dist_all = []; corr_score_all = []; euclid_dist_all = []
if compute_network_distances:
for jj in range(0,sarc_idx.shape[0]):
for kk in range(jj+1,sarc_idx.shape[0]):
jj_idx = [Z_disc_1[jj], Z_disc_2[jj]]
kk_idx = [Z_disc_1[kk], Z_disc_2[kk]]
dist_all_combos = []
for j in jj_idx:
for k in kk_idx:
subgraphs = (G.subgraph(c).copy() for c in nx.connected_components(G))
dist = dist_val2(subgraphs,j,k)
dist_all_combos.append(dist)
sig1 = arr_leng[jj,:]
sig2 = arr_leng[kk,:]
corr_score = compute_cross_correlation(sig1, sig2)
corr_score_all.append(corr_score)
graph_dist_all.append( np.min(dist_all_combos) )
x_vec_1 = sarc_x_pos_data[jj,:]; y_vec_1 = sarc_y_pos_data[jj,:]
x_vec_2 = sarc_x_pos_data[kk,:]; y_vec_2 = sarc_y_pos_data[kk,:]
euclid_dist = get_euclid_dist_from_avg_pos(x_vec_1,y_vec_1,x_vec_2,y_vec_2)
euclid_dist_all.append(euclid_dist)
np.savetxt(out_analysis + '/graph_dist_all.txt',np.asarray(graph_dist_all))
np.savetxt(out_analysis + '/euclid_dist_all.txt',np.asarray(euclid_dist_all))
np.savetxt(out_analysis + '/corr_score_all.txt',np.asarray(corr_score_all))
else:
graph_dist_all = np.loadtxt(out_analysis + '/graph_dist_all.txt')
euclid_dist_all = np.loadtxt(out_analysis + '/euclid_dist_all.txt')
corr_score_all = np.loadtxt(out_analysis + '/corr_score_all.txt')
graph_dist_all = np.asarray(graph_dist_all).astype('int')
euclid_dist_all = np.asarray(euclid_dist_all)
corr_score_all = np.asarray(corr_score_all)
########## --> make plot
plt.figure(figsize=(30,4))
# raw image
plt.subplot(1,5,1)
plt.imshow(raw_img)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.title(folder_name + ' raw image')
plt.tight_layout()
# graph
plt.subplot(1,5,2)
plt.imshow(graph)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.title(folder_name + ' graph')
plt.tight_layout()
# histogram
plt.subplot(1,5,3)
n, bins, patches = plt.hist(corr_score_all,range=(-1,1),rwidth=.8,color=(.5,.5,.5))
plt.xlim((-1.1,1.1))
plt.xlabel('normalized cross-correlation')
plt.title('timeseries comparison')
ma = np.max(n)
plt.plot([0,0],[0,ma],'g--',label='no correlation')
plt.plot([np.median(corr_score_all),np.median(corr_score_all)],[0,ma],'b-',label='median: %.2f'%(np.median(corr_score_all)))
plt.legend()
plt.tight_layout()
# euclidean
plt.subplot(1,5,4)
x_coord = []; y_coord = []; num_in_bin = []
for kk in range(0,5):
ix_1 = euclid_dist_all > kk*20
ix_2 = euclid_dist_all < (kk +1)*20
ix = []
for jj in range(0,np.asarray(euclid_dist_all).shape[0]):
if ix_1[jj] == True and ix_2[jj] == True:
ix.append(jj)
x_coord.append(kk*20 + 5)
me = np.mean(corr_score_all[ix])
num_in_bin.append(len(corr_score_all[ix]))
y_coord.append(me)
plt.plot(x_coord,y_coord,'.',color=(1.0,.5,.5),markersize=20,label='binned means')
maxi = np.max(x_coord)
plt.plot([0,maxi],[0,0],'g--',label='no correlation')
mean_all = np.mean(corr_score_all)
plt.plot([0,maxi],[mean_all,mean_all],'b-',label='mean all: %.2f'%(mean_all))
	plt.xlabel('timeseries comparison wrt euclidean distance (pixels)')
plt.ylabel('normalized cross-correlation')
plt.grid(True)
plt.title('timeseries comparison wrt distance')
plt.legend()
plt.ylim((-1.05,1.05))
plt.tight_layout()
# network
plt.subplot(1,5,5)
dist_bins = []
for kk in range(0,5): dist_bins.append(kk)
x_coord = []; y_coord = []; num_in_bin = [ ]
for di in dist_bins:
ix = graph_dist_all == int(di)
corr_score = corr_score_all[ix]
if corr_score.shape[0] > 3:
x_coord.append(di)
y_coord.append(np.mean(corr_score))
num_in_bin.append(len(corr_score))
ix = graph_dist_all < 9999
corr_score = corr_score_all[ix]
mean_connected = np.mean(corr_score)
mean_all = np.mean(corr_score_all)
plt.plot(x_coord,y_coord,'.',color=(1.0,.5,.5),markersize=20,label='binned means')
maxi = np.max(dist_bins)
plt.plot([0,maxi],[mean_connected, mean_connected],'r--',label='mean connected: %.2f'%(mean_connected))
plt.plot([0,maxi],[0,0],'g--',label='no correlation')
plt.plot([0,maxi],[mean_all,mean_all],'b-',label='mean all: %.2f'%(mean_all))
plt.legend(loc=4)
plt.xlabel('distance along network')
plt.ylabel('normalized cross-correlation')
plt.grid(True)
plt.title('timeseries comparison wrt network distance')
plt.ylim((-1.05,1.05))
plt.tight_layout()
plt.savefig(out_analysis + '/preliminary_spatial_analysis')
if include_eps:
plt.savefig(out_analysis + '/preliminary_spatial_analysis.eps')
return
##########################################################################################
# compute F
##########################################################################################
##########################################################################################
def compute_F_whole_movie(folder_name,include_eps=False):
"""Compute and return the average deformation gradient for the whole movie."""
# set up folders
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# compute Lambda from x_pos and y_pos
x_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt')
y_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt')
num_sarc = x_pos.shape[0]
num_time = x_pos.shape[1]
num_vec = int((num_sarc * num_sarc - num_sarc) / 2.0)
Lambda_list = []
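	# each column of Lambda holds the vector between one pair of tracked sarcomere
	# positions at frame tt; these pairwise vectors are the material lines used to fit F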
for tt in range(0,num_time):
Lambda = np.zeros((2,num_vec))
ix = 0
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
x_vec = x_pos[kk,tt] - x_pos[jj,tt]
y_vec = y_pos[kk,tt] - y_pos[jj,tt]
Lambda[0,ix] = x_vec
Lambda[1,ix] = y_vec
ix += 1
Lambda_list.append(Lambda)
F_list = []; F11_list = []; F22_list = []; F12_list = []; F21_list = []
J_list = []
for tt in range(0,num_time):
Lambda_0 = Lambda_list[0]
Lambda_t = Lambda_list[tt]
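		# least-squares fit of the average deformation gradient mapping the initial
		# pairwise vectors Lambda_0 onto the current vectors Lambda_t:
		#   F = (Lambda_t Lambda_0^T) (Lambda_0 Lambda_0^T)^(-1)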
term_1 = np.dot( Lambda_t , np.transpose(Lambda_0) )
term_2 = np.linalg.inv( np.dot( Lambda_0 , np.transpose(Lambda_0) ) )
F = np.dot(term_1 , term_2)
F_vec = [F[0,0],F[0,1],F[1,0],F[1,1]]
F_list.append(F_vec)
F11_list.append(F[0,0] - 1.0)
F22_list.append(F[1,1] - 1.0)
F12_list.append(F[0,1])
F21_list.append(F[1,0])
J_list.append(F[0,0]*F[1,1] - F[0,1]*F[1,0])
np.savetxt(out_analysis + '/recovered_F.txt',np.asarray(F_list))
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(F11_list,'r--',linewidth=5, label='F11 recovered')
plt.plot(F22_list,'g--',linewidth=4, label='F22 recovered')
plt.plot(F12_list,'c:',label='F12 recovered')
plt.plot(F21_list,'b:',label='F21 recovered')
plt.legend()
plt.title('recovered deformation gradient')
plt.xlabel('frames');
plt.subplot(1,2,2)
plt.plot(J_list,'k-',label='Jacobian')
plt.xlabel('frames');
plt.legend()
plt.title('det of deformation gradient')
plt.savefig(out_analysis + '/recovered_F_plot')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot.eps')
return
##########################################################################################
def adjust_F_if_movie_starts_not_contracted(folder_name,include_eps=False):
"""Adjust and return the average deformation gradient for the whole movie -- useful if first frame is not the relaxed state."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
F_list = np.loadtxt(out_analysis + '/recovered_F.txt')
J_list = [] #F_vec = [F[0,0],F[0,1],F[1,0],F[1,1]]
for kk in range(0,F_list.shape[0]):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
arg_max = np.argmax(J_list)
# compute Lambda from x_pos and y_pos
x_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt')
y_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt')
num_sarc = x_pos.shape[0]
num_time = x_pos.shape[1]
num_vec = int((num_sarc * num_sarc - num_sarc) / 2.0)
Lambda_list = []
for tt in range(0,num_time):
Lambda = np.zeros((2,num_vec))
ix = 0
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
x_vec = x_pos[kk,tt] - x_pos[jj,tt]
y_vec = y_pos[kk,tt] - y_pos[jj,tt]
Lambda[0,ix] = x_vec
Lambda[1,ix] = y_vec
ix += 1
Lambda_list.append(Lambda)
F_list = []; F11_list = []; F22_list = []; F12_list = []; F21_list = []
J_list = []
for tt in range(0,num_time):
Lambda_0 = Lambda_list[arg_max]
Lambda_t = Lambda_list[tt]
term_1 = np.dot( Lambda_t , np.transpose(Lambda_0) )
term_2 = np.linalg.inv( np.dot( Lambda_0 , np.transpose(Lambda_0) ) )
F = np.dot(term_1 , term_2)
F_vec = [F[0,0],F[0,1],F[1,0],F[1,1]]
F_list.append(F_vec)
F11_list.append(F[0,0] - 1.0)
F22_list.append(F[1,1] - 1.0)
F12_list.append(F[0,1])
F21_list.append(F[1,0])
J_list.append(F[0,0]*F[1,1] - F[0,1]*F[1,0])
np.savetxt(out_analysis + '/recovered_F.txt',np.asarray(F_list))
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(F11_list,'r--',linewidth=5, label='F11 recovered')
plt.plot(F22_list,'g--',linewidth=4, label='F22 recovered')
plt.plot(F12_list,'c:',label='F12 recovered')
plt.plot(F21_list,'b:',label='F21 recovered')
plt.legend()
plt.title('recovered deformation gradient')
plt.xlabel('frames');
plt.subplot(1,2,2)
plt.plot(J_list,'k-',label='Jacobian')
plt.xlabel('frames');
plt.legend()
plt.title('det of deformation gradient')
plt.savefig(out_analysis + '/recovered_F_plot')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot.eps')
return
##########################################################################################
def analyze_J_full_movie(folder_name,include_eps=False):
"""Analyze the Jacobian -- report timeseries parmeters. Must first run compute_F_whole_movie()."""
# set up folders
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(out_analysis + '/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
J_list = np.asarray(J_list)
x = np.asarray(x)
# compute the parameters of the timeseries
plt.figure(figsize=(4,4))
plt.plot(J_list,'k-')
data = J_list
data_med = signal.medfilt(data,5)
deriv = np.gradient(data,x)
count_C = 0; count_R = 0; count_F = 0
thresh_flat = 0.01*(np.max(J_list) - np.min(J_list))
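	# classify each frame by the sign of dJ/dframe: counted as relaxing when the
	# derivative exceeds +1% of the full range of J, contracting when below -1%,
	# and flat otherwise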
pix_leng_median = []; pix_leng_mean = []; pix_leng_min = []; pix_leng_max = []; perc_sarc_short = []
fra_mean_contract_time = []; fra_mean_relax_time = []; fra_mean_flat_time = []; fra_mean_period = []; fra_to_first = []
idx_sarc = []; num_peak_all = []
for kk in range(0,x.shape[0]):
if deriv[kk] > thresh_flat:
count_R += 1
plt.plot(x[kk],J_list[kk],'o',color=(.5,.5,.5))
elif deriv[kk] < -1.0*thresh_flat:
count_C += 1
plt.plot(x[kk],J_list[kk],'o',color=(.5,0,0))
else:
count_F += 1
plt.plot(x[kk],J_list[kk],'o',color=(0,0,.5))
# detect peaks and valleys
input_distance = 10; input_width = 5
th = .00; di = input_distance; wi = input_width # parameters
peaks_U, _ = find_peaks(data_med,threshold=th,distance=di,width=wi)
peaks_L, _ = find_peaks(-1.0*data_med,threshold=th,distance=di,width=wi)
#num_peaks = 0.5 * peaks_U.shape[0] + 0.5 * peaks_L.shape[0]
num_peaks = peaks_L.shape[0]
if num_peaks == 0: num_peaks = 999999
mean_C = count_C / num_peaks
mean_R = count_R / num_peaks
mean_F = count_F / num_peaks
plt.grid()
#plt.plot(x[peaks_U],data[peaks_U],'rx',markersize=10)
plt.plot(x[peaks_L],data[peaks_L],'rx',markersize=13)
plt.title('frames contract: %i, relax: %i, flat: %i'%(count_C,count_R,count_F))
plt.xlabel('frame number')
	plt.ylabel('determinant of average F')
plt.tight_layout()
plt.savefig(out_analysis + '/recovered_F_plot_timeseries')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot_timeseries.eps')
return
##########################################################################################
def visualize_F_full_movie(folder_name,include_eps=False):
"""Visualize the eigenvalues of F -- plot timeseries next to the movie. Must first run compute_F_whole_movie()."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis/F_movie'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(external_folder_name + '/' + folder_name + '/analysis/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
R_list = []
U_list = []
F_list_mat = []
lambda_1_list = []; vec_1_list = []
lambda_2_list = []; vec_2_list = []
th_list = []
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
R, U = polar(np.asarray([[F00,F01],[F10,F11]]))
R_list.append(R); U_list.append(U); F_list_mat.append(np.asarray([[F00,F01],[F10,F11]]))
w, v = LA.eig(U)
lambda_1_list.append(np.min(w)); lambda_2_list.append(np.max(w))
v = np.dot(R, v)
vec_1_list.append(v[:,np.argmin(w)]); vec_2_list.append(v[:,np.argmax(w)])
th_list.append(np.arccos(v[0,0]))
J_list = np.asarray(J_list)
x = np.asarray(x)
J_min = np.min(J_list)
img_list = []
# --> plot
for kk in range(0,num_frames):
raw_img = get_frame_matrix(folder_name, kk)
x_pos_mean = raw_img.shape[0]/2.0; y_pos_mean = raw_img.shape[1]/2.0
plt.figure(figsize=(10*.7,5*.7))
plt.subplot(1,2,1)
plt.imshow(raw_img, cmap=plt.cm.gray)
rad = .2*np.min([raw_img.shape[0],raw_img.shape[1]]); th = np.linspace(0,2.0*np.pi,100)
plt.plot([y_pos_mean-rad*vec_1_list[kk][1],y_pos_mean+rad*vec_1_list[kk][1]],[x_pos_mean-rad*vec_1_list[kk][0],x_pos_mean+rad*vec_1_list[kk][0]],'-',color=(255/255,204/255,203/255),linewidth=0.3)
plt.plot([y_pos_mean-rad*vec_2_list[kk][1],y_pos_mean+rad*vec_2_list[kk][1]],[x_pos_mean-rad*vec_2_list[kk][0],x_pos_mean+rad*vec_2_list[kk][0]],'-',color=(0.5,0.5,0.5),linewidth=0.3)
#plt.plot([y_pos_mean,y_pos_mean],[x_pos_mean-rad,x_pos_mean+rad],'-',color=(255/255,204/255,203/255),linewidth=0.2)
# add in eigenvector directions
x_vec = []; y_vec = [] ; x_vec_circ = []; y_vec_circ = []
scale = np.asarray([[.9,0],[0,.9]])
for jj in range(0,100):
v = np.asarray([rad*np.cos(th[jj]),rad*np.sin(th[jj])])
#v_def = np.dot(np.dot(F_list_mat[jj],scale),v)
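			# the recovered F is applied repeatedly (nine times, i.e. F^9), presumably to
			# exaggerate the small deformation so the deformed circle is visible on screen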
nest1 = np.dot(F_list_mat[kk],F_list_mat[kk])
nest2 = np.dot(F_list_mat[kk],nest1)
nest3 = np.dot(F_list_mat[kk],nest2)
nest4 = np.dot(F_list_mat[kk],nest3)
nest5 = np.dot(F_list_mat[kk],nest4)
nest6 = np.dot(F_list_mat[kk],nest5)
nest7 = np.dot(F_list_mat[kk],nest6)
nest8 = np.dot(F_list_mat[kk],nest7)
v_def = np.dot(nest8,v)
x_vec.append(v_def[0] + x_pos_mean); y_vec.append(v_def[1] + y_pos_mean)
x_vec_circ.append(x_pos_mean + v[0]); y_vec_circ.append(y_pos_mean + v[1])
plt.plot(y_vec_circ,x_vec_circ,'-',color=(255/255,204/255,203/255),linewidth=0.3)
plt.plot(y_vec,x_vec,'-',color=(255/255,204/255,203/255),linewidth=1.0)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([]);
plt.subplot(1,2,2)
plt.plot(x,lambda_1_list,'-',color='k',linewidth=1,label='λ1')
plt.plot(x,lambda_2_list,'-',color=(0.5,0.5,0.5),linewidth=1,label='λ2')
plt.plot(x[kk],lambda_1_list[kk],'o',mfc=(.7,0,0),mec=(0,0,0),markersize=7)
plt.plot(x[kk],lambda_2_list[kk],'o',mfc=(.7,0,0),mec=(0.5,0.5,0.5),markersize=7)
plt.xlabel('frame number')
plt.legend()
plt.tight_layout()
plt.savefig(out_analysis + '/frame_%04d'%(kk))
if include_eps:
plt.savefig(out_analysis + '/frame_%i.eps'%(kk))
plt.close()
img_list.append(imageio.imread(out_analysis + '/frame_%04d.png'%(kk)))
imageio.mimsave(out_analysis + '/F_anim.gif', img_list)
return
##########################################################################################
def save_lambda_from_F(folder_name,include_eps=False):
"""Visualize the eigenvalues of F -- plot timeseries next to the movie. Must first run compute_F_whole_movie()."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(external_folder_name + '/' + folder_name + '/analysis/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
R_list = []
U_list = []
F_list_mat = []
lambda_1_list = []; vec_1_list = []
lambda_2_list = []; vec_2_list = []
th_list = []
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
R, U = polar(np.asarray([[F00,F01],[F10,F11]]))
R_list.append(R); U_list.append(U); F_list_mat.append(np.asarray([[F00,F01],[F10,F11]]))
w, v = LA.eig(U)
lambda_1_list.append(np.min(w)); lambda_2_list.append(np.max(w))
v = np.dot(R, v)
vec_1_list.append(v[:,np.argmin(w)]); vec_2_list.append(v[:,
|
np.argmax(w)
|
numpy.argmax
|
from mcot.core._scripts.surface import gradient
import numpy as np
from scipy import sparse, spatial
from numpy import testing
from mcot.core.surface.test_data import triangle_mesh, mesh_to_cortex
def test_histogram_intersection():
first = np.array([1., 0])
second = np.array([0., 1])
balanced =
|
np.array([1., 1])
|
numpy.array
|
import argparse
import os, sys
# Supress matplotlib display
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import neighbors
from sklearn.externals import joblib
import munk
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-ms', '--munk_scores', required=True)
parser.add_argument('-rs', '--resnik_scores', required=True)
parser.add_argument('-o', '--output', required=True)
parser.add_argument('-lw', '--line_width', type=float, required=False, default=2)
parser.add_argument('-fs', '--font_size', type=float, required=False, default=12)
return parser.parse_args()
def main(args):
munk_data = joblib.load(args.munk_scores)
homologs = munk_data['homologs']
landmarks = munk_data['landmarks']
A_nodes = munk_data['A_nodes']
B_nodes = munk_data['B_nodes']
munk_scores_raw = munk_data['X']
A_n2i = dict((n, i) for i, n in enumerate(A_nodes))
B_n2i = dict((n, i) for i, n in enumerate(B_nodes))
# Get Resnik scores
resnik_data = np.load(args.resnik_scores)
resnik_A_n2i = resnik_data['leftIndex'][()]
resnik_B_n2i = resnik_data['rightIndex'][()]
resnik_scores_raw = resnik_data['Rscore']
print('# A genes with scores:', len(set(A_nodes) & set(resnik_A_n2i.keys())))
print('# B genes with scores:', len(set(B_nodes) & set(resnik_B_n2i.keys())))
A_landmarks, B_landmarks = [set(ls) for ls in zip(*landmarks)]
A_scored_nodes = sorted((set(A_nodes) & set(resnik_A_n2i.keys())) - A_landmarks)
B_scored_nodes = sorted((set(B_nodes) & set(resnik_B_n2i.keys())) - B_landmarks)
print('# scored pairs:', len(A_scored_nodes) * len(B_scored_nodes))
munk_scores = []
resnik_scores = []
r_A_idxs = [resnik_A_n2i[node] for node in A_scored_nodes]
r_B_idxs = [resnik_B_n2i[node] for node in B_scored_nodes]
h_A_idxs = [A_n2i[node] for node in A_scored_nodes]
h_B_idxs = [B_n2i[node] for node in B_scored_nodes]
for r_A, h_A in zip(r_A_idxs, h_A_idxs):
for r_B, h_B in zip(r_B_idxs, h_B_idxs):
munk_scores.append(munk_scores_raw[h_A, h_B])
resnik_scores.append(resnik_scores_raw[r_A, r_B])
munk_scores = np.asarray(munk_scores)
resnik_scores = np.asarray(resnik_scores)
sort_idxs = np.argsort(munk_scores)[::-1]
n_scores = len(sort_idxs)
rand_idxs = np.random.permutation(n_scores)
resnik_scores = np.take(resnik_scores, sort_idxs)
rand_resnik_scores = np.take(resnik_scores, rand_idxs)
#smooth over bins
binsize = 100000
binned_scores = n_scores - (n_scores % binsize)
resnik_scores = np.nanmean(resnik_scores[:binned_scores].reshape((-1,binsize)), axis=1)
rand_resnik_scores = np.nanmean(rand_resnik_scores[:binned_scores].reshape((-1,binsize)), axis=1)
n_bins = len(resnik_scores)
print('# bins', n_bins)
plt.figure()
plt.plot(np.arange(n_bins), rand_resnik_scores,
label='Ranked randomly', lw = args.line_width)
plt.plot(
|
np.arange(n_bins)
|
numpy.arange
|
import taichi as ti
import numpy as np
from billiard_game_single_ball import rectify_positions_and_velocities
EPS = 1e-5
def normalize_vector(vector):
# Corresponding module: normalize, need sequential code
length = np.sqrt((vector ** 2).sum())
if np.isclose(length, 0.):
return vector, length
else:
return vector / length, length
def calc_next_pos_and_velocity(pos_wc, velocity_wc, delta_t, drag_coefficient, g):
# Corresponding verilog module: calc_next_p_and_v, the friction_coeff in calc_next_p_and_v should be drag_coefficient * g
speed = np.sqrt((velocity_wc ** 2).sum()) # can use normalize_vector instead
if np.isclose(speed, 0.0):
next_pos_wc = pos_wc
velocity_wc_next = np.zeros_like(velocity_wc)
else:
v_dir = velocity_wc / speed
drag_force = drag_coefficient * g * v_dir
velocity_wc_next = velocity_wc - delta_t * drag_force
avg_velocity = (velocity_wc_next + velocity_wc) / 2.
displacement = avg_velocity * delta_t
next_pos_wc = pos_wc + displacement
return next_pos_wc, velocity_wc_next
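# Illustrative usage with made-up values (not called anywhere in this module):
# advance a ball by one time step under sliding friction.
def _demo_single_step():
    pos = np.array([0.5, 0.5])
    vel = np.array([1.0, 0.0])
    return calc_next_pos_and_velocity(pos, vel, delta_t=0.01, drag_coefficient=0.2, g=9.8)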
def two_ball_collides(ball1_pos, ball2_pos, radius):
# Corresponding verilog module: ball_collision_detect
diff = ball1_pos - ball2_pos
return (diff ** 2).sum() < (2 * radius) ** 2
def rectify_positions_in_collision(ball1_pos, ball2_pos, radius):
"""
    Avoid multiple spurious collisions when both balls overlap before the collision and have low speed after the collision
"""
# Corresponding module: rectify_p_in_collision
collide_direction, length = normalize_vector(ball1_pos - ball2_pos)
diff = 2 * radius - length + EPS
rectified_ball1_pos = ball1_pos + diff / 2. * collide_direction
rectified_ball2_pos = ball2_pos - diff / 2. * collide_direction
return rectified_ball1_pos, rectified_ball2_pos
def calc_after_collision_velocity(ball1_pos, ball2_pos, ball1_velocity, ball2_velocity):
# Corresponding module: calc_after_collision_v
# Notice: Not tested in Python
# position and velocity of local frame origin w.r.t world coordinate
local_frame_origin_wc = ball1_pos
local_frame_velocity_wc = ball1_velocity
# lc for local coordinate
ball2_v_lc = ball2_velocity - local_frame_velocity_wc
ball2_pos_lc = ball2_pos - local_frame_origin_wc
ball1_v_lc =
|
np.zeros_like(ball1_velocity)
|
numpy.zeros_like
|
import numpy as np
from uncertainties import ufloat
from scipy.stats import sem
import uncertainties.unumpy as unp
u, tt = np.genfromtxt('python/daten/totzeit.txt', unpack=True)
tt = tt*10**(-6)
ttmean = np.mean(tt)
ttsem = sem(tt)
ttm = ufloat(ttmean, ttsem)
print('ttm', ttm)
tt = tt/u
ttmean =
|
np.mean(tt)
|
numpy.mean
|
# -*- coding: utf-8 -*-
"""
Interface to Faster R-CNN object proposals.
"""
import logging
import utool as ut
import vtool as vt
from os.path import abspath, dirname, expanduser, join, exists # NOQA
import numpy as np
import sys
import cv2
(print, rrr, profile) = ut.inject2(__name__, '[faster r-cnn]')
logger = logging.getLogger('wbia')
# SCRIPT_PATH = abspath(dirname(__file__))
SCRIPT_PATH = abspath(expanduser(join('~', 'code', 'py-faster-rcnn')))
if not ut.get_argflag('--no-faster-rcnn'):
try:
assert exists(SCRIPT_PATH)
def add_path(path):
# if path not in sys.path:
sys.path.insert(0, path)
# Add pycaffe to PYTHONPATH
pycaffe_path = join(SCRIPT_PATH, 'caffe-fast-rcnn', 'python')
add_path(pycaffe_path)
# Add caffe lib path to PYTHONPATH
lib_path = join(SCRIPT_PATH, 'lib')
add_path(lib_path)
import caffe
ut.reload_module(caffe)
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
# from fast_rcnn.nms_wrapper import nms
except AssertionError:
logger.info(
'WARNING Failed to find py-faster-rcnn. ' 'Faster R-CNN is unavailable'
)
# if ut.SUPER_STRICT:
# raise
except ImportError:
logger.info('WARNING Failed to import fast_rcnn. ' 'Faster R-CNN is unavailable')
# if ut.SUPER_STRICT:
# raise
VERBOSE_SS = ut.get_argflag('--verbdss') or ut.VERBOSE
CONFIG_URL_DICT = {
# 'pretrained-fast-vgg-pascal' : 'https://wildbookiarepository.azureedge.net/models/pretrained.fastrcnn.vgg16.pascal.prototxt', # Trained on PASCAL VOC 2007
'pretrained-vgg-pascal': 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.vgg16.pascal.prototxt', # Trained on PASCAL VOC 2007
'pretrained-zf-pascal': 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.zf.pascal.prototxt', # Trained on PASCAL VOC 2007
'pretrained-vgg-ilsvrc': 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.vgg16.ilsvrc.prototxt', # Trained on ILSVRC 2014
'pretrained-zf-ilsvrc': 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.zf.ilsvrc.prototxt', # Trained on ILSVRC 2014
'default': 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.vgg16.pascal.prototxt', # Trained on PASCAL VOC 2007
None: 'https://wildbookiarepository.azureedge.net/models/pretrained.fasterrcnn.vgg16.pascal.prototxt', # Trained on PASCAL VOC 2007
}
def _parse_weight_from_cfg(url):
return url.replace('.prototxt', '.caffemodel')
def _parse_classes_from_cfg(url):
return url.replace('.prototxt', '.classes')
def _parse_class_list(classes_filepath):
# Load classes from file into the class list
assert exists(classes_filepath)
class_list = []
with open(classes_filepath) as classes:
for line in classes.readlines():
line = line.strip()
if len(line) > 0:
class_list.append(line)
return class_list
def detect_gid_list(ibs, gid_list, downsample=True, verbose=VERBOSE_SS, **kwargs):
"""
    Args:
        ibs (wbia.IBEISController): image analysis api
        gid_list (list of int): the list of IBEIS image_rowids that need detection
        downsample (bool, optional): a flag to indicate if the original image
            sizes should be used; defaults to True
            True: ibs.get_image_detectpaths() is used
            False: ibs.get_image_paths() is used
    Kwargs (optional): refer to the Faster R-CNN documentation for configuration settings
        detector, config_filepath, weights_filepath, verbose
Yields:
tuple: (gid, gpath, result_list)
CommandLine:
python -m wbia.algo.detect.fasterrcnn detect_gid_list --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.detect.fasterrcnn import * # NOQA
>>> from wbia.core_images import LocalizerConfig
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> gid_list = ibs.get_valid_gids()
>>> config = {'verbose': True}
>>> downsample = False
>>> results_list = detect_gid_list(ibs, gid_list, downsample, **config)
>>> results_list = list(results_list)
>>> print('result lens = %r' % (map(len, list(results_list))))
>>> print('result[0] = %r' % (len(list(results_list[0][2]))))
>>> config = {'verbose': True}
>>> downsample = False
>>> results_list = detect_gid_list(ibs, gid_list, downsample, **config)
>>> results_list = list(results_list)
>>> print('result lens = %r' % (map(len, list(results_list))))
>>> print('result[0] = %r' % (len(list(results_list[0][2]))))
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> ut.show_if_requested()
Yields:
results (list of dict)
"""
# Get new gpaths if downsampling
if downsample:
gpath_list = ibs.get_image_detectpaths(gid_list)
neww_list = [vt.open_image_size(gpath)[0] for gpath in gpath_list]
oldw_list = [oldw for (oldw, oldh) in ibs.get_image_sizes(gid_list)]
downsample_list = [oldw / neww for oldw, neww in zip(oldw_list, neww_list)]
orient_list = [1] * len(gid_list)
else:
gpath_list = ibs.get_image_paths(gid_list)
downsample_list = [None] * len(gpath_list)
orient_list = ibs.get_image_orientation(gid_list)
# Run detection
results_iter = detect(gpath_list, verbose=verbose, **kwargs)
# Upscale the results
_iter = zip(downsample_list, gid_list, orient_list, results_iter)
for downsample, gid, orient, (gpath, result_list) in _iter:
# Upscale the results back up to the original image size
for result in result_list:
if downsample is not None and downsample != 1.0:
for key in ['xtl', 'ytl', 'width', 'height']:
result[key] = int(result[key] * downsample)
bbox = (
result['xtl'],
result['ytl'],
result['width'],
result['height'],
)
bbox_list = [bbox]
bbox = bbox_list[0]
result['xtl'], result['ytl'], result['width'], result['height'] = bbox
yield (gid, gpath, result_list)
def detect(
gpath_list,
config_filepath,
weight_filepath,
class_filepath,
sensitivity,
verbose=VERBOSE_SS,
use_gpu=True,
use_gpu_id=0,
**kwargs,
):
"""
Args:
gpath_list (list of str): the list of image paths that need proposal candidates
Kwargs (optional): refer to the Faster R-CNN documentation for configuration settings
Returns:
iter
"""
cfg.TEST.HAS_RPN = True # Use RPN for proposals
# Get correct config if specified with shorthand
config_url = None
if config_filepath in CONFIG_URL_DICT:
config_url = CONFIG_URL_DICT[config_filepath]
config_filepath = ut.grab_file_url(config_url, appname='wbia', check_hash=True)
# Get correct weights if specified with shorthand
if weight_filepath in CONFIG_URL_DICT:
if weight_filepath is None and config_url is not None:
config_url_ = config_url
else:
config_url_ = CONFIG_URL_DICT[weight_filepath]
weight_url = _parse_weight_from_cfg(config_url_)
weight_filepath = ut.grab_file_url(weight_url, appname='wbia', check_hash=True)
if class_filepath is None:
class_url = _parse_classes_from_cfg(config_url)
class_filepath = ut.grab_file_url(
class_url, appname='wbia', check_hash=True, verbose=verbose
)
class_list = _parse_class_list(class_filepath)
# Need to convert unicode strings to Python strings to support Boost Python
# call signatures in caffe
prototxt_filepath = str(config_filepath) # alias to Caffe nomenclature
caffemodel_filepath = str(weight_filepath) # alias to Caffe nomenclature
assert exists(prototxt_filepath), 'Specified prototxt file not found'
assert exists(caffemodel_filepath), 'Specified caffemodel file not found'
if use_gpu:
caffe.set_mode_gpu()
caffe.set_device(use_gpu_id)
cfg.GPU_ID = use_gpu_id
else:
caffe.set_mode_cpu()
net = caffe.Net(prototxt_filepath, caffemodel_filepath, caffe.TEST)
# Warm-up network on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in range(2):
_, _ = im_detect(net, im)
results_list_ = []
for gpath in gpath_list:
image = cv2.imread(gpath)
score_list, bbox_list = im_detect(net, image)
# Compile results
result_list_ = []
for class_index, class_name in enumerate(class_list[1:]):
class_index += 1 # because we skipped background
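            # bbox_list from im_detect holds 4 columns (xtl, ytl, xbr, ybr) per class
            # and score_list one column per class; slice out this class's boxes/scores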
class_boxes = bbox_list[:, 4 * class_index : 4 * (class_index + 1)]
class_scores = score_list[:, class_index]
dets_list = np.hstack((class_boxes, class_scores[:, np.newaxis]))
dets_list = dets_list.astype(np.float32)
# # Perform NMS
# keep_list = nms(dets_list, nms_sensitivity)
# dets_list = dets_list[keep_list, :]
# Perform sensitivity check
keep_list = np.where(dets_list[:, -1] >= sensitivity)[0]
dets_list = dets_list[keep_list, :]
for (xtl, ytl, xbr, ybr, conf) in dets_list:
xtl = int(
|
np.around(xtl)
|
numpy.around
|
import random
import numpy as np
import math
import networkx as nx
class Person:
"""The person class stores information about a person in the sim."""
counter = 1
def __init__ (self, mother, father, birthYear, age, sex, house, sec, cr, pcr, wage, inc, wlt, iw, fw, we, status, independence):
self.mother = mother
self.motherID = -1 # For pickle
self.father = father
self.fatherID = -1 # For pickle
self.age = age
self.status = status
self.lifeExpectancy = 0
self.independentStatus = independence
self.maternityStatus = False
self.children = []
self.childrenID = [] # For pickle
self.yearMarried = []
self.yearDivorced = []
self.deadYear = 0
self.yearInTown = 0
self.outOfTownStudent = False
self.birthdate = birthYear
self.wage = wage
self.income = inc
self.lastIncome = inc
self.workingPeriods = 0
self.cumulativeIncome = 0
self.potentialIncome = inc
self.wealth = wlt
self.financialWealth = 0
self.wealthSpentOnCare = 0
self.incomeExpenses = 0
self.wealthPV = 0
self.wealthForCare = 0
self.initialIncome = iw
self.finalIncome = fw
self.workExperience = we
self.careNeedLevel = 0
self.socialWork = 0
# Unmet Need variables
self.careNetwork = nx.DiGraph()
self.careDemand = 0
self.unmetCareNeed = 0
self.cumulativeUnmetNeed = 0
self.totalDiscountedShareUnmetNeed = 0
self.averageShareUnmetNeed = 0
self.totalDiscountedTime = 0
self.classRank = cr
self.parentsClassRank = pcr
self.dead = False
self.partner = None
if sex == 'random':
self.sex = random.choice(['male', 'female'])
else:
self.sex = sex
self.house = house
self.houseID = -1 # For pickle
self.sec = sec
self.careAvailable = 0
self.residualSupply = 0
self.movedThisYear = False
# Kinship network variables
self.hoursChildCareDemand = 0
self.netChildCareDemand = 0
self.unmetChildCareNeed = 0
self.hoursSocialCareDemand = 0
self.unmetSocialCareNeed = 0
self.informalChildCareReceived = 0
self.formalChildCareReceived = 0
self.publicChildCareContribution = 0
self.informalSocialCareReceived = 0
self.formalSocialCareReceived = 0
self.childWork = 0
self.socialWork = 0
self.outOfWorkChildCare = 0
self.outOfWorkSocialCare = 0
self.residualWorkingHours = 0
self.availableWorkingHours = 0
self.residualInformalSupplies = [0.0, 0.0, 0.0, 0.0]
self.residualInformalSupply = 0
self.hoursInformalSupplies = [0.0, 0.0, 0.0, 0.0]
self.maxFormalCareSupply = 0
self.totalSupply = 0
self.informalSupplyByKinship = [0.0, 0.0, 0.0, 0.0]
self.formalSupplyByKinship = [0.0, 0.0, 0.0, 0.0]
self.careForFamily = False
self.networkSupply = 0
self.networkTotalSupplies = []
self.weightedTotalSupplies = []
self.networkInformalSupplies = []
self.networkFormalSocialCareSupplies = []
self.careSupplyFromWealth = 0
self.suppliers = []
self.id = Person.counter
Person.counter += 1
class Population:
"""The population class stores a collection of persons."""
def __init__ (self, initial, startYear, minStartAge, maxStartAge,
workingAge, incomeInitialLevels, incomeFinalLevels,
incomeGrowthRate, workDiscountingTime, wageVar, weeklyHours):
self.allPeople = []
self.livingPeople = []
        for i in range(int(initial)//2):
ageMale = random.randint(minStartAge, maxStartAge)
ageFemale = ageMale - random.randint(-2,5)
if ( ageFemale < 24 ):
ageFemale = 24
birthYear = startYear - random.randint(minStartAge,maxStartAge)
classes = [0, 1, 2, 3, 4]
probClasses = [0.2, 0.35, 0.25, 0.15, 0.05]
classRank = np.random.choice(classes, p = probClasses)
workingTime = 0
for i in range(int(ageMale)-int(workingAge[classRank])):
workingTime *= workDiscountingTime
workingTime += 1
dKi = np.random.normal(0, wageVar)
initialWage = incomeInitialLevels[classRank]*math.exp(dKi)
dKf = np.random.normal(dKi, wageVar)
finalWage = incomeFinalLevels[classRank]*math.exp(dKf)
c =
|
np.math.log(initialWage/finalWage)
|
numpy.math.log
|
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from .pure_python_weighted_npairs_per_object_xy import pure_python_weighted_npairs_per_object_xy
from ..weighted_npairs_per_object_xy import weighted_npairs_per_object_xy
from ...tests.cf_helpers import generate_3d_regular_mesh
from ...tests.cf_helpers import generate_thin_cylindrical_shell_of_points
__all__ = ('test_weighted_npairs_per_object_xy_brute_force_pbc', )
fixed_seed = 43
def test_weighted_npairs_per_object_xy_brute_force_pbc():
"""
"""
npts1, npts2 = 500, 111
with NumpyRNGContext(fixed_seed):
data1 = np.random.random((npts1, 2))
data2 = np.random.random((npts2, 2))
w2 = np.random.rand(npts2)
rp_bins = np.array((0.01, 0.1, 0.2, 0.3))
xperiod, yperiod = 1, 1
xarr1, yarr1 = data1[:, 0], data1[:, 1]
xarr2, yarr2 = data2[:, 0], data2[:, 1]
counts, python_weighted_counts = pure_python_weighted_npairs_per_object_xy(
xarr1, yarr1, xarr2, yarr2, w2, rp_bins, xperiod, yperiod)
cython_weighted_counts = weighted_npairs_per_object_xy(data1, data2, w2, rp_bins, period=1)
assert np.allclose(cython_weighted_counts, python_weighted_counts)
# Verify the PBC enforcement is non-trivial
cython_weighted_counts = weighted_npairs_per_object_xy(data1, data2, w2, rp_bins)
assert not np.allclose(cython_weighted_counts, python_weighted_counts)
def test_weighted_npairs_per_object_xy_brute_force_no_pbc():
"""
"""
npts1, npts2 = 500, 111
with NumpyRNGContext(fixed_seed):
data1 = np.random.random((npts1, 2))
data2 = np.random.random((npts2, 2))
w2 = np.random.rand(npts2)
rp_bins = np.array((0.01, 0.1, 0.2, 0.3))
xperiod, yperiod = np.inf, np.inf
xarr1, yarr1 = data1[:, 0], data1[:, 1]
xarr2, yarr2 = data2[:, 0], data2[:, 1]
counts, python_weighted_counts = pure_python_weighted_npairs_per_object_xy(
xarr1, yarr1, xarr2, yarr2, w2, rp_bins, xperiod, yperiod)
cython_weighted_counts = weighted_npairs_per_object_xy(data1, data2, w2, rp_bins)
assert np.allclose(cython_weighted_counts, python_weighted_counts)
# Verify the PBC enforcement is non-trivial
cython_weighted_counts = weighted_npairs_per_object_xy(data1, data2, w2, rp_bins, period=1)
assert not np.allclose(cython_weighted_counts, python_weighted_counts)
def test_regular_grid1():
""" For ``sample1`` a regular grid and ``sample2`` a tightly locus of points
in the immediate vicinity of a grid node, verify that the returned counts
are correct with scalar inputs for proj_search_radius and cylinder_half_length
"""
period = 1
num_pts_per_dim = 5
centers = generate_3d_regular_mesh(num_pts_per_dim)
num_cyl = centers.shape[0]
num_ptcl = 100
particles = generate_thin_cylindrical_shell_of_points(num_ptcl, 0.01, 0.1,
xc=0.101, yc=0.101, zc=0.101, seed=fixed_seed)
masses = np.logspace(2, 5, particles.shape[0])
rp_bins =
|
np.array((0.005, 0.02))
|
numpy.array
|
#
# Module to handle SOWFA boundary data that belong in
# casedir/constant/boundaryData
#
# Written by <NAME> (<EMAIL>)
#
import os
import numpy as np
import matplotlib.pyplot as plt
from windtools.io.series import TimeSeries
contour_colormap = 'RdBu_r' # more soothing blues and reds
pointsheader = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\\ / O peration | Version: 2.4.x |
| \\\\ / A nd | Web: www.OpenFOAM.org |
| \\\\/ M anipulation | |
\\*---------------------------------------------------------------------------*/
FoamFile
{{
version 2.0;
format {fmt:s};
class vectorField;
location "constant/boundaryData/{patchName:s}";
object points;
}}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
{N:d}
("""
dataheader = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\\ / O peration | Version: 2.4.x |
| \\\\ / A nd | Web: www.OpenFOAM.org |
| \\\\/ M anipulation | |
\\*---------------------------------------------------------------------------*/
FoamFile
{{
version 2.0;
format {fmt:s};
class {patchType:s}AverageField;
location "constant/boundaryData/{patchName:s}/{timeName:s}";
object values;
}}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Average
{avgValue:s}
{N:d}
("""
def write_points(fname,x,y,z,patchName='patch',
fmt='%f',order='C'):
"""Write out a points file which should be stored in
constant/boundaryData/patchName/points
"""
N = len(x)
assert(N == len(y) == len(z))
if len(x.shape) > 1:
x = x.ravel(order=order)
y = y.ravel(order=order)
z = z.ravel(order=order)
N = len(x)
dpath = os.path.split(fname)[0]
if not os.path.isdir(dpath):
os.makedirs(dpath)
fmtstr = '({:s} {:s} {:s})'.format(fmt,fmt,fmt)
np.savetxt(fname,
np.stack((x,y,z)).T, fmt=fmtstr,
header=pointsheader.format(patchName=patchName,N=N,fmt='ascii'),
footer=')',
comments='')
def write_data(fname,
data,
patchName='patch',
timeName=0,
avgValue=None):
"""Write out a boundaryData file which should be stored in
constant/boundarydata/patchName/timeName/fieldName
Parameters
----------
fname : str
Output data file name
data : numpy.ndarray
Field data to be written out, with shape (3,N) for vectors
and shape (N) for scalars; 2-D or 3-D data should be flattened
beforehand.
patchName : str, optional
Name of the boundary patch
timeName : scalar or str, optional
Name of corresponding time directory
avgValue : scalar or list-like, optional
To set avgValue in the data file; probably not used.
@author: ewquon
"""
dims = data.shape
N = dims[-1]
if len(dims) == 1:
patchType = 'scalar'
if avgValue is None:
avgValueStr = '0'
else:
avgValueStr = str(avgValue)
elif len(dims) == 2:
patchType = 'vector'
assert(dims[0] == 3)
if avgValue is None:
avgValueStr = '(0 0 0)'
else:
avgValueStr = '(' + ' '.join([str(val) for val in list(avgValue)]) + ')'
else:
print('ERROR: Unexpected number of dimensions! No data written.')
return
dpath = os.path.split(fname)[0]
if not os.path.isdir(dpath):
os.makedirs(dpath)
headerstr = dataheader.format(patchType=patchType,
patchName=patchName,
timeName=str(timeName),
avgValue=avgValueStr,
N=N,
fmt='ascii')
if patchType == 'vector':
np.savetxt(fname,
data.T, fmt='(%g %g %g)',
header=headerstr, footer=')',
comments='')
elif patchType == 'scalar':
np.savetxt(fname,
data.reshape((N,1)), fmt='%g',
header=headerstr, footer=')',
comments='')
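# Illustrative usage (hypothetical patch name, path, and values; not part of the
# module API): write the points file and a vector field for one time directory.
def _example_write_inflow_patch():
    y, z = np.meshgrid(np.linspace(0., 10., 2), np.linspace(0., 5., 3), indexing='ij')
    x = np.zeros_like(y)
    write_points('constant/boundaryData/west/points', x, y, z, patchName='west')
    U = np.stack((np.full(y.size, 8.0), np.zeros(y.size), np.zeros(y.size)))
    write_data('constant/boundaryData/west/0/U', U, patchName='west', timeName=0)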
def get_unique_points_from_list(ylist,zlist,NY=None,NZ=None,order='F'):
"""Detects y and z (1-D arrays) from a list of points on a
structured grid. Makes no assumptions about the point
ordering
"""
ylist = np.array(ylist)
zlist = np.array(zlist)
N = len(zlist)
assert(N == len(ylist))
if (NY is not None) and (NZ is not None):
# use specified plane dimensions
assert(NY*NZ == N)
y = ylist.reshape((NY,NZ))[:,0]
elif zlist[1]==zlist[0]:
# y changes faster, F-ordering
NY = np.nonzero(zlist > zlist[0])[0][0]
NZ = int(N / NY)
assert(NY*NZ == N)
y = ylist[:NY]
z = zlist.reshape((NY,NZ),order='F')[0,:]
elif ylist[1]==ylist[0]:
# z changes faster, C-ordering
NZ = np.nonzero(ylist > ylist[0])[0][0]
NY = int(N / NZ)
assert(NY*NZ == N)
z = zlist[:NZ]
y = ylist.reshape((NY,NZ),order='C')[:,0]
else:
print('Unrecognized point distribution')
print('"y" :',len(ylist),ylist)
print('"z" :',len(zlist),zlist)
return ylist,zlist,False
return y,z,True
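# Illustrative usage (hypothetical values, not used by the module): recover the
# 1-D y and z axes from a flattened, F-ordered list of structured-grid points.
def _example_get_unique_points():
    yy, zz = np.meshgrid(np.array([0., 1., 2.]), np.array([0., 10.]), indexing='ij')
    y, z, ok = get_unique_points_from_list(yy.ravel(order='F'), zz.ravel(order='F'))
    # expected: y == [0, 1, 2], z == [0, 10], ok == True
    return y, z, ok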
def read_points(fname,tol=1e-6,return_const=False,**kwargs):
"""Returns a 2D set of points if one of the coordinates is constant
otherwise returns a 3D set of points. Assumes that the points are on a
structured grid.
"""
N = None
points = None
iread = 0
with open(fname,'r') as f:
while N is None:
try:
N = int(f.readline())
except ValueError: pass
else:
points = np.zeros((N,3))
print('Reading',N,'points from',fname)
for line in f:
line = line[:line.find('\\')].strip()
try:
points[iread,:] = [ float(val) for val in line[1:-1].split() ]
except (ValueError, IndexError): pass
else:
iread += 1
assert(iread == N)
#constX = np.all(points[:,0] == points[0,0])
#constY = np.all(points[:,1] == points[0,1])
#constZ = np.all(points[:,2] == points[0,2])
    constX = np.max(points[:,0]) - np.min(points[:,0]) < tol
    constY = np.max(points[:,1]) - np.min(points[:,1]) < tol
    constZ = np.max(points[:,2]) - np.min(points[:,2]) < tol
print('Constant in x/y/z :',constX,constY,constZ)
if not (constX or constY):
print('Warning: boundary is not constant in X or Y?')
if constX:
x0 = np.mean(points[:,0])
ylist = points[:,1]
zlist = points[:,2]
elif constY:
x0 = np.mean(points[:,1])
ylist = points[:,0]
zlist = points[:,2]
elif constZ:
x0 = np.mean(points[:,2])
ylist = points[:,0]
zlist = points[:,1]
else:
print('Unexpected boundary orientation, returning full list of points')
return points
y,z,is_structured = get_unique_points_from_list(ylist,zlist,**kwargs)
assert(is_structured)
if return_const:
return x0,y,z
else:
return y,z
def read_vector_data(fname,Ny=None,Nz=None,order='C',verbose=False):
"""Read vector field data from a structured boundary data patch"""
N = None
data = None
iread = 0
with open(fname,'r') as f:
for line in f:
if N is None:
try:
N = int(line)
if (Ny is not None) and (Nz is not None):
if not N == Ny*Nz:
Ny = None
Nz = None
data = np.zeros((N,3))
if verbose: print('Reading',N,'vectors from',fname)
except ValueError: pass
elif not line.strip() in ['','(',')',';'] \
and not line.strip().startswith('//'):
data[iread,:] = [ float(val) for val in line.strip().strip('()').split() ]
iread += 1
assert(iread == N)
if (Ny is not None) and (Nz is not None):
vectorField = np.zeros((3,Ny,Nz))
for i in range(3):
vectorField[i,:,:] = data[:,i].reshape((Ny,Nz),order=order)
else:
vectorField = data.T
return vectorField
def read_scalar_data(fname,Ny=None,Nz=None,order='C',verbose=False):
"""Read scalar field data from a structured boundary data patch"""
N = None
data = None
iread = 0
with open(fname,'r') as f:
for line in f:
if (N is None) or N < 0:
try:
if N is None:
avgval = float(line)
N = -1 # skip first scalar, which is the average field value (not used)
else:
assert(N < 0)
N = int(line) # now read the number of points
if (Ny is not None) and (Nz is not None):
if not N == Ny*Nz:
Ny = None
Nz = None
data = np.zeros(N)
if verbose: print('Reading',N,'scalars from',fname)
except ValueError: pass
elif not line.strip() in ['','(',')',';'] \
and not line.strip().startswith('//'):
data[iread] = float(line)
iread += 1
assert(iread == N)
if (Ny is not None) and (Nz is not None):
scalarField = data.reshape((Ny,Nz),order=order)
else:
scalarField = data
return scalarField
class BoundaryData(object):
"""Object to handle boundary data"""
def __init__(self,
bdpath,Ny=None,Nz=None,order='F',
fields=['U','T','k'],
verbose=True):
"""Process timeVaryingMapped* boundary data located in in
constant/boundaryData/<name>
"""
self.dpath = bdpath
assert(os.path.isdir(bdpath))
self.name = os.path.split(bdpath)[-1]
self.ts = TimeSeries(bdpath,dirs=True,verbose=verbose)
self.t = np.array(self.ts.times)
self.Ntimes = self.ts.Ntimes
kwargs = {}
if (Ny is not None) and (Nz is not None):
kwargs = dict(Ny=Ny, Nz=Nz)
self.y, self.z = read_points(os.path.join(bdpath,'points'), **kwargs)
self.Ny = len(self.y)
self.Nz = len(self.z)
#self.yy, self.zz = np.meshgrid(self.y, self.z, indexing='ij')
dy = np.diff(self.y)
dz = np.diff(self.z)
assert(np.all(dy==dy[0])) # uniform spacing assumed
assert(np.all(dz==dz[0]))
dy = dy[0]
dz = dz[0]
self.yy, self.zz = np.meshgrid(np.arange(self.Ny+1)*dy + self.y[0]-dy/2,
|
np.arange(self.Nz+1)
|
numpy.arange
|
import unittest
import numpy.testing as npt
import numpy as np
from gym_anm.simulator import Simulator
from gym_anm.simulator.solve_load_flow import solve_pfe_newton_raphson
class TestSimulatorTransition(unittest.TestCase):
def setUp(self):
self.delta_t = 0.5
self.lamb = 100
self.baseMVA = 1
self.places = 4
self.rtol = 1e-4
def test_2bus_2dev(self):
"""Solve a single-branch 2-bus AC load flow."""
# Network definition.
network = {
'baseMVA': self.baseMVA,
'bus': np.array([[0, 0, 50, 1., 1.],
[1, 1, 50, 1.1, 0.9]]),
'branch': np.array([[0, 1, 0.01, 0.1, 0., 32, 1, 0]]),
'device': np.array([
[0, 0, 0, None, 200, -200, 200, -200, None, None, None, None,
None, None, None], # slack
[1, 1, -1, 0.2, 0, -10, None, None, None, None, None, None, None,
None, None] # load
])
}
simulator = Simulator(network, self.delta_t, self.lamb)
# Set device fixed power injections.
N = 50
for i, pq in enumerate(np.random.uniform(-1, 0, size=(N, 2))):
simulator.devices[1].p = pq[0]
simulator.devices[1].q = pq[1]
# Set bus injections (same as device injections).
simulator.buses[1].p = simulator.devices[1].p
simulator.buses[1].q = simulator.devices[1].q
# My own implementation.
solve_pfe_newton_raphson(simulator)
self._check_pfe_solution(simulator)
def test_3bus_4dev(self):
"""Solve load flow with a loop network (no transformers)."""
# Network definition.
network = {
'baseMVA': self.baseMVA,
'bus': np.array([[0, 0, 50, 1., 1.],
[1, 1, 50, 1.1, 0.9],
[2, 1, 50, 1.1, 0.9]]),
'branch': np.array([[0, 1, 0.01, 0.1, 0., 30, 1, 0],
[1, 2, 0.02, 0.3, 0.2, 30, 1, 0],
[2, 0, 0.05, 0.2, 0.1, 30, 1, 0]]),
'device': np.array([
[0, 0, 0, None, 200, -200, 200, -200, None, None, None, None, None, None, None], # slack
[1, 1, -1, 0.2, 0, -10, None, None, None, None, None, None, None, None, None], # load
[2, 1, 1, None, 200, 0, 200, -200, None, None, None, None, None, None, None], # gen
[3, 2, 2, None, 200, 0, 200, -200, None, None, None, None, None, None, None], # renewable
[4, 2, 3, None, 200, -200, 200, -200, None, None, None, None, 100, 0, 0.9] # storage
])
}
simulator = Simulator(network, self.delta_t, self.lamb)
# Set device fixed power injections.
N = 50
pq_load = np.random.uniform(-1, 0, (N, 2))
pq_gen = np.random.uniform(0, 1, (N, 2))
pq_ren = np.random.uniform(0, 1, (N, 2))
pq_des =
|
np.random.uniform(-1, 1, (N, 2))
|
numpy.random.uniform
|
from __future__ import absolute_import, print_function, division
import logging
import numpy as np
import os
from tornado import gen
from .generic_models import GenericModelHandler, async_crossval, axes_grid
from ..models import REGRESSION_MODELS
class RegressionModelHandler(GenericModelHandler):
def get(self, fignum):
'''Download predictions as CSV.'''
fig_data = self.get_fig_data(int(fignum))
if fig_data is None:
self.write('Oops, something went wrong. Try again?')
return
if fig_data.last_plot != 'regression_preds':
self.write('No plotted data to download.')
return
all_ds = self.request_many_ds()
if not all_ds:
self.write('No datasets selected.')
return
# collect primary keys for row labels
all_pkeys = []
for ds in all_ds:
dv = ds.view(mask=fig_data.filter_mask[ds])
all_pkeys.extend(dv.get_primary_keys())
# get data from the scatterplots
names, actuals, preds = [], [], []
for ax in fig_data.figure.axes:
if not ax.collections:
break
names.append(ax.get_title())
scat = ax.collections[0]
actual, pred = scat.get_offsets().T
preds.append(pred)
# HACK: if there are 6 lines on the plot, it's a boxplot, and thus
# there are no actual values to report. Instead, they're random jitter.
if len(ax.lines) == 6:
actual.fill(np.nan)
actuals.append(actual)
fname = os.path.basename(self.request.path)
self.set_header('Content-Type', 'text/plain')
self.set_header('Content-Disposition',
'attachment; filename='+fname)
# first header line: spectrum,foo,,bar,,baz,
self.write('Spectrum,' + ',,'.join(names) + ',\n')
# secondary header: ,Actual,Pred,Actual,Pred,Actual,Pred
self.write(',' + ','.join(['Actual,Pred']*len(names)) + '\n')
if actuals and preds:
actuals = np.column_stack(actuals)
preds = np.column_stack(preds)
for key, aa, pp in zip(all_pkeys, actuals, preds):
row = ','.join('%g,%g' % t for t in zip(aa, pp))
self.write('%s,%s\n' % (key, row))
self.finish()
@gen.coroutine
def post(self):
res = self.validate_inputs()
if res is None:
return
fig_data, all_ds_views, ds_kind, wave, X = res
variables = self.collect_variables(all_ds_views,
self.get_arguments('pred_meta[]'))
regress_kind = self.get_argument('regress_kind')
variate_kind = self.get_argument('variate_kind')
model_cls = REGRESSION_MODELS[regress_kind][variate_kind]
params = dict(pls=int(self.get_argument('pls_comps')),
lasso=float(self.get_argument('lasso_alpha')),
lars=int(self.get_argument('lars_num_channels')))
do_train = self.get_argument('do_train', None)
if do_train is None:
if len(variables) == 0:
self.visible_error(400, "No variables to predict.")
return
no_crossval = (len(variables) > 1 and variate_kind == 'multi' and
regress_kind == 'lasso')
if no_crossval:
msg = "Cross validation for %svariate %s is not yet supported." % (
variate_kind, regress_kind.title())
self.visible_error(400, msg)
return
# set up cross validation info
folds = int(self.get_argument('cv_folds'))
stratify_meta = self.get_argument('cv_stratify', '')
if stratify_meta:
vals, _ = self.collect_one_variable(all_ds_views, stratify_meta)
_, stratify_labels = np.unique(vals, return_inverse=True)
else:
stratify_labels = None
num_vars = 1 if variate_kind == 'multi' else len(variables)
cv_args = (X, variables)
cv_kwargs = dict(num_folds=folds, labels=stratify_labels)
logging.info('Running %d-fold (%s) cross-val for %s', folds,
stratify_meta, model_cls.__name__)
if regress_kind == 'pls':
comps = np.arange(int(self.get_argument('cv_min_comps')),
int(self.get_argument('cv_max_comps')) + 1)
cv_kwargs['comps'] = comps
plot_kwargs = dict(xlabel='# components')
elif regress_kind == 'lasso':
plot_kwargs = dict(xlabel='alpha', logx=True)
else:
chans = np.arange(int(self.get_argument('cv_min_chans')),
int(self.get_argument('cv_max_chans')) + 1)
cv_kwargs['chans'] = chans
plot_kwargs = dict(xlabel='# channels')
# run the cross validation
yield gen.Task(async_crossval, fig_data, model_cls, num_vars, cv_args,
cv_kwargs, **plot_kwargs)
return
if bool(int(do_train)):
# train on all the data
model = model_cls(params[regress_kind], ds_kind, wave)
logging.info('Training %s on %d inputs, predicting %d vars',
model, X.shape[0], len(variables))
model.train(X, variables)
fig_data.pred_model = model
else:
# use existing model
model = fig_data.pred_model
if model.ds_kind != ds_kind:
logging.warning('Mismatching model kind. Expected %r, got %r', ds_kind,
model.ds_kind)
# use the model's variables, with None instead of actual values
dummy_vars = {key: (None, name) for key, name in
zip(model.var_keys, model.var_names)}
# use the actual variables if we have them
for key in model.var_keys:
if key in variables:
dummy_vars[key] = variables[key]
variables = dummy_vars
# make sure we're using the same wavelengths
if wave.shape != model.wave.shape or not
|
np.allclose(wave, model.wave)
|
numpy.allclose
|
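A minimal sketch of the numpy.allclose check that the truncated condition above completes: the request's wavelength grid is accepted only if it has the same shape as the model's grid and agrees element-wise within tolerance. Both arrays below are invented stand-ins for wave and model.wave.
import numpy as np
wave = np.linspace(400.0, 700.0, 5)       # assumed request wavelengths
model_wave = wave + 1e-9                  # assumed model.wave, numerically close
if wave.shape != model_wave.shape or not np.allclose(wave, model_wave):
    print('wavelength grids differ')
else:
    print('wavelength grids match')       # this branch runs here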
"""Class for ingesting data, processing it, and sending L0 visibilities onwards."""
import time
import math
import logging
import asyncio
import enum
import argparse
import textwrap
import functools
from collections import deque
import gc
from typing import (
Mapping, Dict, List, Tuple, Deque, Set, Iterable, Callable, Awaitable,
Optional, TypeVar, Any
) # noqa: F401
import numpy as np
import numpy.lib.stride_tricks as _stride_tricks # noqa: F401 # Keeps mypy happy
import astropy.units as u
import aiohttp
import spead2
import spead2.send
import spead2.recv
import spead2.send.asyncio
import spead2.recv.asyncio
import katsdpsigproc.accel
from katsdpsigproc import resource
import katsdpsigproc.rfi.device as rfi
import katsdpmodels.fetch.aiohttp
import katsdpmodels.band_mask
import katsdpmodels.rfi_mask
import katsdpservices
from katdal import SpectralWindow
from katdal.flags import CAM, DATA_LOST, INGEST_RFI, STATIC
import katpoint
import katsdptelstate.aio
from katsdptelstate.endpoint import endpoints_to_str, Endpoint
from . import utils, receiver, sender, sigproc
from .utils import Sensor
logger = logging.getLogger(__name__)
# Attributes that are required for data to be correctly ingested
CBF_CRITICAL_ATTRS = frozenset([
'n_chans', 'n_chans_per_substream', 'n_accs', 'bls_ordering',
'bandwidth', 'center_freq', 'input_labels',
'sync_time', 'int_time', 'scale_factor_timestamp', 'ticks_between_spectra'])
_M = TypeVar('_M', bound='katsdpmodels.models.Model')
class Status(enum.Enum):
"""State of the ingest state machine"""
INIT = 0
WAIT_DATA = 1
CAPTURING = 2
COMPLETE = 3
class DeviceStatus(enum.Enum):
"""Standard katcp device status"""
OK = 0
DEGRADED = 1
FAIL = 2
class _TimeAverage:
"""Manages a collection of dumps that are averaged together at a specific
cadence.
This object never sees dump contents directly, only dump indices. When an
index is added that is not part of the current group, `flush` is called.
Parameters
----------
ratio : int
Number of input dumps per output dump
flush
Asynchronous callback which is called once a set of dumps is ready to
be averaged. It is passed the output dump index.
Attributes
----------
ratio : int
number of input dumps per output dump
_start_idx : int
Index of first dump in the current group, or ``None`` if no dumps have been seen.
There is at least one dump in the current group if and only if this is
not ``None``.
"""
def __init__(self, ratio: int, flush: Callable[[int], Awaitable[None]]) -> None:
self.ratio = ratio
self.flush = flush
self._start_idx: Optional[int] = None
def _warp_start(self, idx: int) -> None:
"""Set :attr:`start_idx` to the smallest multiple of ratio that is <= idx."""
self._start_idx = idx // self.ratio * self.ratio
async def add_index(self, idx: int) -> None:
"""Record that a dump with a given index has arrived and is about to be processed.
This may call the `flush` callback given to the constructor.
"""
if self._start_idx is None:
self._warp_start(idx)
elif idx >= self._start_idx + self.ratio:
await self.flush(self._start_idx // self.ratio)
self._warp_start(idx)
async def finish(self, flush: bool = True) -> None:
"""Flush if not empty and `flush` is true, and reset to initial state"""
if self._start_idx is not None and flush:
await self.flush(self._start_idx // self.ratio)
self._start_idx = None
def _mid_timestamp_rel(time_average: _TimeAverage, recv: receiver.Receiver, idx: int) -> float:
"""Convert an output dump index into a timestamp.
Parameters
----------
time_average : :class:`_TimeAverage`
Averager, used to get the ratio of input to output dumps
recv : :class:`.receiver.Receiver`
Receiver, used to get CBF attributes, start timestamp and interval
idx : int
Output dump index
Returns
-------
ts_rel : float
Time in seconds from CBF sync time to the middle of the dump
"""
ts_raw = (idx + 0.5) * time_average.ratio * recv.interval + recv.timestamp_base
return ts_raw / recv.cbf_attr['scale_factor_timestamp']
def _split_array(x: np.ndarray, dtype) -> np.ndarray:
"""Return a view of x which has one extra dimension. Each element is x is
treated as some number of elements of type `dtype`, whose size must divide
into the element size of `x`."""
in_dtype = x.dtype
out_dtype = np.dtype(dtype)
if in_dtype.hasobject or out_dtype.hasobject:
raise ValueError('dtypes containing objects are not supported')
if in_dtype.itemsize % out_dtype.itemsize != 0:
raise ValueError('item size does not evenly divide')
interface = dict(x.__array_interface__)
if interface.get('mask', None) is not None:
raise ValueError('masked arrays are not supported')
interface['shape'] = x.shape + (in_dtype.itemsize // out_dtype.itemsize,)
if interface['strides'] is not None:
interface['strides'] = x.strides + (out_dtype.itemsize,)
interface['typestr'] = out_dtype.str
interface['descr'] = out_dtype.descr
return np.asarray(np.lib.stride_tricks.DummyArray(interface, base=x))
def _fast_unique(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Faster version of :func:`np.unique` for specific case.
It is equivalent to ``np.unique(x, axis=0, return_inverse=True)``, where
``x`` must be a 2D array.
It treats each row as raw binary data, so the semantics may change if there
are different encodings of the same value (such as -0.0 and +0.0).
"""
# Implementation is based on
# https://github.com/numpy/numpy/issues/11136#issue-325345618. In short, each
# row is viewed as a binary blob, which for some reason makes it hundreds of
# times faster.
x =
|
np.ascontiguousarray(x)
|
numpy.ascontiguousarray
|
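A hedged sketch of why the completion calls numpy.ascontiguousarray: the blob trick described in _fast_unique views each row as one opaque binary value, which requires the rows to be laid out contiguously in memory. The toy array and the void view below illustrate the idea and are not the library's exact code.
import numpy as np
x = np.array([[1, 2], [3, 4], [1, 2]], dtype=np.int64)
x = np.ascontiguousarray(x)                                    # rows must be contiguous before viewing
blob_dtype = np.dtype((np.void, x.dtype.itemsize * x.shape[1]))
blobs = x.view(blob_dtype).ravel()                             # one binary blob per row
_, inverse = np.unique(blobs, return_inverse=True)
print(inverse)                                                 # [0 1 0]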
import numpy
import h5py
from copy import deepcopy
TRIAL_DURATION = 10
PRE_TRIAL_DURATION = 2.5
############
# File I/O #
############
# The Plexon .nex file was imported into Octave using the readexFile.m
# script and then saved as an HD5 file using Octave's
# "save -hdf5 file_name.hd5 variable". This was then imported into
# python using h5py.
#
# In that format, the commands below should work. This is obviously
# idiosyncratic to our system.
def extract_spikes(hd5_file, neuron_num=0):
"""Extracts the spiking data from the hdf5 file. Returns an array of
spike times.
Keyword arguments:
neuron_num -- the index of the neuron you would like to access.
"""
with h5py.File(hd5_file, "r+") as f:
neuron_list = f['NF']['value']['neurons']['value']
if len(neuron_list) <= 10:
neuron_str = "_" + str(neuron_num)
else:
neuron_str = "_" + "0" * (2 - len(str(neuron_num))) + str(neuron_num)
timestamps = numpy.array(neuron_list[neuron_str]['value']['timestamps']['value'][0])
return(timestamps)
def extract_events(hd5_file):
"""Extracts the timestamps of the events stored in the hdf5 file."""
events = {}
with h5py.File(hd5_file, "r+") as f:
event_list = f['NF']['value']['events']['value']
for key in event_list.iterkeys():
if key == 'dims':
continue
name = event_list[key]['value']['name']['value']
# The resulting hdf5 file contains the names not as strings
# but as arrays of integers which encode the string in
# ASCII format.
name_str = ''.join(map(chr, name))
try:
timestamps = numpy.array(event_list[key]['value']['timestamps']['value'][0])
except:
timestamps = numpy.array([], dtype='float64')
events[name_str] = timestamps
return(events)
def load_events_spikes_script(neuron_num=0, spike_files=None, event_files=None, exception=None, variables=None, **kwargs):
"""Extracts spikes and events
"""
event_set = [extract_events(f) for f in event_files]
if type(neuron_num) is int:
if exception is not None:
spike_set = [event_set[i][exception[neuron_num]] for i in range(len(event_set))]
else:
spike_set = [extract_spikes(f, neuron_num) for f in spike_files]
elif (type(neuron_num) is list) or (type(neuron_num) is tuple):
spike_set = []
for num in neuron_num:
if exception is not None:
spike_set_temp = [event_set[i][exception[num]] for i in range(len(event_set))]
else:
spike_set_temp = [extract_spikes(f, num) for f in spike_files]
spike_set.append(spike_set_temp)
return(event_set, spike_set)
######################
# Basic Calculations #
######################
def create_complete_table(event_set, spike_set, variable_maps, pre_trial_duration=PRE_TRIAL_DURATION, trial_duration=TRIAL_DURATION, stim_variables=['T', 'F'], action_variables=['NPT', 'NPF']):
assert len(action_variables) == len(stim_variables)
stimuli = []
stimuli_time = []
actions = []
correctness = []
nose_pokes = []
num_neurons = len(spike_set)
responses = [[] for i in xrange(num_neurons)]
all_trial_times = []
for events, spikes_list, variable_map in zip(event_set, zip(*spike_set), variable_maps):
for stim_variable, action_variable in zip(stim_variables, action_variables):
trial_times = events[variable_map[stim_variable]]
try:
nose_poke_times = numpy.array(events[variable_map[action_variable]])
except:
nose_poke_times = None
try:
correct_times = events[variable_map[stim_variable + '+']]
except:
correct_times = None
all_trial_times.extend(trial_times)
for i, trial_time in enumerate(trial_times):
stimuli.append(stim_variable)
stimuli_time.append(trial_time)
# finding correctness of trial
if correct_times is None:
correctness.append('U')
elif trial_time in correct_times:
correctness.append('+')
else:
correctness.append('-')
# finding nosepoke time
if nose_poke_times is None:
nose_pokes.append(numpy.nan) # This conflates no response with unknown response. There is probably a better system.
actions.append('U')
else:
if i == len(trial_times) - 1:
index = (nose_poke_times > trial_time)
else:
index = (nose_poke_times > trial_time)*(nose_poke_times < trial_times[i+1])
if sum(index) == 0:
nose_poke_time = numpy.nan
actions.append('W')
else:
nose_poke_time = nose_poke_times[index][0] - trial_time
actions.append('NP')
nose_pokes.append(nose_poke_time)
for i, spikes in enumerate(spikes_list):
current_response = spikes[(spikes >= trial_time - pre_trial_duration)*(spikes < (trial_time + trial_duration))] - trial_time
responses[i].append(deepcopy(current_response))
sort_index =
|
numpy.argsort(all_trial_times)
|
numpy.argsort
|
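A small sketch of the numpy.argsort completion: the trial times gathered across sessions and stimuli are not in chronological order, and argsort gives the permutation that sorts them so the parallel lists (stimuli, responses, nose pokes) can be reordered consistently. The values are invented.
import numpy as np
all_trial_times = np.array([12.0, 3.5, 7.2, 0.9])
stimuli = ['T', 'F', 'T', 'F']
sort_index = np.argsort(all_trial_times)
print(sort_index)                              # [3 1 2 0]
print(all_trial_times[sort_index])             # [ 0.9  3.5  7.2 12. ]
print([stimuli[i] for i in sort_index])        # ['F', 'F', 'T', 'T']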
"""
@package ion_functions.test.adcp_functions
@file ion_functions/test/test_adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Unit tests for adcp_functions module
"""
from nose.plugins.attrib import attr
from ion_functions.test.base_test import BaseUnitTestCase
import numpy as np
from ion_functions.data import adcp_functions as af
from ion_functions.data.adcp_functions import ADCP_FILLVALUE
from ion_functions.data.generic_functions import SYSTEM_FILLVALUE
@attr('UNIT', group='func')
class TestADCPFunctionsUnit(BaseUnitTestCase):
def setUp(self):
"""
Implemented by:
2014-02-06: <NAME>. Initial Code.
2015-06-12: <NAME>. Changed raw beam data to type int. This
change did not affect any previously written unit tests.
"""
# set test inputs -- values from DPS
self.b1 = np.array([[-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]]) * 1000
self.b2 = np.array([[0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]]) * 1000
self.b3 = np.array([[-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]]) * 1000
self.b4 = np.array([[-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]]) * 1000
# the data type of the raw beam velocities is int;
# set b1-b4 to int so that fill replacement can be tested.
self.b1 = self.b1.astype(int)
self.b2 = self.b2.astype(int)
self.b3 = self.b3.astype(int)
self.b4 = self.b4.astype(int)
#
self.echo = np.array([[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250]])
self.sfactor = 0.45
# units of compass data are in centidegrees.
self.heading = 9841
self.pitch = 69
self.roll = -254
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([[0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063]])
self.vv = np.array([[-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461]])
self.ww = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
# set expected results -- magnetic variation correction applied
# (computed in Matlab using above values and mag_var.m)
self.uu_cor = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
self.vv_cor = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
# set the expected results -- error velocity
self.ee = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# set the expected results -- echo intensity conversion from counts to dB
self.dB = np.array([[0.00, 11.25, 22.50, 33.75, 45.00, 56.25, 67.50,
78.75, 90.00, 101.25, 112.50]])
def test_adcp_beam(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error.
Tests adcp_beam2ins, adcp_ins2earth and magnetic_correction functions
for ADCPs that output data in beam coordinates. All three functions
must return the correct output for final tests cases to work.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-06: <NAME>. Added tests to confirm arrays of
arrays can be processed (in other words, vectorized the
code).
2015-06-23: <NAME>. Revised documentation. Added unit test
for the function adcp_beam_error.
Notes:
The original suite of tests within this function did not provide a
test for adcp_beam_error. However, adcp_beam_error and vadcp_beam_error
are identical functions, and vadcp_beam_error is implicitly tested in the
test_vadcp_beam function when the 4th output argument of adcp_beam2ins
is tested. Therefore values to directly test adcp_beam_error were
then derived from the function itself and included as part of the unit
test within this code (test_adcp_beam).
"""
# single record case
got_uu_cor = af.adcp_beam_eastward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_vv_cor = af.adcp_beam_northward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_ww = af.adcp_beam_vertical(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient)
got_ee = af.adcp_beam_error(self.b1, self.b2, self.b3, self.b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, self.ww, 4)
np.testing.assert_array_almost_equal(got_ee, self.ee, 4)
# reset the test inputs for multiple records
b1 = np.tile(self.b1, (24, 1))
b2 = np.tile(self.b2, (24, 1))
b3 = np.tile(self.b3, (24, 1))
b4 = np.tile(self.b4, (24, 1))
heading = np.ones(24, dtype=np.int) * self.heading
pitch = np.ones(24, dtype=np.int) * self.pitch
roll = np.ones(24, dtype=np.int) * self.roll
orient = np.ones(24, dtype=np.int) * self.orient
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
depth = np.ones(24) * self.depth
ntp = np.ones(24) * self.ntp
# reset outputs for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
ww = np.tile(self.ww, (24, 1))
ee = np.tile(self.ee, (24, 1))
# multiple record case
got_uu_cor = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_vv_cor = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_ww = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
got_ee = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, ww, 4)
np.testing.assert_array_almost_equal(got_ee, ee, 4)
def test_adcp_beam_with_fill(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error when system fill values and
ADCP fill values (bad value sentinels) are present in the data stream.
Non-fill values are based on those used in test_adcp_beam in this module.
Implemented by:
2013-06-24: <NAME>. Initial code.
Notes:
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### set input data
# units of compass data are in centidegrees.
heading = np.array([9841])
pitch = np.array([69])
roll = np.array([-254])
missingroll = np.array([sfill])
orient = np.array([1])
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
###
# for positional clarity, input beam and expected velocities will be explicitly
# enumerated for each single time record test case.
###
### single time record case; missing roll data
## the ADCP does not use its bad flag sentinel for compass data, only beam data.
## however, it is possible that CI could supply the system fillvalue for missing compass data.
# input data
# beam velocity units are mm/s
b1_x1 = np.array([[-30, -295, -514, -234, -188, 203, -325, 305, -204, -294]])
b2_x1 = np.array([[180, -132, 213, 309, 291, 49, 188, 373, -2, 172]])
b3_x1 = np.array([[-398, -436, -131, -473, -443, 188, -168, 291, -179, 8]])
b4_x1 = np.array([[-216, -605, -92, -58, 484, -5, 338, 175, -80, -549]])
# expected results if all good beam and compass data
# these will be used later in the multiple time record test
uu_x0 = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
vv_x0 = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
ww_x0 = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
ee_x0 = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# expected results for all good beam data, missing roll data;
# nans for all results except for the error velocity, which does not depend on the compass
uu_x1 = uu_x0 * np.nan
vv_x1 = vv_x0 * np.nan
ww_x1 = ww_x0 * np.nan
ee_x1 = np.copy(ee_x0)
uu_calc = af.adcp_beam_eastward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient)
ee_calc = af.adcp_beam_error(b1_x1, b2_x1, b3_x1, b4_x1)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x1, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x1, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x1, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x1, 4)
### single time record case; missing and bad-flagged beam data, good compass data
# input data
b1_x2 = np.array([[sfill, -295, -514, -234, -188, 203, -325, afill, -204, -294]])
b2_x2 = np.array([[sfill, -132, 213, 309, 291, 49, 188, afill, -2, sfill]])
b3_x2 = np.array([[sfill, -436, -131, -473, -443, 188, -168, afill, -179, 8]])
b4_x2 = np.array([[sfill, -605, -92, -58, afill, -5, 338, afill, -80, -549]])
# expected
uu_x2 = np.array([[np.nan, -0.3221, -0.4025, 0.2092, np.nan,
-0.1595, 0.3471, np.nan, 0.0053, np.nan]])
vv_x2 = np.array([[np.nan, -0.0916, -0.9773, -0.9707, np.nan,
0.3188, -0.9940, np.nan, -0.3229, np.nan]])
ww_x2 = np.array([[np.nan, 0.3977, 0.1870, 0.1637, np.nan,
-0.1290, 0.0334, np.nan, 0.1384, np.nan]])
ee_x2 = np.array([[np.nan, 0.634704, -0.080630, 0.626434, np.nan,
0.071326, -0.317352, np.nan, 0.054787, np.nan]])
# calculated
uu_calc = af.adcp_beam_eastward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1_x2, b2_x2, b3_x2, b4_x2)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x2, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x2, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x2, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x2, 4)
### multiple (5) record case
## reset the test inputs for 5 time records
# 1st time record is the bad/missing beam data case above
# 2nd time record is a missing heading data case
# 3rd time record is all good data
# 4th time record is bad/missing beam and missing pitch data.
# 5th time record is missing orientation data
b1 = np.vstack((b1_x2, b1_x1, b1_x1, b1_x2, b1_x1))
b2 = np.vstack((b2_x2, b2_x1, b2_x1, b2_x2, b2_x1))
b3 = np.vstack((b3_x2, b3_x1, b3_x1, b3_x2, b3_x1))
b4 = np.vstack((b4_x2, b4_x1, b4_x1, b4_x2, b4_x1))
heading = np.hstack((heading, sfill, heading, heading, heading))
pitch = np.hstack((pitch, pitch, pitch, sfill, pitch))
roll = np.tile(roll, 5)
orient = np.hstack((orient, orient, orient, orient, sfill))
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
# set expected outputs for these 5 records
# notes:
# (1) heading is not used in the calculation of vertical velocity,
# therefore the second entry to ww_xpctd is good data out (ww_x0),
# not nans as resulted from the missingroll test.
# (2) pitch is not used in the calculation of error velocity, so that
# in the mixed case (time record 4) the error velocity should be
# the same as that for the pure bad/missing beam case (ee_x2, 1st
# and 4th entries in ee_xpctd).
# (3) the orientation argument affects the roll calculation, so that
# when its value is missing (5th time record) the expected result
# would be the same as if the roll value were missing. therefore
# the 5th column entries are all x1 results.
uu_xpctd = np.vstack((uu_x2, uu_x1, uu_x0, uu_x1, uu_x1))
vv_xpctd = np.vstack((vv_x2, vv_x1, vv_x0, vv_x1, vv_x1))
ww_xpctd = np.vstack((ww_x2, ww_x0, ww_x0, ww_x1, ww_x1))
ee_xpctd = np.vstack((ee_x2, ee_x1, ee_x0, ee_x2, ee_x1))
# calculated
uu_calc = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_xpctd, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_xpctd, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_xpctd, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_xpctd, 4)
def test_adcp_earth(self):
"""
Tests magnetic_correction function for ADCPs set to output data in the
Earth Coordinate system.
Values were not defined in DPS, were recreated using test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2014-02-06: <NAME>. Initial code.
2015-06-10: <NAME>.
Changed adcp_ins2earth to require the units of the compass
data to be in centidegrees.
"""
# set the test data
u, v, w, e = af.adcp_beam2ins(self.b1, self.b2, self.b3, self.b4)
### old adcp_ins2earth returned 3 variables (CEW)
# adcp_ins2earth now requires compass data in units of centidegrees (RAD)
uu, vv, ww = af.adcp_ins2earth(u, v, w, self.heading, self.pitch,
self.roll, self.orient)
# test the magnetic variation correction
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
def test_adcp_earth_int_input_velocity_data(self):
"""
Tests adcp_earth_eastward and adcp_earth_northward using int type raw velocity data,
as will be supplied by CI. Also tests the almost trivial functions adcp_earth_vertical
and adcp_earth_error (unit change).
Input raw velocity values were derived from the float unit test in test_adcp_earth
by rounding the uu and vv float output from adcp_ins2earth. These int inputs failed
the assert_array_almost_equal unit tests (decimals=4) in test_adcp_earth because of
round-off error but passed when the agreement precision was relaxed to decimals=3.
This is taken as justification to more precisely calculate the expected values for
unit tests in the current module from adcp_earth_eastward and adcp_earth_northward
themselves (the very modules being tested), using as input the type int raw velocity
data. Because these DPA functions were used to derive their own check data, the
original (float type input velocity data) unit tests are retained in the
test_adcp_earth function.
The tests in this module will be used to derive unit tests checking the replacement
of ADCP int bad value sentinels (-32768) with Nans; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-16: <NAME>. Initial code.
"""
# set the input test data [mm/sec]
uu = np.array([[218, -281, -100, 483, 1238, -245, 622, -181, 99, -906]])
vv = np.array([[-337, -182, -1052, -868, -892, 258, -850, -87, -307, -546]])
ww = np.array([[140, 398, 187, 164, 9, -129, 33, -302, 138, 197]])
ee = np.array([[790, 635, 81, 626, 64, 71, -317, 219, 55, 433]])
# expected values, calculated using adcp_earth_eastward and adcp_earth_northward
uu_cor = np.array([[0.11031103, -0.32184604, -0.40227939, 0.20903718, 0.92426103,
-0.15916447, 0.34724837, -0.19849871, 0.00522179, -1.02580274]])
vv_cor = np.array([[-0.38590734, -0.09219615, -0.97717720, -0.97109035, -1.21410442,
0.31820696, -0.99438552, -0.03046741, -0.32252555, -0.25822614]])
# expected values, calculated by changing units from mm/s to m/s
ww_vel = ww / 1000.0
ee_vel = ee / 1000.0
# test the magnetic variation correction using type integer inputs for the velocities.
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
# and the unit change functions
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
ww = np.tile(ww, (24, 1))
ee = np.tile(ee, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(uu_cor, (24, 1))
vv_cor = np.tile(vv_cor, (24, 1))
ww_vel = np.tile(ww_vel, (24, 1))
ee_vel = np.tile(ee_vel, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
# and the unit change functions
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
def test_adcp_earth_with_fill(self):
"""
Tests adcp_earth_eastward, adcp_earth_northward, adcp_earth_vertical and
adcp_earth_error when system fill values and ADCP fill values (bad value
sentinels) are present in the data stream.
Non-fill test values come from the function test_adcp_earth_int_input_velocity_data
in this module.
Implemented by:
2014-06-25: <NAME>. Initial code.
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### scalar time case
# set the input test data
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
# input velocities [mm/sec]
uu_in0 = np.array([[218, sfill, -100, 483, afill, -245]])
vv_in0 = np.array([[sfill, -182, -1052, -868, -892, afill]])
ww_in0 = np.array([[sfill, 398, afill, 164, 9, -129]])
ee_in0 = np.array([[afill, 635, 81, 626, sfill, 71]])
# expected values [m/sec]
uu_x0 = np.array([[np.nan, np.nan, -0.40227, 0.20903, np.nan, np.nan]])
vv_x0 = np.array([[np.nan, np.nan, -0.97717, -0.97109, np.nan, np.nan]])
ww_x0 = np.array([[np.nan, 0.398, np.nan, 0.164, 0.009, -0.129]])
ee_x0 = np.array([[np.nan, 0.635, 0.081, 0.626, np.nan, 0.071]])
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
### multiple time record case
# set the input test data
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
uu_in0 = np.tile(uu_in0, (5, 1))
vv_in0 = np.tile(vv_in0, (5, 1))
ww_in0 = np.tile(ww_in0, (5, 1))
ee_in0 = np.tile(ee_in0, (5, 1))
# expected
uu_x0 = np.tile(uu_x0, (5, 1))
vv_x0 = np.tile(vv_x0, (5, 1))
ww_x0 = np.tile(ww_x0, (5, 1))
ee_x0 = np.tile(ee_x0, (5, 1))
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
def test_adcp_backscatter(self):
"""
Tests echo intensity scaling function (adcp_backscatter) for ADCPs
in order to convert from echo intensity in counts to dB.
Values were not defined in DPS, were created using test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by <NAME>, 2014-02-06
<NAME>, 2015-06-25. Added tests for fill values.
"""
# the single record case
got = af.adcp_backscatter(self.echo, self.sfactor)
np.testing.assert_array_almost_equal(got, self.dB, 4)
# the multi-record case -- inputs
raw = np.tile(self.echo, (24, 1))
sf = np.ones(24) * self.sfactor
# the multi-record case -- outputs
dB = np.tile(self.dB, (24, 1))
got = af.adcp_backscatter(raw, sf)
np.testing.assert_array_almost_equal(got, dB, 4)
### test fill value replacement with nan
# for convenience
sfill = SYSTEM_FILLVALUE
# the adcp bad sentinel fillvalue (requires 2 bytes) is not used for echo
# intensity, which is stored in 1 byte.
# the single time record case
echo_with_fill, xpctd = np.copy(self.echo), np.copy(self.dB)
echo_with_fill[0, 3], xpctd[0, 3] = sfill, np.nan
echo_with_fill[0, 6], xpctd[0, 6] = sfill, np.nan
echo_with_fill[0, 7], xpctd[0, 7] = sfill, np.nan
got = af.adcp_backscatter(echo_with_fill, self.sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
# the multiple time record case
echo_with_fill = np.vstack((echo_with_fill, self.echo, echo_with_fill))
xpctd = np.vstack((xpctd, self.dB, xpctd))
sfactor = np.tile(self.sfactor, (3, 1))
got = af.adcp_backscatter(echo_with_fill, sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
def test_vadcp_beam(self):
"""
Indirectly tests vadcp_beam_eastward, vadcp_beam_northward,
vadcp_beam_vertical_est, and vadcp_beam_vertical_true functions (which
call adcp_beam2ins and adcp_ins2earth) and vadcp_beam_error (which only
calls adcp_beam2ins) for the specialized 5-beam ADCP. Application of
the magnetic correction and conversion from mm/s to m/s is not applied.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELTURB_OOI.pdf)
Implemented by:
2014-07-24: <NAME>. Initial code.
2015-06-10: <NAME>.
adcp_ins2earth now requires the units of the compass
data to be in centidegrees.
"""
# test inputs
b1 = np.ones((10, 10)).astype(int) * -325
b2 = np.ones((10, 10)).astype(int) * 188
b3 = np.ones((10, 10)).astype(int) * 168
b4 = np.ones((10, 10)).astype(int) * -338
b5 = np.ones((10, 10)).astype(int) * -70
# units of centidegrees
heading = np.array([30, 30, 30, 30, 30,
32, 32, 32, 32, 32]) * 100
pitch = np.array([0, 2, 3, 3, 1, 2, 2, 3, 3, 1]) * 100
roll = np.array([0, 4, 3, 4, 3, 3, 4, 3, 4, 3]) * 100
orient = np.ones(10, dtype=np.int)
# expected outputs
vle = np.array([279.6195, 282.6881, 281.8311, 282.7147,
282.1188, 246.2155, 246.9874, 246.1226,
247.0156, 246.4276]).reshape(-1, 1)
vle = np.reshape(np.tile(vle, 10), (10, 10))
vln = np.array([-1015.5964, -1018.0226, -1018.2595, -1017.9765,
-1017.7612, -1027.3264, -1027.2681, -1027.4749,
-1027.2230, -1026.9870]).reshape(-1, 1)
vln = np.reshape(np.tile(vln, 10), (10, 10))
vlu = np.array([81.6756, 3.3916, 3.5950, -9.4974,
29.4154, 16.5077, 3.3916, 3.5950,
-9.4974, 29.4154]).reshape(-1, 1)
vlu = np.reshape(np.tile(vlu, 10), (10, 10))
evl = np.array([34.1128, 34.1128, 34.1128, 34.1128,
34.1128, 34.1128, 34.1128, 34.1128,
34.1128, 34.1128]).reshape(-1, 1)
evl = np.reshape(np.tile(evl, 10), (10, 10))
w5 = np.array([70.0000, -8.2485, -8.0487, -21.1287,
17.7575, 4.8552, -8.2485, -8.0487,
-21.1287, 17.7575]).reshape(-1, 1)
w5 = np.reshape(np.tile(w5, 10), (10, 10))
# test the transformations
u, v, w_est, e = af.adcp_beam2ins(b1, b2, b3, b4)
uu, vv, ww_est = af.adcp_ins2earth(u, v, w_est, heading, pitch, roll, orient)
_, _, ww_true = af.adcp_ins2earth(u, v, b5, heading, pitch, roll, orient)
# compare the results
np.testing.assert_array_almost_equal(uu, vle, 4)
np.testing.assert_array_almost_equal(vv, vln, 4)
np.testing.assert_array_almost_equal(ww_est, vlu, 4)
np.testing.assert_array_almost_equal(e, evl, 4)
np.testing.assert_array_almost_equal(ww_true, w5, 4)
#### KEEP: RAD 2015-06-22:
"""
## Given that these unit tests have validated the VADCP DPA functions, use these
## vadcp functions to generate values for unit tests with (a) type integer inputs
## (b) that use the vadcp functions themselves, instead of their constituent sub-
## routines, so that unit tests checking the trapping of CI fill values (-999999999)
## and ADCP instrument bad value sentinels (-32768) can be constructed.
#lat = np.ones(10) * self.lat
#lon = np.ones(10) * self.lon
#z = np.ones(10) * self.depth
#dt = np.ones(10) * self.ntp
#
#vle = af.vadcp_beam_eastward(b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
#vln = af.vadcp_beam_northward(b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
#vlu_4bm = af.vadcp_beam_vertical_est(b1, b2, b3, b4, heading, pitch, roll, orient)
#vlu_5bm = af.vadcp_beam_vertical_true(b1, b2, b3, b4, b5, heading, pitch, roll, orient)
#err = af.vadcp_beam_error(b1, b2, b3, b4)
#
#print vle.T
#print vln.T
#print vlu_4bm.T
#print vlu_5bm.T
#print err.T
"""
#### RAD 2015-06-22
def test_vadcp_beam_int_input_velocity_data(self):
"""
Tests vadcp_beam_eastward, vadcp_beam_northward, vadcp_beam_vertical_est,
vadcp_beam_vertical_true and vadcp_beam_error functions for the specialized 5-beam ADCP
using int type raw velocity data, as will be supplied by CI.
Test values come from the function test_vadcp_beam, in this module.
The tests in this module will be used to derive unit tests checking the replacement
of ADCP int bad value sentinels (-32768) with Nans; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-22: <NAME>. Initial code.
"""
# inputs
b1 = np.ones((10, 10), dtype=np.int) * -325
b2 = np.ones((10, 10), dtype=np.int) * 188
b3 = np.ones((10, 10), dtype=np.int) * 168
b4 = np.ones((10, 10), dtype=np.int) * -338
b5 = np.ones((10, 10), dtype=np.int) * -70
# units of centidegrees
heading = np.array([30, 30, 30, 30, 30,
32, 32, 32, 32, 32]) * 100
pitch = np.array([0, 2, 3, 3, 1, 2, 2, 3, 3, 1]) * 100
roll = np.array([0, 4, 3, 4, 3, 3, 4, 3, 4, 3]) * 100
orient = np.ones(10, dtype=np.int)
lat = np.ones(10) * self.lat
lon = np.ones(10) * self.lon
z = np.ones(10) * self.depth
dt = np.ones(10) * self.ntp
# expected outputs from test_vadcp_beam
vle_xpctd = np.array([[-0.02853200, -0.02630381, -0.02719268, -0.02626496, -0.02677222,
-0.06390457, -0.06314916, -0.06403668, -0.06310908, -0.06360277]])
vle_xpctd = np.tile(vle_xpctd.T, (1, 10))
vln_xpctd = np.array([[-1.05300003, -1.05621525, -1.05619207, -1.05617896, -1.05579924,
-1.05448459, -1.05465384, -1.05459965, -1.05461893, -1.05422174]])
vln_xpctd = np.tile(vln_xpctd.T, (1, 10))
vlu_4bm_xpctd = np.array([[0.08167564, 0.0033916, 0.00359505, -0.0094974, 0.02941538,
0.01650774, 0.0033916, 0.00359505, -0.0094974, 0.02941538]])
vlu_4bm_xpctd = np.tile(vlu_4bm_xpctd.T, (1, 10))
vlu_5bm_xpctd = np.array([[0.07000000, -0.00824854, -0.00804866, -0.02112871, 0.01775751,
0.00485518, -0.00824854, -0.00804866, -0.02112871, 0.01775751]])
vlu_5bm_xpctd = np.tile(vlu_5bm_xpctd.T, (1, 10))
err_vel_xpctd = np.tile(0.03411279, (10, 10))
vle_calc = af.vadcp_beam_eastward(
b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
vln_calc = af.vadcp_beam_northward(
b1, b2, b3, b4, heading, pitch, roll, orient, lat, lon, z, dt)
vlu_4bm_calc = af.vadcp_beam_vertical_est(b1, b2, b3, b4, heading, pitch, roll, orient)
vlu_5bm_calc = af.vadcp_beam_vertical_true(b1, b2, b3, b4, b5, heading, pitch, roll, orient)
err_vel_calc = af.vadcp_beam_error(b1, b2, b3, b4)
np.testing.assert_array_almost_equal(vle_calc, vle_xpctd, 6)
np.testing.assert_array_almost_equal(vln_calc, vln_xpctd, 6)
np.testing.assert_array_almost_equal(vlu_4bm_calc, vlu_4bm_xpctd, 6)
np.testing.assert_array_almost_equal(vlu_5bm_calc, vlu_5bm_xpctd, 6)
np.testing.assert_array_almost_equal(err_vel_calc, err_vel_xpctd, 6)
def test_vadcp_beam_with_fill(self):
"""
Tests vadcp_beam_eastward, vadcp_beam_northward, vadcp_beam_vertical_est,
vadcp_beam_vertical_true and vadcp_beam_error functions for the specialized
5-beam ADCP when system fill values and ADCP fill values (bad value sentinels)
are present in the data stream.
Non-fill test values come from the function test_vadcp_beam_int_input_velocity_data
in this module.
Implemented by:
2014-06-25: <NAME>. Initial code.
Notes:
Before this time there have been no scalar time tests for the vadcp functions.
Therefore, scalar time tests are included in this test function.
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### scalar tests with all good data
# inputs
b1_x0 = np.tile(-325, (1, 6))
b2_x0 = np.tile(188, (1, 6))
b3_x0 = np.tile(168, (1, 6))
b4_x0 = np.tile(-338, (1, 6))
b5_x0 = np.tile(-70, (1, 6))
# units of centidegrees
heading = np.array([3000])
pitch = np.array([200])
roll = np.array([400])
orient = np.array([1]) # vertical orientation
lat = np.array([50.0000])
lon = np.array([-145.0000])
z = np.array([0.0])
dt = np.array([3545769600.0]) # May 12, 2012
# expected outputs from test_vadcp_beam
uu_x0 = np.tile(-0.02630, (1, 6))
vv_x0 = np.tile(-1.05621, (1, 6))
ww_4bm_x0 = np.tile(0.00330, (1, 6))
ww_5bm_x0 = np.tile(-0.00824, (1, 6))
ee_x0 = np.tile(0.03411, (1, 6))
uu_calc = af.vadcp_beam_eastward(
b1_x0, b2_x0, b3_x0, b4_x0, heading, pitch, roll, orient, lat, lon, z, dt)
vv_calc = af.vadcp_beam_northward(
b1_x0, b2_x0, b3_x0, b4_x0, heading, pitch, roll, orient, lat, lon, z, dt)
ww_4bm_calc = af.vadcp_beam_vertical_est(
b1_x0, b2_x0, b3_x0, b4_x0, heading, pitch, roll, orient)
ww_5bm_calc = af.vadcp_beam_vertical_true(
b1_x0, b2_x0, b3_x0, b4_x0, b5_x0, heading, pitch, roll, orient)
ee_calc = af.vadcp_beam_error(b1_x0, b2_x0, b3_x0, b4_x0)
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_4bm_calc, ww_4bm_x0, 4)
np.testing.assert_array_almost_equal(ww_5bm_calc, ww_5bm_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
### single time record case; missing roll data
## the ADCP does not use its bad flag sentinel for compass data, only beam data.
## however, it is possible that CI could supply the system fillvalue for missing compass data.
# input data
# beam inputs are the same as for the all good data scalar test above
b1_x1 = np.array([[-325, -325, -325, -325, -325, -325]])
b2_x1 = np.array([[188, 188, 188, 188, 188, 188]])
b3_x1 = np.array([[168, 168, 168, 168, 168, 168]])
b4_x1 = np.array([[-338, -338, -338, -338, -338, -338]])
b5_x1 = np.array([[-70, -70, -70, -70, -70, -70]])
# compass data as above, except the roll value from the instrument is missing:
missingroll = sfill
# expected results for all good beam data, missing roll data;
# nans for all results except for the error velocity, which does not depend on the compass
uu_x1 = uu_x0 * np.nan
vv_x1 = vv_x0 * np.nan
ww_4bm_x1 = ww_4bm_x0 * np.nan
ww_5bm_x1 = ww_5bm_x0 * np.nan
ee_x1 = np.copy(ee_x0)
# calculated
uu_calc = af.vadcp_beam_eastward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, z, dt)
vv_calc = af.vadcp_beam_northward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, z, dt)
ww_4bm_calc = af.vadcp_beam_vertical_est(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient)
ww_5bm_calc = af.vadcp_beam_vertical_true(b1_x1, b2_x1, b3_x1, b4_x1, b5_x1, heading, pitch,
missingroll,
orient)
ee_calc = af.vadcp_beam_error(b1_x1, b2_x1, b3_x1, b4_x1)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x1, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x1, 4)
np.testing.assert_array_almost_equal(ww_4bm_calc, ww_4bm_x1, 4)
np.testing.assert_array_almost_equal(ww_5bm_calc, ww_5bm_x1, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x1, 4)
### single time record case; missing and bad-flagged beam data, good compass data
# input data
b1_x2 = np.array([[-325, -325, -325, sfill, -325, -325]])
b2_x2 = np.array([[188, afill, 188, 188, 188, 188]])
b3_x2 = np.array([[168, 168, 168, 168, 168, 168]])
b4_x2 = np.array([[-338, -338, -338, -338, -338, -338]])
b5_x2 = np.array([[sfill, sfill, -70, -70, afill, -70]])
# expected
uu_x2 = np.array([[-0.02630, np.nan, -0.02630, np.nan, -0.02630, -0.02630]])
vv_x2 = np.array([[-1.05621, np.nan, -1.05621, np.nan, -1.05621, -1.05621]])
ww_4bm_x2 = np.array([[0.00330, np.nan, 0.00330, np.nan, 0.00330, 0.00330]])
ww_5bm_x2 = np.array([[np.nan, np.nan, -0.00824, np.nan, np.nan, -0.00824]])
ee_x2 = np.array([[0.03411, np.nan, 0.03411, np.nan, 0.03411, 0.03411]])
# calculated
uu_calc = af.vadcp_beam_eastward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, z, dt)
vv_calc = af.vadcp_beam_northward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, z, dt)
ww_4bm_calc = af.vadcp_beam_vertical_est(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient)
ww_5bm_calc = af.vadcp_beam_vertical_true(b1_x2, b2_x2, b3_x2, b4_x2, b5_x2,
heading, pitch, roll, orient)
ee_calc = af.vadcp_beam_error(b1_x2, b2_x2, b3_x2, b4_x2)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x2, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x2, 4)
np.testing.assert_array_almost_equal(ww_4bm_calc, ww_4bm_x2, 4)
np.testing.assert_array_almost_equal(ww_5bm_calc, ww_5bm_x2, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x2, 4)
### multiple (5) record case
## reset the test inputs for 5 time records
# 1st time record is the bad/missing beam data case above
# 2nd time record is a missing heading data case
# 3rd time record is all good data (note, b1_x1 = b1_x0, etc)
# 4th time record is bad/missing beam and missing pitch data.
# 5th time record is missing orientation data
b1 = np.vstack((b1_x2, b1_x1, b1_x1, b1_x2, b1_x1))
b2 = np.vstack((b2_x2, b2_x1, b2_x1, b2_x2, b2_x1))
b3 = np.vstack((b3_x2, b3_x1, b3_x1, b3_x2, b3_x1))
b4 = np.vstack((b4_x2, b4_x1, b4_x1, b4_x2, b4_x1))
b5 = np.vstack((b5_x2, b5_x1, b5_x1, b5_x2, b5_x1))
heading = np.hstack((heading, sfill, heading, heading, heading))
pitch = np.hstack((pitch, pitch, pitch, sfill, pitch))
roll = np.tile(roll, 5)
orient = np.hstack((orient, orient, orient, orient, sfill))
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
z = np.tile(z, 5)
dt = np.tile(dt, 5)
# set expected outputs for these 5 records
# notes:
# (1) heading is not used in the calculation of vertical velocities,
# therefore the second entries to the ww_xpctd products are good
# data out (ww_x0), not nans as resulted from the missingroll test.
# (2) pitch is not used in the calculation of error velocity, so that
# in the mixed case (time record 4) the error velocity should be
# the same as that for the pure bad/missing beam case (ee_x2, 1st
# and 4th entries in ee_xpctd).
# (3) the orientation argument affects the roll calculation, so that
# when its value is missing (5th time record) the expected result
# would be the same as if the roll value were missing. therefore
# the 5th column entries are all x1 results.
uu_xpctd = np.vstack((uu_x2, uu_x1, uu_x0, uu_x1, uu_x1))
vv_xpctd = np.vstack((vv_x2, vv_x1, vv_x0, vv_x1, vv_x1))
ww_4bm_xpctd = np.vstack((ww_4bm_x2, ww_4bm_x0, ww_4bm_x0, ww_4bm_x1, ww_4bm_x1))
ww_5bm_xpctd = np.vstack((ww_5bm_x2, ww_5bm_x0, ww_5bm_x0, ww_5bm_x1, ww_5bm_x1))
ee_xpctd = np.vstack((ee_x2, ee_x1, ee_x0, ee_x2, ee_x1))
# calculated
uu_calc = af.vadcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, z, dt)
vv_calc = af.vadcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, z, dt)
ww_4bm_calc = af.vadcp_beam_vertical_est(b1, b2, b3, b4,
heading, pitch, roll, orient)
ww_5bm_calc = af.vadcp_beam_vertical_true(b1, b2, b3, b4, b5,
heading, pitch, roll, orient)
ee_calc = af.vadcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_xpctd, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_xpctd, 4)
np.testing.assert_array_almost_equal(ww_4bm_calc, ww_4bm_xpctd, 4)
np.testing.assert_array_almost_equal(ww_5bm_calc, ww_5bm_xpctd, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_xpctd, 4)
def test_adcp_ins2earth_orientation(self):
"""
Test the adcp worker routine adcp_ins2earth when the vertical orientation
toggle switch codes for downward-looking (orient = 0).
The instrument to earth coordinate transformation was coded in matlab using
the January 2010 version of the Teledyne RD Instruments "ADCP Coordinate
Transformation" manual. The code was checked against the upwards looking
unit tests using adcp_ins2earth in the test_vadcp_beam function in this module;
the values agreed to beyond single precision. This matlab code was then used
to generate the check values for the downward looking case.
Implemented by:
2014-07-02: <NAME>. Initial code.
"""
# input values: these are the output of adcp_beam2ins in test_vadcp_beam
# (velocities are in instrument coordinates)
u = np.array([[-749.95582864]])
v = np.array([[-739.72251324]])
w = np.array([[-81.67564404]])
# units of centidegrees
heading = np.array([3200])
pitch = np.array([300])
roll = np.array([400])
### check test: upwards looking case
orient_1 = np.array([1])
# expected outputs, earth coordinates, upwards case, from test_vadcp_beam which
# agrees with the test matlab code values to (much) better than single precision.
vle = np.array([[247.015599]])
vln = np.array([[-1027.223026]])
vlu = np.array([[-9.497397]])
xpctd = np.hstack((vle, vln, vlu))
# calculated upwards looking case
uu, vv, ww = af.adcp_ins2earth(u, v, w, heading, pitch, roll, orient_1)
calc = np.hstack((uu, vv, ww))
# test results
np.testing.assert_array_almost_equal(calc, xpctd, 6)
### primary test: downwards looking case.
# change one input:
orient_0 = np.array([0])
# expected outputs, earth coordinates, downwards case, from matlab test code
vle = np.array([[-1029.9328104]])
vln = np.array([[-225.7064203]])
vlu = np.array([[-67.7426771]])
xpctd = np.hstack((vle, vln, vlu))
# calculated downwards looking case
uu, vv, ww = af.adcp_ins2earth(u, v, w, heading, pitch, roll, orient_0)
calc = np.hstack((uu, vv, ww))
# test results
np.testing.assert_array_almost_equal(calc, xpctd, 6)
def test_adcp_bin_depths_meters(self):
"""
Test the adcp_bin_depths_meters function.
Implemented by:
<NAME>, January 2015. Initial code.
<NAME>. 26-Jun-2015. Added time-vectorized unit test after modifying DPA.
30-Jun-2015. Added fill value unit test.
"""
sfill = SYSTEM_FILLVALUE
### scalar time case (1) - adcp looking up
# test inputs - note, CI will be sending these into the DPAs as ndarrays, not python scalars.
adcp_orientation = 1
bin_size = 400
dist_first_bin = 900
num_bins = 29
sensor_depth = 450
# expected outputs
# note that the output should be a row vector, not a 1D array.
xpctd_bins_up = np.array([[441., 437., 433., 429., 425., 421., 417., 413., 409., 405., 401., 397., 393., 389.,
385., 381., 377., 373., 369., 365., 361., 357., 353., 349., 345., 341., 337., 333.,
329.]])
# calculate bin depths
calc_bins_up = af.adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# compare calculated results to expected results
np.testing.assert_allclose(calc_bins_up, xpctd_bins_up, rtol=0.000001, atol=0.000001)
### scalar time case (2) - adcp looking down
# test inputs
adcp_orientation = np.array([0])
bin_size = np.array([400])
dist_first_bin = np.array([900])
num_bins = np.array([29])
sensor_depth = np.array([7])
# expected outputs
xpctd_bins_down = np.array([[16., 20., 24., 28., 32., 36., 40., 44., 48., 52., 56., 60., 64., 68., 72., 76., 80.,
84., 88., 92., 96., 100., 104., 108., 112., 116., 120., 124., 128.]])
# calculate bin depths
calc_bins_down = af.adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# compare calculated results to expected results
np.testing.assert_allclose(calc_bins_down, xpctd_bins_down, rtol=0.000001, atol=0.000001)
### time-vectorized case; cat the above two scalar cases together.
# inputs
dist_first_bin = np.array([900, 900])
bin_size = np.array([400, 400])
num_bins = np.array([29, 29])
sensor_depth = np.array([450, 7])
adcp_orientation = np.array([1, 0])
# expected
xpctd_bins = np.vstack((xpctd_bins_up, xpctd_bins_down))
# calculated
calc_bins = af.adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# compare calculated results to expected results
np.testing.assert_allclose(calc_bins, xpctd_bins, rtol=0.000001, atol=0.000001)
### time-vectorized fill cases - test the action on a fill value in each of the 5 input data streams,
# plus one instance of all good data.
num_bins = np.array([29, 29, 29, sfill, 29, 29]) # NOTE: DPA uses only first num_bins value
dist_first_bin = np.array([900, sfill, 900, 900, 900, 900])
bin_size = np.array([400, 400, sfill, 400, 400, 400])
sensor_depth = np.array([450, 7, 450, 7, 450, sfill])
adcp_orientation = np.array([1, 0, 1, 0, sfill, 0])
# 1st and 4th rows will have non-Nan data.
xpctd_bins = np.tile(np.nan, (6, 29))
xpctd_bins[0, :] = xpctd_bins_up
xpctd_bins[3, :] = xpctd_bins_down
# calculated
calc_bins = af.adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# compare calculated results to expected results
np.testing.assert_allclose(calc_bins, xpctd_bins, rtol=0.000001, atol=0.000001)
def test_adcp_bin_depths_dapa(self):
"""
Test the adcp_bin_depths_dapa function.
Values based on z_from_p check values.
Implemented by:
<NAME>, January 2015. Initial code.
<NAME>. 26-Jun-2015. Corrected pressure type and units and z_from_p usage.
30-Jun-2015. Added fill value unit test.
"""
sfill = SYSTEM_FILLVALUE
### scalar time case (1) - adcp looking up
# test inputs - note, CI will be sending these into the DPAs as ndarrays, not python scalars.
# test inputs
adcp_orientation = np.array([1])
bin_size = np.array([400])
dist_first_bin = np.array([900])
latitude = np.array([4.0])
num_bins =
|
np.array([10])
|
numpy.array
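# A minimal sketch of the arithmetic behind the expected bin depths in the test above,
# assuming dist_first_bin and bin_size are given in centimeters (the 900 -> 9 m conversion
# implied by the expected values); this is a hypothetical helper, not the DPA implementation.
import numpy as np

def expected_bin_depths(dist_first_bin_cm, bin_size_cm, num_bins, sensor_depth_m, orientation):
    # orientation 1 = upward looking (bins shallower than the sensor),
    # orientation 0 = downward looking (bins deeper than the sensor)
    sign = -1 if orientation == 1 else 1
    offsets_m = (dist_first_bin_cm + bin_size_cm * np.arange(num_bins)) / 100.0
    return sensor_depth_m + sign * offsets_m

print(expected_bin_depths(900, 400, 29, 450, 1)[:3])  # [441. 437. 433.]
print(expected_bin_depths(900, 400, 29, 7, 0)[:3])    # [16. 20. 24.]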
|
from brainbox import core, processing
import unittest
import numpy as np
class TestProcessing(unittest.TestCase):
def test_sync(self):
# Test casting non-uniformly-sampled data to a evenly-sampled TimeSeries.
# Begin by defining sampling intervals of random half-normally distributed length
times = np.cumsum(np.abs(np.random.normal(loc=4., scale=6., size=100)))
# take sample values as though the value was increasing as a cube of sample time
samples = times**3
# Use cubic interpolation to resample to uniform interval
cubes = core.TimeSeries(times=times, values=samples, columns=('cubic',))
resamp = processing.sync(0.1, timeseries=cubes, interp='cubic', fillval='extrapolate')
# Check that the sync function is returning a new time series object
self.assertTrue(isinstance(resamp, core.TimeSeries))
# Test that all returned sample times are uniformly spaced
# We need to use np.isclose instead of == 0.1 because of floating point arithmetic,
# since the actual diff returns e.g. 0.09999999999999964
self.assertTrue(np.all(np.isclose(np.diff(resamp.times), 0.1)))
# Check that we're within a margin of error on the interpolation
err_margin = 1e-3 # Maximum percent error allowed
err_percs = np.abs(resamp.times**3 - resamp.values.T) / (resamp.times**3)
self.assertTrue(np.all(err_percs < err_margin))
# Make a second timeseries of square-law increasing samples
times2 = np.cumsum(np.abs(np.random.normal(loc=2., scale=1., size=200)))
samples2 = times2**2
squares = core.TimeSeries(times=times2, values=samples2, columns=('square',))
# Use cubic interpolation again, this time on both timeseries
resamp2 = processing.sync(0.1, timeseries=[squares, cubes], interp='cubic',
fillval='extrapolate')
# Check that the new TS has both squares and cubes as keys and attribs
self.assertTrue(hasattr(resamp2, 'cubic'))
self.assertTrue(hasattr(resamp2, 'square'))
# Check that both timeseries are fully contained in the resampled TS
self.assertTrue(cubes.times.min() >= resamp2.times.min())
self.assertTrue(cubes.times.max() <= resamp2.times.max())
self.assertTrue(squares.times.min() >= resamp2.times.min())
self.assertTrue(squares.times.max() <= resamp2.times.max())
# Check that all interpolated values are within the margin of error against the known func
sq_errperc = np.abs(resamp2.times**2 - resamp2.square) / resamp2.times**2
cu_errperc = np.abs(resamp2.times**3 - resamp2.cubic) / resamp2.times**3
self.assertTrue(np.all(sq_errperc < err_margin) & np.all(cu_errperc < err_margin))
# Now check the numpy array behavior of sync.
# Try running sync on the cubic times and values only.
resamp = processing.sync(0.1, times=times, values=samples, interp='cubic',
fillval='extrapolate')
# Do all the tests we did for the instance created using TimeSeries objects
self.assertTrue(isinstance(resamp, core.TimeSeries))
self.assertTrue(np.all(np.isclose(np.diff(resamp.times), 0.1)))
err_margin = 1e-3 # Maximum percent error allowed
err_percs = np.abs(resamp.times**3 - resamp.values.T) / (resamp.times**3)
self.assertTrue(np.all(err_percs < err_margin))
# Try the multiple-arrays case in which we pass two times and two values
resamp2 = processing.sync(0.1, times=(times, times2), values=(samples, samples2),
interp='cubic', fillval='extrapolate')
self.assertTrue(times.min() >= resamp2.times.min())
self.assertTrue(times.max() <= resamp2.times.max())
self.assertTrue(times2.min() >= resamp2.times.min())
self.assertTrue(times2.max() <= resamp2.times.max())
def test_bincount_2d(self):
# first test simple with indices
x = np.array([0, 1, 1, 2, 2, 3, 3, 3])
y = np.array([3, 2, 2, 1, 1, 0, 0, 0])
r, xscale, yscale = processing.bincount2D(x, y, xbin=1, ybin=1)
r_ = np.zeros_like(r)
# sometimes life would have been simpler in c:
for ix, iy in zip(x, y):
r_[iy, ix] += 1
self.assertTrue(np.all(np.equal(r_, r)))
# test with negative values
y = np.array([3, 2, 2, 1, 1, 0, 0, 0]) - 5
r, xscale, yscale = processing.bincount2D(x, y, xbin=1, ybin=1)
self.assertTrue(np.all(
|
np.equal(r_, r)
|
numpy.equal
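# The reference counts built with the explicit zip loop above can also be produced without a
# Python loop; a small sketch using np.add.at (not part of the brainbox API, just an
# equivalent way to accumulate the same 2D histogram).
import numpy as np

x = np.array([0, 1, 1, 2, 2, 3, 3, 3])
y = np.array([3, 2, 2, 1, 1, 0, 0, 0])
r_ = np.zeros((y.max() + 1, x.max() + 1), dtype=int)
np.add.at(r_, (y, x), 1)  # accumulate one count at each (row=y, col=x) pair
print(r_)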
|
import numpy as np
class Conv():
import numpy as np
def __init__(self, inputarray, stride, weights):
self.filternumber = weights.shape[0]
self.width = weights.shape[2]
self.height = weights.shape[1]
self.stride = stride
self.inputarray = inputarray
self.weights = weights
def ForwardPass(self):
print("Input Array Shape for Conv : ",self.inputarray.shape)
self.hoffmap = self.inputarray[0].shape[0] # hoffmap -> height of feature map
self.woffmap = self.inputarray[0].shape[1] # woffmap -> width of feature map
self.widthtour = int(1 + (self.woffmap - self.width) / self.stride)
self.heighttour = int(1 + (self.hoffmap - self.height) / self.stride)
# print(self.widthtour)
# print(self.heighttour)
self.featuremap =
|
np.zeros((self.inputarray.shape[0], self.filternumber, self.widthtour, self.heighttour))
|
numpy.zeros
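# The widthtour/heighttour expressions above are the usual valid-convolution output-size
# formula, 1 + (input - kernel) // stride; a short worked check with hypothetical sizes
# (not tied to the Conv class above).
def conv_output_size(input_size, kernel_size, stride):
    return 1 + (input_size - kernel_size) // stride

print(conv_output_size(28, 5, 1))  # 24: a 28-wide map with a 5-wide kernel, stride 1
print(conv_output_size(28, 5, 2))  # 12: stride 2 roughly halves the number of positions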
|
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from scipy.sparse import csc_matrix
from method.settings import Settings
from numpy import linalg as LA
import numpy.matlib
class Similarity:
def __init__(self, setup: Settings, data: np.ndarray):
self.k = setup.k
self.distance_metric = setup.metric.value
self.data = data
def construct_W(self):
"""
Construct the affinity matrix W through different ways
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
Output
------
W: {sparse matrix}, shape (n_samples, n_samples)
output affinity matrix W
"""
n_samples, _ = np.shape(self.data)
# compute pairwise euclidean distances
D = pairwise_distances(self.data, metric=self.distance_metric)
D **= 2
# np.fill_diagonal(D, 1)
# sort the distance matrix D in ascending order
idx = np.argsort(D, axis=1)
# choose the k-nearest neighbors for each instance
idx_new = idx[:, 0:self.k+1]
G = np.zeros((n_samples*(self.k+1), 3))
G[:, 0] = np.tile(np.arange(n_samples), (self.k+1, 1)).reshape(-1)
G[:, 1] = np.ravel(idx_new, order='F')
G[:, 2] = 1
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
@staticmethod
def construct_spectral_info(S):
n_samples = S.shape[0]
# build the degree matrix
X_sum = np.array(S.sum(axis=1))
D = np.zeros((n_samples, n_samples))
for i in range(n_samples):
D[i, i] = X_sum[i]
# build the laplacian matrix
L = D - S
d1 = np.power(np.array(S.sum(axis=1)), -0.5)
d1[np.isinf(d1)] = 0
d2 = np.power(np.array(S.sum(axis=1)), 0.5)
v = np.dot(np.diag(d2[:, 0]), np.ones(n_samples))
v = v/LA.norm(v)
# build the normalized laplacian matrix
L_hat = (np.matlib.repmat(d1, 1, n_samples)) *
|
np.array(L)
|
numpy.array
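# The repmat expression above is one common way of forming the normalized Laplacian
# D^(-1/2) L D^(-1/2); an equivalent sketch with explicit diagonal matrices, assuming a small
# dense symmetric affinity matrix S (illustration only, not the Similarity class internals).
import numpy as np

S = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])
deg = S.sum(axis=1)
L = np.diag(deg) - S                                  # unnormalized Laplacian
d_inv_sqrt = np.where(deg > 0, deg ** -0.5, 0.0)      # guard isolated vertices
L_hat = np.diag(d_inv_sqrt) @ L @ np.diag(d_inv_sqrt)
print(L_hat)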
|
from SLIX import toolbox
import numpy
import pytest
if toolbox.gpu_available:
use_gpu_arr = [True, False]
else:
use_gpu_arr = [False]
class TestToolbox:
@pytest.mark.parametrize("use_gpu", use_gpu_arr)
def test_background_mask(self, use_gpu):
average_image = numpy.zeros((256, 256, 10))
for i in range(0, 128):
average_image[i, :] = 0
average_image[i + 128, :] = 128
toolbox_mask = toolbox.background_mask(average_image, use_gpu=use_gpu)
print(toolbox_mask)
assert numpy.all(toolbox_mask[:128, :] == True)
assert numpy.all(toolbox_mask[128:, :] == False)
@pytest.mark.parametrize("use_gpu", use_gpu_arr)
def test_all_peaks(self, use_gpu):
# Create an absolute simple peak array
arr = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
arr = arr.reshape((1, 1, 11))
real_peaks = arr == 1
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
assert numpy.all(toolbox_peaks == real_peaks)
# Test one single peak
arr = numpy.array(([0, 1, 0, 0, 0, 0]), dtype=bool)
arr = arr.reshape((1, 1, 6))
real_peaks = arr == 1
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
assert numpy.all(toolbox_peaks == real_peaks)
# Test one single peak
arr = numpy.array(([0, 1, 1, 0, 0, 0]), dtype=bool)
arr = arr.reshape((1, 1, 6))
real_peaks = arr == 1
real_peaks[0, 0, 2] = False
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
assert numpy.all(toolbox_peaks == real_peaks)
# Test one single peak
arr = numpy.array(([0, 1, 1, 1, 0, 0]), dtype=bool)
arr = arr.reshape((1, 1, 6))
real_peaks = arr == 1
real_peaks[0, 0, 1] = False
real_peaks[0, 0, 3] = False
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
assert numpy.all(toolbox_peaks == real_peaks)
arr = numpy.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1] * 1)
arr = arr.reshape((1, 1, 11))
real_peaks = arr == 1
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
assert numpy.all(toolbox_peaks == real_peaks)
@pytest.mark.parametrize("use_gpu", use_gpu_arr)
def test_num_peaks(self, use_gpu):
# Create an absolute simple peak array
test_arr = numpy.array(([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), dtype=bool)
test_arr = test_arr.reshape((1, 1, 20))
real_peaks = test_arr == True
toolbox_peaks = toolbox.num_peaks(real_peaks, use_gpu=use_gpu)
expected_value = numpy.count_nonzero(real_peaks[0, 0, :])
assert numpy.all(toolbox_peaks == expected_value)
@pytest.mark.parametrize("use_gpu", use_gpu_arr)
def test_peak_prominence(self, use_gpu):
# Create an absolute simple peak array
arr = numpy.array([0, 1, 0, 0.07, 0, 1, 0, 0.07, 0, 1, 0] * 1)
arr = arr.reshape((1, 1, 11))
# Test if high and low prominence separation is working as intended
high_peaks = arr == 1
low_peaks = arr == 0.07
toolbox_peaks = toolbox.peaks(arr, use_gpu=use_gpu)
toolbox_prominence = toolbox.peak_prominence(arr, toolbox_peaks, kind_of_normalization=0, use_gpu=use_gpu)
toolbox_high_peaks = toolbox_peaks.copy()
toolbox_high_peaks[toolbox_prominence < toolbox.cpu_toolbox.TARGET_PROMINENCE] = False
toolbox_low_peaks = toolbox_peaks.copy()
toolbox_low_peaks[toolbox_prominence >= toolbox.cpu_toolbox.TARGET_PROMINENCE] = False
assert numpy.all(high_peaks == toolbox_high_peaks)
assert
|
numpy.all(low_peaks == toolbox_low_peaks)
|
numpy.all
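# The prominence split tested above can be reproduced with scipy's prominence definition; a
# small sketch using scipy.signal directly (not SLIX's toolbox, and assuming
# kind_of_normalization=0 corresponds to raw prominences).
import numpy as np
from scipy.signal import find_peaks, peak_prominences

arr = np.array([0, 1, 0, 0.07, 0, 1, 0, 0.07, 0, 1, 0])
peaks, _ = find_peaks(arr)
prominences = peak_prominences(arr, peaks)[0]
print(peaks)        # indices of all local maxima
print(prominences)  # the high peaks have prominence ~1.0, the low peaks ~0.07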
|
import numpy as np
import scipy.ndimage as ndi
import scipy.sparse as sp
from discretize.utils.code_utils import as_array_n_by_dim, is_scalar
from scipy.spatial import cKDTree, Delaunay
from scipy import interpolate
import discretize
from discretize.utils.code_utils import deprecate_function
import warnings
num_types = [int, float]
def random_model(shape, seed=None, anisotropy=None, its=100, bounds=None):
"""Create random tensor model.
Creates a random tensor model by convolving a kernel function with a
uniformly distributed model. The user specifies the number of cells
along the x, (y and z) directions with the input argument *shape* and
the function outputs a tensor model with the same shape. Afterwards,
the user may use the :py:func:`~discretize.utils.mkvc` function
to convert the tensor to a vector which can be plotting on a
corresponding tensor mesh.
Parameters
----------
shape : (dim) tuple of int
shape of the model.
seed : int, optional
pick which model to produce, prints the seed if you don't choose
anisotropy : numpy.ndarray, optional
this is the kernel that is convolved with the model
its : int, optional
number of smoothing iterations
bounds : list, optional
Lower and upper bounds on the model. Has the form [lower_bound, upper_bound].
Returns
-------
numpy.ndarray
A random generated model whose shape was specified by the input parameter *shape*
Examples
--------
Here, we generate a random model for a 2D tensor mesh and plot.
>>> from discretize import TensorMesh
>>> from discretize.utils import random_model, mkvc
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> h = [(1., 50)]
>>> vmin, vmax = 0., 1.
>>> mesh = TensorMesh([h, h])
>>> model = random_model(mesh.shape_cells, seed=4, bounds=[vmin, vmax])
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = plt.subplot(111)
>>> im, = mesh.plot_image(model, grid=False, ax=ax, clim=[vmin, vmax])
>>> cbar = plt.colorbar(im)
>>> ax.set_title('Random Tensor Model')
>>> plt.show()
"""
if bounds is None:
bounds = [0, 1]
if seed is None:
seed =
|
np.random.randint(1e3)
|
numpy.random.randint
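# The docstring above describes convolving a kernel with uniformly distributed noise; a rough
# stand-alone sketch of that idea (not the actual discretize implementation), using repeated
# box smoothing as a stand-in for the kernel convolution.
import numpy as np
import scipy.ndimage as ndi

def rough_random_model(shape, its=20, bounds=(0, 1), seed=None):
    rng = np.random.default_rng(seed)
    model = rng.random(shape)
    for _ in range(its):
        model = ndi.uniform_filter(model, size=3)     # smooth the uniform noise
    model = (model - model.min()) / (model.max() - model.min())
    return bounds[0] + model * (bounds[1] - bounds[0])

m = rough_random_model((50, 50), seed=4)
print(m.shape, float(m.min()), float(m.max()))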
|
import warnings
import pytest
import numpy as np
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from ADPYNE.Dual import Dual, makeHessianVars
import ADPYNE.elemFunctions as ef
import ADPYNE.elemFunctions as ef
from ADPYNE.Hessian import Hessian
# higher order derivative and __str__ test
def test_higherorderprint():
x = Dual(2, 1)
assert str(x) == "2 + 1ε"
y = x.makeHighestOrder(3)
f = y
f.buildCoefficients(3)
assert f.coefficients == [2.0, 1.0, 0.0, -0.0]
assert str(f) == "[2.0, 1.0, 0.0, -0.0]"
# addition tests
def test_add_dual_results():
# single input cases
# positive numbers
x = Dual(5, 1)
f = x + x
assert f.Real == 10
assert f.Dual == 2
# negative numbers
y = Dual(-5, 1)
f = y + y
assert f.Real == -10
assert f.Dual == 2
def test_add_vector_results():
x = Dual(np.array([[3],[1]]), np.array([[2, 1]]).T)
y = Dual(np.array([[2],[-3]]), np.array([[3, 2]]).T)
f = x + y
assert np.all(f.Real == np.array([[5], [-2]]))
assert np.all(f.Dual == np.array([[5], [3]]))
def test_add_constant_results():
# single input case
# positive numbers
x = Dual(5, 1)
f = x + 3
assert f.Real == 8
assert f.Dual == 1
# negative numbers
x = Dual(-5, 1)
f = x + 3
assert f.Real == -2
assert f.Dual == 1
def test_add_constant_vector_results():
x = Dual(np.array([[1, 3]]).T, np.array([[2, 1]]).T)
f = x + 3
assert np.all(f.Real == np.array([[4, 6]]).T)
assert np.all(f.Dual ==
|
np.array([[2], [1]])
|
numpy.array
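# The addition tests above follow the dual-number rule (a + b*eps) + (c + d*eps) =
# (a + c) + (b + d)*eps, with a constant treated as c + 0*eps; a minimal sketch that is
# independent of the ADPYNE Dual class.
class TinyDual:
    def __init__(self, real, dual=0.0):
        self.Real, self.Dual = real, dual
    def __add__(self, other):
        if isinstance(other, TinyDual):
            return TinyDual(self.Real + other.Real, self.Dual + other.Dual)
        return TinyDual(self.Real + other, self.Dual)  # constants carry no dual part

f = TinyDual(5, 1) + TinyDual(5, 1)
print(f.Real, f.Dual)   # 10 2
g = TinyDual(-5, 1) + 3
print(g.Real, g.Dual)   # -2 1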
|
from warnings import warn
import astroscrappy
import ccdproc
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.stats import sigma_clipped_stats
from astropy.time import Time
from astroscrappy import detect_cosmics
from ccdproc import flat_correct, subtract_bias, subtract_dark
from .hduutil import (CCDData_astype, _parse_data_header, _parse_image,
add2hdr, errormap, load_ccd, propagate_ccdmask,
set_ccd_gain_rdnoise, trim_ccd, update_process,
update_tlm)
from .misc import LACOSMIC_KEYS, change_to_quantity, fitsxy2py
__all__ = [
"crrej", "medfilt_bpm", "bdf_process"]
# Set strings for header history & print (if verbose)
str_bias = "Bias subtracted (see BIASFRM)"
str_dark = "Dark subtracted (see DARKFRM)"
str_dscale = "Dark scaling using {}"
str_flat = "Flat corrected by image/flat*flat_norm_value (see FLATFRM; FLATNORM)"
str_fringe_noscale = "Fringe subtracted (see FRINFRM)"
str_fringe_scale = ("Finge subtracted with scaling (image - scale*fringe)"
+ "(see FRINFRM, FRINSECT, FRINFUNC and FRINSCAL)")
str_trim = "Trim by FITS section {} (see LTV, LTM, TRIMIM)"
str_e0 = "Readnoise propagated with Poisson noise (using gain above) of source."
str_ed = "Poisson noise from subtracted dark was propagated."
str_ef = "Flat uncertainty was propagated."
str_nexp = "Normalized by the exposure time."
str_navg = "Normalized by the average value of the frame."
str_nmed = "Normalized by the median value of the frame."
str_cr = ("Cosmic-Ray rejected by astroscrappy (v {}), with parameters: {}")
# def _add_and_print(s, header, verbose, update_header=True, t_ref=None):
# if update_header:
# # add as history
# add2hdr(header, 'h', s, t_ref=t_ref)
# if verbose:
# if isinstance(s, str):
# print(str_now(fmt='{}'), s)
# else:
# for _s in s:
# print(str_now(fmt='{}'), _s)
# # TODO: This is quite much overlapping with set_ccd_gain_rdnoise...
# def get_gain_readnoise(ccd, gain=None, gain_key="GAIN",
# gain_unit=u.electron/u.adu,
# rdnoise=None, rdnoise_key="RDNOISE",
# rdnoise_unit=u.electron, verbose=True,
# update_header=True):
# """ Get gain and readnoise from given paramters.
# gain, rdnoise : None, float, astropy.Quantity, optional.
# The gain and readnoise value. If ``gain`` or ``readnoise`` is
# specified, they are interpreted with ``gain_unit`` and
# ``rdnoise_unit``, respectively. If they are not specified, this
# function will seek for the header with keywords of ``gain_key``
# and ``rdnoise_key``, and interprete the header value in the unit
# of ``gain_unit`` and ``rdnoise_unit``, respectively.
# gain_key, rdnoise_key : str, optional.
# See ``gain``, ``rdnoise`` explanation above.
# gain_unit, rdnoise_unit : str, astropy.Unit, optional.
# See ``gain``, ``rdnoise`` explanation above.
# verbose : bool, optional.
# The verbose option.
# update_header : bool, optional
# Whether to update the given header.
# Note
# ----
# If gain and readout noise are not found properly, the default values
# of 1.0 and 0.0 with corresponding units will be returned.
# """
# gain_Q, gain_from = get_if_none(
# gain, ccd.header, key=gain_key, unit=gain_unit,
# verbose=verbose, default=1.0)
# rdnoise_Q, rdnoise_from = get_if_none(
# rdnoise, ccd.header, key=rdnoise_key, unit=rdnoise_unit,
# verbose=False, default=0.0)
# _add_and_print(str_grd.format(gain_from,
# "gain",
# gain_Q.value,
# gain_Q.unit),
# ccd.header, verbose, update_header=update_header)
# _add_and_print(str_grd.format(rdnoise_from,
# "rdnoise",
# rdnoise_Q.value,
# rdnoise_Q.unit),
# ccd.header, verbose, update_header=update_header)
# return gain_Q, rdnoise_Q
def crrej(
ccd, mask=None, propagate_crmask=False, update_header=True, add_process=True, gain=None, rdnoise=None,
sigclip=4.5, sigfrac=0.5, objlim=1.0, satlevel=np.inf, pssl=0.0, niter=4, sepmed=False,
cleantype='medmask', fsmode='median', psfmodel='gauss', psffwhm=2.5, psfsize=7, psfk=None, psfbeta=4.765,
verbose=True
):
""" Do cosmic-ray rejection using L.A.Cosmic default parameters.
Parameters
----------
ccd : CCDData
The ccd to be processed. The data must be in ADU, not electrons.
propagate_crmask : bool, optional.
Whether to save (propagate) the mask from CR rejection (``astroscrappy``) to the CCD's mask.
Default is `False`.
gain, rdnoise : None, float, astropy.Quantity, optional.
The gain and readnoise value. If not ``Quantity``, they must be in electrons per adu and
electron unit, respectively.
sigclip : float, optional
Laplacian-to-noise limit for cosmic ray detection. Lower values will flag more pixels as cosmic
rays.
Default: 4.5.
sigfrac : float, optional
Fractional detection limit for neighboring pixels. For cosmic ray neighbor pixels, a
laplacian-to-noise detection limit of sigfrac * sigclip will be used.
Default: 0.5.
objlim : float, optional
Minimum contrast between Laplacian image and the fine structure image. Increase this value if
cores of bright stars are flagged as cosmic rays.
Default: 1.0.
pssl : float, optional
Previously subtracted sky level in ADU. We always need to work in electrons for cosmic ray
detection, so we need to know the sky level that has been subtracted so we can add it back in.
Default: 0.0.
satlevel : float, optional
Saturation level of the image (electrons). This value is used to detect saturated stars and
pixels at or above this level are added to the mask.
Default: ``np.inf``.
niter : int, optional
Number of iterations of the LA Cosmic algorithm to perform.
Default: 4.
sepmed : boolean, optional
Use the separable median filter instead of the full median filter. The separable median is not
identical to the full median filter, but they are approximately the same and the separable
median filter is significantly faster and still detects cosmic rays well.
Default: `False`
cleantype : {'median', 'medmask', 'meanmask', 'idw'}, optional
Set which clean algorithm is used:
* ``'median'``: An unmasked 5x5 median filter
* ``'medmask'``: A masked 5x5 median filter
* ``'meanmask'``: A masked 5x5 mean filter
* ``'idw'``: A masked 5x5 inverse distance weighted interpolation
Default: ``"meanmask"``.
fsmode : {'median', 'convolve'}, optional
Method to build the fine structure image:
* ``'median'``: Use the median filter in the standard LA Cosmic algorithm
* ``'convolve'``: Convolve the image with the psf kernel
to calculate the fine structure image.
Default: ``'median'``.
psfmodel : {'gauss', 'gaussx', 'gaussy', 'moffat'}, optional
Model to use to generate the psf kernel if ``fsmode='convolve'`` and ``psfk`` is `None`. The
current choices are Gaussian and Moffat profiles. ``'gauss'`` and ``'moffat'`` produce circular
PSF kernels. The ``'gaussx'`` and ``'gaussy'`` produce Gaussian kernels in the x and y
directions respectively.
Default: ``"gauss"``.
psffwhm : float, optional
Full Width Half Maximum of the PSF to use to generate the kernel.
Default: 2.5.
psfsize : int, optional
Size of the kernel to calculate. Returned kernel will have size psfsize x psfsize. psfsize
should be odd.
Default: 7.
psfk : float numpy array, optional
PSF kernel array to use for the fine structure image if ``fsmode='convolve'``. If `None` and
``fsmode == 'convolve'``, we calculate the psf kernel using ``'psfmodel'``.
Default: `None`.
psfbeta : float, optional
Moffat beta parameter. Only used if ``fsmode='convolve'`` and ``psfmodel='moffat'``.
Default: 4.765.
verbose : boolean, optional
Print to the screen or not. Default: `False`.
Returns
-------
_ccd : CCDData
The cosmic-ray cleaned CCDData in ADU. ``astroscrappy`` automatically does a gain correction,
so I divided the ``astroscrappy`` result by gain to restore to ADU (not to surprise the users).
crmask : ndarray (mask)
The cosmic-ray mask from ``astroscrappy``, propagated by the original mask of the ccd (if
``ccd.mask`` is not `None`) and ``mask`` given by the user.
update_header : bool, optional.
Whether to update the header if there is any.
add_process : bool, optional.
Whether to add ``PROCESS`` key to the header.
Notes
-----
All defaults are based on IRAF version of L.A. Cosmic (Note the default parameters of L.A. Cosmic
differ from version to version, so I took the IRAF version written by <NAME>.)
See the docstring of astroscrappy by
>>> import astroscrappy
>>> astroscrappy.detect_cosmics?
Example
-------
>>> yfu.ccdutil.set_ccd_gain_rdnoise(ccd)
>>> nccd, mask = crrej(ccd)
"""
_t = Time.now()
if gain is None:
try:
gain = ccd.gain
except AttributeError:
raise ValueError(
"Gain must be given or accessible as ``ccd.gain``. Use, e.g., yfu.set_ccd_gain_rdnoise(ccd)."
)
if rdnoise is None:
try:
rdnoise = ccd.rdnoise
except AttributeError:
raise ValueError(
"Readnoise must be given or accessible as ``ccd.rdnoise``. "
+ "Use, e.g., yfu.set_ccd_gain_rdnoise(ccd)."
)
_ccd = ccd.copy()
data, hdr = _parse_data_header(_ccd)
inmask = propagate_ccdmask(_ccd, additional_mask=mask)
# The L.A. Cosmic accepts only the gain in e/adu and rdnoise in e.
gain = change_to_quantity(gain, u.electron/u.adu, to_value=True)
rdnoise = change_to_quantity(rdnoise, u.electron, to_value=True)
# remove the cosmic rays
crrej_kwargs = dict(
gain=gain,
readnoise=rdnoise,
sigclip=sigclip,
sigfrac=sigfrac,
objlim=objlim,
satlevel=satlevel,
pssl=pssl,
niter=niter,
sepmed=sepmed,
cleantype=cleantype,
fsmode=fsmode,
psfmodel=psfmodel,
psffwhm=psffwhm,
psfsize=psfsize,
psfk=psfk,
psfbeta=psfbeta
)
crmask, cleanarr = detect_cosmics(
data,
inmask=inmask,
verbose=verbose,
**crrej_kwargs
)
# create the new ccd data object
# astroscrappy automatically does the gain correction, so return
# back to avoid confusion.
_ccd.data = cleanarr / gain
if propagate_crmask:
_ccd.mask = propagate_ccdmask(_ccd, additional_mask=crmask)
if add_process and hdr is not None:
try:
hdr["PROCESS"] += "C"
except KeyError:
hdr["PROCESS"] = "C"
if update_header and hdr is not None:
add2hdr(
hdr, 'h', s=str_cr.format(astroscrappy.__version__, crrej_kwargs), verbose=verbose, t_ref=_t
)
else:
if verbose:
print(str_cr.format(astroscrappy.__version__, crrej_kwargs))
update_tlm(hdr)
_ccd.header = hdr
return _ccd, crmask
# TODO: put niter
# TODO: put medfilt_min
# to get std at each pixel by medfilt[<medfilt_min] = 0, and std = sqrt((1+snoise)*medfilt/gain +
# rdn**2)
def medfilt_bpm(
ccd, cadd=1.e-10, std_model="std", gain=1., rdnoise=0., snoise=0., size=5,
sigclip_kw=dict(sigma=3., maxiters=5, std_ddof=1), std_section=None,
footprint=None, mode='reflect', cval=0.0, origin=0,
med_sub_clip=None, med_rat_clip=[0.5, 2], std_rat_clip=[-5, 5],
dtype='float32', update_header=True, verbose=False, logical='and', full=False
):
""" Find bad pixels from median filtering technique (non standard..?)
Parameters
----------
ccd : `~astropy.nddata.CCDData`
The CCD to find the bad pixels.
cadd : float, optional.
A very small constant added to the input array to avoid values of exactly 0.0 in the median
filtered image, which would cause division by zero in the median ratio (image/|median_filtered|).
std_model : {"std", "ccd"} optional.
The model used to calculate the std (standard deviation) map.
* ``"std"``: Simple standard deviation is calculated.
* ``"ccd"``: Using CCD noise model (``sqrt{(1 + snoise)*med_filt/gain + (rdnoise/gain)**2}``)
For ``'std'``, the arguments `std_section` and `sigclip_kw` are used, while if ``'ccd'``,
arguments `gain`, `rdnoise`, `snoise` will be used.
size, footprint, mode, cval, origin : optional.
The parameters to obtain the median-filtered map. See `~scipy.ndimage.median_filter`.
sigclip_kw : dict, optional.
The parameters used for `~astropy.stats.sigma_clipped_stats` when estimating the sky standard
deviation at `std_section`. This is **ignored** if ``std_model='ccd'``.
std_section : str, optional.
The region in FITS standard (1-indexing, end-inclusive, xyz order) to estimate the sky standard
deviation to obtain the `std_ratio`. If `None` (default), the full region of the given array
is used, which is many times not desirable due to the celestial objects in the FOV and
computational cost. This is **ignored** if ``std_model='ccd'``.
gain, rdnoise, snoise : float, optional.
The gain (electrons/ADU), readout noise (electrons), and sensitivity noise (fractional error
from flat fielding) of the frame. These are **ignored** if ``std_model="std"``.
med_sub_clip : list of two float or `None`, optional.
The thresholds to find bad pixels by ``med_sub = ccd.data - median_filter(ccd.data)``. The
clipping will be turned off if it is `None` (default). If a list, must be in the order of
``[lower, upper]`` and at most two of these can be `None`.
med_rat_clip : list of two float or `None`, optional.
The thresholds to find bad pixels by ``med_ratio = ccd.data/np.abs(median_filter(ccd.data))``.
The clipping will be turned off if it is `None`. If a list, must be in the order of
``[lower, upper]`` and at most two of these can be `None`.
std_rat_clip : list of two float or `None`, optional.
The thresholds to find bad pixels by ``std_ratio = (ccd - median_filter(ccd))/std``. The
clipping will be turned off if it is `None`. If a list, must be in the order of
``[lower, upper]`` and at most two of these can be `None`.
logical : {'and', '&', 'or', '|'} or list of these, optional.
The logic to propagate masks determined by the ``_clip``'s. The mask is propagated such as
``posmask = med_sub > med_sub_clip[1] &/| med_ratio > med_rat_clip[1] &/| std_ratio >
std_rat_clip[1]``. If a list, it must contain two str of these, in the order of
``[logical_negmask, logical_posmask]``.
Returns
-------
ccd : CCDData
The badpixel removed result.
The followings are returned as dict only if ``full=True``.
posmask, negmask : ndarry of bool
The masked pixels by positive/negative criteria.
sky_std : float
The (sigma-clipped) sky standard deviation. Returned only if ``full=True``.
Notes
-----
`med_sub_clip` is usually not necessary, but it is useful for detecting hot pixels in dark frames (no
light) for some special circumstances. ::
1. Median additive difference (data-medfilt) generated,
2. Median ratio (data/|medfilt|) generated,
3. Stddev ratio ((data-medfilt)/std) generated,
4. posmask and negmask calculated by clips MB_[ADD/RAT/STD]_[U/L] and logic MB_[N/P]LOG (see keywords),
5. Pixels of (posmask | negmask) are replaced with the median-filtered frame.
"""
from scipy.ndimage import median_filter
def _sanitize_clips(clips):
clips = np.atleast_1d(clips)
if clips.size == 1:
clips = np.repeat(clips, 2)
return clips
if ((med_sub_clip is None) and (med_rat_clip is None) and (std_rat_clip is None)):
warn("No BPM is found because all clips are None.", end=' ')
if full:
return ccd, dict(posmask=None, negmask=None, med_filt=None, med_sub=None, med_rat=None,
std_rat=None, std=None)
else:
return ccd
logical =
|
np.array(logical)
|
numpy.array
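# The five steps listed in the Notes of medfilt_bpm reduce to a few array operations; a
# condensed sketch of that logic with a plain (not sigma-clipped) std estimate and a single
# 'or' combination of the clips, so it is a simplification of the function above, not a
# replacement for it.
import numpy as np
from scipy.ndimage import median_filter

def rough_medfilt_bpm(data, size=5, med_rat_clip=(0.5, 2), std_rat_clip=(-5, 5)):
    med = median_filter(data, size=size) + 1e-10        # cadd avoids division by zero
    med_rat = data / np.abs(med)                         # step 2: median ratio
    std = np.std(data - med, ddof=1)                     # crude stand-in for the sky std
    std_rat = (data - med) / std                         # step 3: stddev ratio
    bad = ((med_rat < med_rat_clip[0]) | (med_rat > med_rat_clip[1])
           | (std_rat < std_rat_clip[0]) | (std_rat > std_rat_clip[1]))
    return np.where(bad, med, data), bad                 # step 5: replace flagged pixels

img = np.random.default_rng(1).normal(100, 5, (64, 64))
img[10, 10] = 5000                                       # inject a hot pixel
fixed, bad = rough_medfilt_bpm(img)
print(int(bad.sum()), bool(fixed[10, 10] < 200))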
|
import numpy as np
import math
import sys
from timeit import default_timer as timer
sys.path.append("../../")
from core import wnn
from encoding import thermometer
from encoding import adult
from encoding import hamming_code
#Load Adult data
base_path = "../../dataset/adult/"
#2/3 Test
bits_encoding = 128
train_data, train_label, test_data, test_label, data_min, data_max = adult.load_data(base_path)
nominal_length = {1: 64, 3 : 64, 5: 64, 6: 64, 7: 64, 8:64, 9: 64, 13: 64}
nominal_length2 = {1: 30, 3 : 30, 5: 30, 6: 30, 7: 30, 8:30, 9: 30, 13: 30}
ths = {}
for index in data_max:
ths[index] = thermometer.Thermometer(data_min[index], data_max[index], bits_encoding)
train_bin = []
test_bin = []
i = 0
for data in train_data:
train_bin.append(np.array([], dtype=bool))
for a in range(len(data)):
if a in ths:
binarr = ths[a].binarize(data[a])
#print "C ", binarr
else:
#binarr = hamming_code.get_code(data[a], nominal_length[a])
p = data[a] * nominal_length2[a]
binarr = np.zeros(nominal_length2[a]*len(adult.nominal_att[a]), dtype=bool)
#print a, ":", nominal_length2[a], "*", data[a], "=", p, " - ", nominal_size[a]
for b in range(nominal_length2[a]):
binarr[p] = 1
p += 1
train_bin[i] = np.append(train_bin[i], binarr)
i += 1
i = 0
for data in test_data:
test_bin.append(np.array([], dtype=bool))
for a in range(len(data)):
if a in ths:
binarr = ths[a].binarize(data[a])
#print "C ", binarr
else:
#binarr = hamming_code.get_code(data[a], nominal_length[a])
p = data[a] * nominal_length2[a]
binarr = np.zeros(nominal_length2[a]*len(adult.nominal_att[a]), dtype=bool)
for b in range(nominal_length2[a]):
binarr[p] = 1
p += 1
test_bin[i] = np.append(test_bin[i], binarr)
i += 1
#Parameters
num_classes = 2
tuple_bit = 28
test_length = len(test_label)
num_runs = 20
acc_list = []
training_time = []
testing_time = []
dacc_list = []
dtraining_time = []
dtesting_time = []
bacc_list = []
btraining_time = []
btesting_time = []
entry_size = len(train_bin[0])
#Wisard
for r in range(num_runs):
wisard = wnn.Wisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
wisard.train(train_bin, train_label)
training_time.append(timer() - start)
#Testing
start = timer()
rank_result = wisard.rank(test_bin)
testing_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == test_label[i]:
if not (rank_result[i] ^ test_label[i]):
num_hits += 1
acc_list.append(float(num_hits)/float(test_length))
wisard_stats = wisard.stats()
del wisard
#DictWisard
for r in range(num_runs):
dwisard = wnn.DictWisard(entry_size, tuple_bit, num_classes)
#Training
start = timer()
dwisard.train(train_bin, train_label)
dtraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = dwisard.rank(test_bin)
dtesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
if rank_result[i] == test_label[i]:
num_hits += 1
dacc_list.append(float(num_hits)/float(test_length))
dwisard_stats = dwisard.stats()
del dwisard
#Bloom Wisard
#capacity = len(train_label)
capacity = 100
error = 0.1
errors = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
b_stats = []
b_training_time = []
b_testing_time = []
b_acc = []
b_error = []
for e in range(len(errors)):
btraining_time = []
btesting_time = []
bacc_list = []
for r in range(num_runs):
bwisard = wnn.BloomWisard(entry_size, tuple_bit, num_classes, capacity, error=errors[e])
#Training
start = timer()
bwisard.train(train_bin, train_label)
btraining_time.append(timer() - start)
#Testing
start = timer()
rank_result = bwisard.rank(test_bin)
btesting_time.append(timer() - start)
#Accuracy
num_hits = 0
for i in range(test_length):
#if rank_result[i] == test_label[i]:
if not (rank_result[i] ^ test_label[i]):
num_hits += 1
bacc_list.append(float(num_hits)/float(test_length))
b_training_time.append(btraining_time)
b_testing_time.append(btesting_time)
b_acc.append(bacc_list)
b_stats.append(bwisard.stats())
b_error.append(bwisard.error())
#bwisard_stats = bwisard.stats()
#berror = bwisard.error()
del bwisard
#Writing output file
with open("stats.csv", "w") as out:
out.write("WNN; Entry size; Tuple size; # Rams; Capacity; Error; # Hashes; Ram size; # Discriminators; Total Bits; Acc(%); Acc Std; Training(s); Training Std; Testing(s); Testing Std; Runs;\n")
out.write("Wisard;" + str(entry_size) + ";" + str(tuple_bit) + ";" + str(wisard_stats[0]) + ";;;;" + str(wisard_stats[1]) + ";" + str(num_classes) + ";" + str(wisard_stats[3]) + ";")
out.write(str(np.mean(acc_list)) + ";" + str(np.std(acc_list)) + ";" + str(np.mean(training_time)) + ";" + str(np.std(training_time)) + ";" + str(np.mean(testing_time)) + ";" + str(np.std(testing_time)) + ";" + str(num_runs) + ";\n")
out.write("Dict Wisard;" + str(entry_size) + ";" + str(tuple_bit) + ";" + str(dwisard_stats[0]) + ";;;;" + str(dwisard_stats[1]) + ";" + str(num_classes) + ";" + str(dwisard_stats[2]) + ";")
out.write(str(np.mean(dacc_list)) + ";" + str(np.std(dacc_list)) + ";" + str(np.mean(dtraining_time)) + ";" + str(np.std(dtraining_time)) + ";" + str(np.mean(dtesting_time)) + ";" + str(np.std(dtesting_time)) + ";" + str(num_runs) + ";\n")
for i in range(len(errors)):
out.write("Bloom Wisard;" + str(entry_size) + ";" + str(tuple_bit) + ";" + str(b_stats[i][0]) + ";" + str(capacity) + ";" + str(b_error[i]) + ";" + str(b_stats[i][4]) + ";" + str(b_stats[i][1]) + ";" + str(num_classes) + ";" + str(b_stats[i][3]) + ";")
out.write(str(np.mean(b_acc[i])) + ";" + str(np.std(b_acc[i])) + ";" + str(np.mean(b_training_time[i])) + ";" + str(np.std(b_training_time[i])) + ";" + str(
|
np.mean(b_testing_time[i])
|
numpy.mean
|
from numpy import ones, arange, array, asarray, hstack, empty, lexsort
from scipy.sparse import csr_matrix, csc_matrix
from pydec.mesh.simplex import simplex
from pydec.math import kd_tree
from pydec.util import flatten
from simplex_array import simplex_array_searchsorted
__all__ = ['rips_complex']
class rips_complex:
def __init__(self, vertices, delta):
"""Construct a Rips complex
Construct a Rips for an array of vertices and a radius delta.
Parameters
----------
vertices : array_like
An N-by-D array of vertex coordinates where N is the
number of vertices and D is the dimension of the space
delta : float
Radius used to determine the connectivity of the Rips complex.
Vertices which are separated by no more than distance delta
are connected by an edge in the 1-skeleton of the Rips complex.
Examples
--------
>>> from pydec.dec import rips_complex
>>> from numpy import array
>>> vertices = array([[0,0],[2,0],[2,2],[0,2],[1,1]], dtype='float')
>>> delta = 2.1
>>> rc = rips_complex(vertices, delta)
>>> print rc.simplices[0]
[[0]
[1]
[2]
[3]
[4]]
>>> print rc.simplices[1]
[[0 1]
[0 3]
[0 4]
[1 2]
[1 4]
[2 3]
[2 4]
[3 4]]
>>> print rc.simplices[2]
[[0 1 4]
[0 3 4]
[1 2 4]
[2 3 4]]
"""
vertices =
|
asarray(vertices)
|
numpy.asarray
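# The docstring above defines the 1-skeleton as all vertex pairs separated by at most delta; a
# brute-force sketch that reproduces the edge list from the example (pydec itself uses a
# kd_tree rather than this O(N^2) pairwise check).
import numpy as np
from itertools import combinations

vertices = np.array([[0, 0], [2, 0], [2, 2], [0, 2], [1, 1]], dtype=float)
delta = 2.1
edges = [(i, j) for i, j in combinations(range(len(vertices)), 2)
         if np.linalg.norm(vertices[i] - vertices[j]) <= delta]
print(edges)  # [(0, 1), (0, 3), (0, 4), (1, 2), (1, 4), (2, 3), (2, 4), (3, 4)]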
|
import numpy as np
import matplotlib.pyplot as plt
class LPMModel():
"""
input:
x -----(feature_num, batch)
y -------(1,batch)
"""
def __init__(self, x, y,epochs,lr,eplison):
self.x = x
self.y = y
print("input:x",
|
np.shape(x)
|
numpy.shape
|
import re
import csv
import sys
import nltk
import gensim
import string
import numpy as np
from preprocess import findEntity
class evaluate(object):
"""docstring for evaluate"""
def __init__(self, dir="./Datasets/", filename=None):
super(evaluate, self).__init__()
if filename==None:
self.text = findEntity()
else:
self.text = findEntity(filename=filename)
def getProperOutput(self, allPredictions, Index, mode="None"):
originalTokens = self.text.getTokenized()[Index[0]:(Index[-1]+1)]
outputText = []
for sentenceIndex,eachSentence in enumerate(allPredictions):
outputSentence = ""
for tagIndex, eachTag in enumerate(eachSentence):
predictedEntity = ""
if mode == "withIntermediate":
if eachTag == 0:
pass
elif eachTag == 1:
pass
elif eachTag == 3:
pass
elif eachTag == 5:
pass
else:
if eachTag == 0:
predictedEntity = originalTokens[sentenceIndex][tagIndex]
elif eachTag == 1:
if tagIndex == 0 or eachSentence[tagIndex - 1] != 1:
predictedEntity = "<ENAMEX TYPE=\"PERSON\">" + originalTokens[sentenceIndex][tagIndex]
if tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 1:
predictedEntity = predictedEntity + "</ENAMEX>"
elif tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 1:
predictedEntity = originalTokens[sentenceIndex][tagIndex] + "</ENAMEX>"
elif eachSentence[tagIndex - 1] == 1 and eachSentence[tagIndex + 1] == 1:
predictedEntity = originalTokens[sentenceIndex][tagIndex]
elif eachTag == 2:
if tagIndex == 0 or eachSentence[tagIndex - 1] != 2:
predictedEntity = "<ENAMEX TYPE=\"LOCATION\">" + originalTokens[sentenceIndex][tagIndex]
if tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 2:
predictedEntity = predictedEntity + "</ENAMEX>"
elif tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 2:
predictedEntity = originalTokens[sentenceIndex][tagIndex] + "</ENAMEX>"
elif eachSentence[tagIndex - 1] == 2 and eachSentence[tagIndex + 1] == 2:
predictedEntity = originalTokens[sentenceIndex][tagIndex]
elif eachTag == 3:
if tagIndex == 0 or eachSentence[tagIndex - 1] != 3:
predictedEntity = "<ENAMEX TYPE=\"ORGANIZATION\">" + originalTokens[sentenceIndex][tagIndex]
if tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 3:
predictedEntity = predictedEntity + "</ENAMEX>"
elif tagIndex == (len(eachSentence)-1) or eachSentence[tagIndex + 1] != 3:
predictedEntity = originalTokens[sentenceIndex][tagIndex] + "</ENAMEX>"
elif eachSentence[tagIndex - 1] == 3 and eachSentence[tagIndex + 1] == 3:
predictedEntity = originalTokens[sentenceIndex][tagIndex]
outputSentence = outputSentence + " " + predictedEntity
outputText.append(outputSentence)
return outputText
def getTextOutput(self, allPredictions, Index, mode="None"):
punctuations = list(string.punctuation)
originalTokens = self.text.getTokenized()
originalText = self.text.corpus
outputText = originalText
for sentenceIndex,eachSentence in enumerate(allPredictions):
currentIndex = 0
endTagIndex = (len(eachSentence)-1)
currentSentence = outputText[sentenceIndex]
for tagIndex, eachTag in enumerate(eachSentence):
currentToken = originalTokens[sentenceIndex][tagIndex]
if mode == "withIntermediate":
if eachTag == 0:
pass
elif eachTag == 1:
pass
elif eachTag == 3:
pass
elif eachTag == 5:
pass
else:
if eachTag == 1:
if tagIndex == 0: #At beginning
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"PERSON\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+22+len(currentToken)
if eachSentence[tagIndex+1] != 1:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif 0<tagIndex and tagIndex<endTagIndex: #At middle
if eachSentence[tagIndex-1] == 1: #Previous word tag is the same
if eachSentence[tagIndex+1] == 1: #Next word tag is the same
pass #No need to insert tag
elif eachSentence[tagIndex+1] != 1: #Next word tag is NOT the same, the current word is an end word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
elif eachSentence[tagIndex-1] != 1: #Previous word tag is NOT the same, the current word is a start word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"PERSON\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+22+len(currentToken)
if eachSentence[tagIndex+1] != 1:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif tagIndex == endTagIndex: #At end
if eachSentence[tagIndex-1] != 1:
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"PERSON\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+22+len(currentToken)
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
elif eachSentence[tagIndex-1] == 1: #Previous word tag is the same
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
elif eachTag == 2:
if tagIndex == 0: #At beginning
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"LOCATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+24+len(currentToken)
if eachSentence[tagIndex+1] != 2:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif 0<tagIndex and tagIndex<endTagIndex: #At middle
if eachSentence[tagIndex-1] == 2: #Previous word tag is the same
if eachSentence[tagIndex+1] == 2: #Next word tag is the same
pass #No need to insert tag
elif eachSentence[tagIndex+1] != 2: #Next word tag is NOT the same, the current word is an end word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
elif eachSentence[tagIndex-1] != 2: #Previous word tag is NOT the same, the current word is a start word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"LOCATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+24+len(currentToken)
if eachSentence[tagIndex+1] != 2:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif tagIndex == endTagIndex: #At end
if eachSentence[tagIndex-1] != 2:
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"LOCATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+24+len(currentToken)
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
elif eachSentence[tagIndex-1] == 2: #Previous word tag is the same
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
elif eachTag == 3:
if tagIndex == 0: #At beginning
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"ORGANIZATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+28+len(currentToken)
if eachSentence[tagIndex+1] != 3:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif 0<tagIndex and tagIndex<endTagIndex: #At middle
if eachSentence[tagIndex-1] == 3: #Previous word tag is the same
if eachSentence[tagIndex+1] == 3: #Next word tag is the same
pass #No need to insert tag
elif eachSentence[tagIndex+1] != 3: #Next word tag is NOT the same, the current word is an end word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
elif eachSentence[tagIndex-1] != 3: #Previous word tag is NOT the same, the current word is a start word
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"ORGANIZATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+28+len(currentToken)
if eachSentence[tagIndex+1] != 3:
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
currentIndex = tempIndex + 9
else:
currentIndex = tempIndex
elif tagIndex == endTagIndex: #At end
if eachSentence[tagIndex-1] != 3:
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentSentence = currentSentence[0:currentIndex] + "<ENAMEX TYPE=\"ORGANIZATION\">" + currentSentence[currentIndex::]
tempIndex = currentIndex+28+len(currentToken)
currentSentence = currentSentence[0:tempIndex] + "</ENAMEX>" + currentSentence[tempIndex::]
elif eachSentence[tagIndex-1] == 3: #Previous word tag is the same
currentIndex = currentIndex + currentSentence[currentIndex::].index(currentToken) #Get left most index of an entry
currentIndex = currentIndex+len(currentToken)
currentSentence = currentSentence[0:currentIndex] + "</ENAMEX>" + currentSentence[currentIndex::]
outputText[sentenceIndex] = currentSentence
return outputText
def getPredictionElements(self, outputText):
predicted_corpus = outputText
yPred_Person, yPred_Location, yPred_Organization = self.text.find_enamex(corpus=predicted_corpus)
return yPred_Person, yPred_Location, yPred_Organization
def getTaggedElements(self, Index):
test_corpus = self.text.corpus[Index[0]:(Index[-1]+1)]
yPerson, yLocation, yOrganization = self.text.find_enamex(corpus=test_corpus)
return yPerson, yLocation, yOrganization
def F1_ExactMatch(self, yPerson, yLocation, yOrganization, yPred_Person, yPred_Location, yPred_Organization):
true_positive = np.zeros(3)
false_positive = np.zeros(3)
false_negative = np.zeros(3)
precision = np.zeros(3)
recall = np.zeros(3)
for predictions in yPred_Person:
if (predictions in yPerson) == True:
true_positive[0] += 1.0
else:
false_positive[0] +=1.0
for predictions in yPred_Location:
if (predictions in yLocation) == True:
true_positive[1] += 1.0
else:
false_positive[1] +=1.0
for predictions in yPred_Organization:
if (predictions in yOrganization) == True:
true_positive[2] += 1.0
else:
false_positive[2] +=1.0
false_negative[0] = len(yPerson) - true_positive[0]
false_negative[1] = len(yLocation) - true_positive[1]
false_negative[2] = len(yOrganization) - true_positive[2]
#Macro Average
for i in range(3):
if np.sum(true_positive[i]) == 0:
precision[i] = 0
recall[i] = 0
else:
precision[i] = true_positive[i] / (true_positive[i] + false_positive[i])
recall[i] = true_positive[i] / (true_positive[i] + false_negative[i])
MacroPrecision = np.average(precision)
MacroRecall = np.average(recall)
if MacroPrecision == 0 or MacroRecall == 0:
F1_ExactMatch_Macro = 0
else:
F1_ExactMatch_Macro = 2.0 * (MacroPrecision * MacroRecall)/(MacroPrecision + MacroRecall)
#Micro Average
if np.sum(true_positive) == 0:
F1_ExactMatch_Micro = 0
else:
MicroPrecision = np.sum(true_positive)/(np.sum(true_positive)+
|
np.sum(false_positive)
|
numpy.sum
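# The two averages computed above differ only in where the ratio is taken: macro-F1 averages
# the per-class precision/recall before combining, while micro-F1 pools the raw counts first.
# A compact sketch with hypothetical per-class counts (not the entity counts from the tests).
import numpy as np

tp = np.array([8.0, 5.0, 2.0])
fp = np.array([2.0, 5.0, 0.0])
fn = np.array([0.0, 5.0, 8.0])

prec, rec = tp / (tp + fp), tp / (tp + fn)            # per-class ratios
macro_p, macro_r = prec.mean(), rec.mean()
macro_f1 = 2 * macro_p * macro_r / (macro_p + macro_r)

micro_p = tp.sum() / (tp.sum() + fp.sum())            # pooled counts
micro_r = tp.sum() / (tp.sum() + fn.sum())
micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
print(round(macro_f1, 3), round(micro_f1, 3))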
|
import warnings
import uuid
from copy import deepcopy
import numpy as np
from collections import namedtuple
from openpnm.utils import Workspace, logging
from openpnm.utils import SettingsAttr
from openpnm.utils.misc import PrintableList, Docorator
docstr = Docorator()
logger = logging.getLogger(__name__)
ws = Workspace()
__all__ = ['Base']
@docstr.get_sections(base='BaseSettings', sections=['Parameters'])
class BaseSettings:
r"""
The default settings to use on instance of Base
Parameters
----------
prefix : str
The default prefix to use when generating a name
name : str
The name of the object, which will be generated if not given
uuid : str
A universally unique identifier for the object to keep things straight
"""
prefix = 'base'
name = ''
uuid = ''
@docstr.get_sections(base='Base', sections=['Parameters'])
class Base(dict):
r"""
Contains methods for working with the data in the OpenPNM dict objects
Parameters
----------
name : str, optional
The unique name of the object. If not given one will be generated.
Np : int, default is 0
The total number of pores to be assigned to the object
Nt : int, default is 0
The total number of throats to be assigned to the object
Notes
-----
This ``Base`` class is used as the template for all other OpenPNM objects,
including Networks, Geometries, Phases, Physics, and Algorithms. This
class is a subclass of the standard ``dict`` so has the usual methods such
as ``pop`` and ``keys``, and has extra methods for working specifically
with OpenPNM data.
"""
def __new__(cls, *args, **kwargs):
instance = super(Base, cls).__new__(cls, *args, **kwargs)
instance._settings = None
instance._settings_docs = None
return instance
def __init__(self, Np=0, Nt=0, network=None, name=None, project=None,
settings=None):
super().__init__()
self.settings = SettingsAttr(BaseSettings, settings)
if project is None:
if network is None:
project = ws.new_project()
else:
project = network.project
if name is None:
name = project._generate_name(self)
project._validate_name(name)
project.extend(self)
self.settings['name'] = name
self.settings.uuid = str(uuid.uuid4())
self.update({'pore.all': np.ones(shape=(Np, ), dtype=bool)})
self.update({'throat.all': np.ones(shape=(Nt, ), dtype=bool)})
def __repr__(self):
return f'<{self.__class__.__module__} object at {hex(id(self))}>'
def __eq__(self, other):
return hex(id(self)) == hex(id(other))
def __setitem__(self, key, value):
r"""
This is a subclass of the default __setitem__ behavior. The main aim
is to limit what type and shape of data can be written to protect
the integrity of the network. Specifically, this means only Np or Nt
long arrays can be written, and they must be called 'pore.***' or
'throat.***'. Also, any scalars are cast into full length vectors.
"""
if value is None:
return
# Check 1: If value is a dictionary, break it into constituent arrays
# and recursively call __setitem__ on each
if hasattr(value, 'keys'):
for item in value.keys():
prop = item.replace('pore.', '').replace('throat.', '')
self.__setitem__(key+'.'+prop, value[item])
return
# Check 2: Enforce correct dict naming
element = key.split('.')[0]
if element not in ['pore', 'throat']:
raise Exception('All keys must start with either pore, or throat')
# Check 3: If adding a new key, make sure it has no conflicts
if self.project:
proj = self.project
boss = proj.find_full_domain(self)
keys = boss.keys(mode='all', deep=True)
else:
boss = None
keys = self.keys()
# Prevent 'pore.foo.bar' when 'pore.foo' present
long_keys = [i for i in keys if i.count('.') > 1]
key_root = '.'.join(key.split('.')[:2])
if (key.count('.') > 1) and (key_root in keys):
raise Exception('Cannot create ' + key + ' when '
+ key_root + ' is already defined')
# Prevent 'pore.foo' when 'pore.foo.bar' is present
if (key.count('.') == 1) and any([i.startswith(key) for i in long_keys]):
hit = [i for i in keys if i.startswith(key)][0]
raise Exception('Cannot create ' + key + ' when '
+ hit + ' is already defined')
# Prevent writing pore.foo on boss when present on subdomain
if boss:
if boss is self and (key not in ['pore.all', 'throat.all']):
if (key in keys) and (key not in self.keys()):
raise Exception('Cannot create ' + key + ' when it is'
+ ' already defined on a subdomain')
# This check allows subclassed numpy arrays through, eg. with units
if not isinstance(value, np.ndarray):
value = np.array(value, ndmin=1) # Convert value to an ndarray
# Skip checks for 'coords', 'conns'
if key in ['pore.coords', 'throat.conns']:
super(Base, self).__setitem__(key, value)
return
# Skip checks for protected props, and prevent changes if defined
protected_keys = ['all']
if key.split('.')[1] in protected_keys:
if key in self.keys():
if np.shape(self[key]) == (0, ):
super(Base, self).__setitem__(key, value)
else:
warnings.warn(key+' is already defined.')
else:
super(Base, self).__setitem__(key, value)
return
# Write value to dictionary
if np.shape(value)[0] == 1: # If value is scalar
value = np.ones((self._count(element), ), dtype=value.dtype)*value
super(Base, self).__setitem__(key, value)
elif np.shape(value)[0] == self._count(element):
super(Base, self).__setitem__(key, value)
else:
if self._count(element) == 0:
self.update({key: value})
else:
raise Exception('Provided array is wrong length for ' + key)
def __getitem__(self, key):
element, prop = key.split('.', 1)
if key in self.keys():
# Get values if present on self
vals = super().__getitem__(key)
elif key in self.keys(mode='all', deep=True):
# Interleave values from geom if found there
vals = self.interleave_data(key)
elif any([k.startswith(key + '.') for k in self.keys()]):
# Create a subdict of values present on self
vals = {}
keys = self.keys()
vals.update({k: self.get(k) for k in keys if k.startswith(key + '.')})
elif any([k.startswith(key + '.') for k in self.keys(mode='all',
deep=True)]):
# Create a subdict of values in subdomains by interleaving
vals = {}
keys = self.keys(mode='all', deep=True)
vals.update({k: self.interleave_data(k) for k in keys
if k.startswith(key + '.')})
# Attempt to run model when missing data.
elif hasattr(self, 'models') and key in self.models:
self.regenerate_models(key)
vals = super().__getitem__(key)
else:
raise KeyError(key)
return vals
def __delitem__(self, key):
try:
super().__delitem__(key)
except KeyError as e:
d = self[key] # if key is a nested dict, get all values
for item in d.keys():
super().__delitem__(item)
def _set_name(self, name, validate=True):
old_name = self.settings['name']
if name == old_name:
return
if name is None:
name = self.project._generate_name(self)
if validate:
self.project._validate_name(name)
self.settings['name'] = name
# Rename any label arrays in other objects
for item in self.project:
if 'pore.' + old_name in item.keys():
item['pore.'+name] = item.pop('pore.' + old_name)
if 'throat.' + old_name in item.keys():
item['throat.' + name] = item.pop('throat.' + old_name)
def _get_name(self):
"""String representing the name of the object"""
try:
return self.settings['name']
except AttributeError:
return None
name = property(_get_name, _set_name)
def _get_project(self):
"""A shortcut to get a handle to the associated project."""
for proj in ws.values():
if self in proj:
return proj
project = property(fget=_get_project)
def _set_settings(self, settings):
self._settings = deepcopy(settings)
if (self._settings_docs is None) and (settings.__doc__ is not None):
self._settings_docs = settings.__doc__
def _get_settings(self):
"""Dictionary containing object settings."""
if self._settings is None:
self._settings = SettingsAttr()
if self._settings_docs is not None:
self._settings.__dict__['__doc__'] = self._settings_docs
return self._settings
def _del_settings(self):
self._settings = None
settings = property(fget=_get_settings, fset=_set_settings, fdel=_del_settings)
@property
def _domain(self):
try:
return self.phase
except AttributeError:
return self.network
@property
def network(self):
r"""
A shortcut to get a handle to the associated network.
There can only be one so this works.
"""
return self.project.network
def clear(self, element=None, mode='all'):
r"""
A subclassed version of the standard dict's clear method. This can be
used to selectively clear certain data from the object, including
properties and/or labels. Importantly, it does NOT clear items that
are required to maintain the integrity of the simulation. These are
arrays that define the topology (i.e. 'pore.all', 'pore.coords',
'throat.all', 'throat.conns'), as well as arrays that indicate
associations between objects (i.e. 'pore.geo_01').
Parameters
----------
element : str or List[str]
Can be either 'pore' or 'throat', which specifies whether 'pore'
and/or 'throat' arrays should be cleared. The default is both.
mode : str or List[str]
This controls what is cleared from the object. Options are:
**'props'** : Removes all numerical property values from
the object dictionary
**'model_data'** : Removes only numerical data that were
produced by an associated model
**'labels'** : Removes all labels from the object
dictionary, except those relating to the pore and throat
locations of associated objects
**'all'** : Removes both 'props' and 'labels'
Notes
-----
If you wish to selectively remove some properties but not others,
use something like ``del object['pore.blah']`` at the Python
prompt. This can also be done in a for-loop to remove a list of
items.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> len(pn.labels()) # There are 12 total labels on the network
12
>>> pn.clear(mode='labels')
>>> len(pn.labels()) # Kept only 'pore.all' and 'throat.all'
2
>>> geom = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,
... throats=pn.Ts, name='geo1')
>>> len(pn.labels()) # 2 new labels were added for geometry locations
4
>>> pn.clear(mode='labels')
>>> 'pore.'+geom.name in pn.keys() # The geometry labels were kept
True
>>> len(pn.props()) # The network has two properties
2
>>> pn.clear(element='pore', mode='props')
>>> 'pore.coords' in pn.keys() # 'pore.coords' is protected, so it was kept
True
>>> pn.clear() # Remove everything except protected labels and arrays
>>> print(sorted(list(pn.keys(element='pore', mode='all'))))
['pore.all', 'pore.coords', 'pore.geo1']
"""
protected = ['pore.all', 'throat.all', 'pore.coords', 'throat.conns']
allowed = ['props', 'labels', 'model_data', 'all']
mode = self._parse_mode(mode=mode, allowed=allowed)
if 'model_data' in mode:
for item in list(self.keys()):
temp = '.'.join(item.split('.')[0:2])
if temp in self.models.keys():
logger.info('deleting ' + item)
del self[item]
mode.remove('model_data')
for item in self.keys(mode=mode, element=element):
if item not in protected:
if item.split('.')[1] not in self.project.names:
del self[item]
def keys(self, element=None, mode=None, deep=False):
r"""
This subclass works exactly like ``keys`` when no arguments are passed,
but optionally accepts an ``element`` and a ``mode``, which filters
the output to only the requested keys.
The default behavior is exactly equivalent to the normal ``keys``
method.
Parameters
----------
element : str
Can be either 'pore' or 'throat', which limits the returned list of
keys to only 'pore' or 'throat' keys. If neither is given, then
both are assumed.
mode : str, optional
Controls which keys are returned. Options are:
**'labels'**
Limits the returned list of keys to only 'labels'
(boolean arrays)
**'props'**
Limits the returned list of keys to only 'props'
(numerical arrays).
**'all'**
Returns both 'labels' and 'props'. This is equivalent
to sending a list of both 'labels' and 'props'.
If no mode is specified then the normal KeysView object is
returned.
deep : bool
If set to ``True`` then the keys on all associated subdomain
objects are returned as well.
See Also
--------
props
labels
Notes
-----
This subclass can be used to get dictionary keys of specific
kinds of data. Its use augments ``props`` and ``labels`` by
returning a list containing both types, but possibly limited by
element type ('pores' or 'throats').
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic([5, 5, 5])
>>> pn.keys(mode='props') # Get all props
['pore.coords', 'throat.conns']
>>> pn.keys(mode='props', element='pore') # Get only pore props
['pore.coords']
"""
if mode is None:
return super().keys()
element = self._parse_element(element=element)
allowed = ['props', 'labels']
if 'all' in mode:
mode = allowed
mode = self._parse_mode(mode=mode, allowed=allowed)
keys = super().keys()
temp = []
if 'props' in mode:
temp.extend([i for i in keys if self.get(i).dtype != bool])
if 'labels' in mode:
temp.extend([i for i in keys if self.get(i).dtype == bool])
if element:
temp = [i for i in temp if i.split('.')[0] in element]
if deep:
if self._isa('phase'):
for item in self.project.find_physics(phase=self):
temp += item.keys(element=element, mode=mode, deep=False)
if self._isa('network'):
for item in self.project.geometries().values():
temp += item.keys(element=element, mode=mode, deep=False)
return temp
def props(self, element=None, mode='all', deep=False):
r"""
Returns a list containing the names of all defined pore or throat
properties.
Parameters
----------
element : str, optional
Can be either 'pore' or 'throat' to specify what properties are
returned. If no element is given, both are returned
mode : str, optional
Controls what type of properties are returned. Options are:
**'all'** : Returns all properties on the object (default)
**'models'** : Returns only properties that are associated with a
model
**'constants'** : Returns data values that were *not* generated by
a model, but were manually created.
deep : bool
If set to ``True`` then the props on all associated subdomain
objects are returned as well.
Returns
-------
An alphabetically sorted list containing the names of all pore or
throat properties currently defined. The list is iterable, so it is
useful for scanning through properties.
See Also
--------
labels
keys
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 3, 3])
>>> pn.props('pore')
['pore.coords']
>>> pn.props('throat')
['throat.conns']
>>> pn.props()
['pore.coords', 'throat.conns']
"""
# Parse Inputs
element = self._parse_element(element=element)
allowed_modes = ['all', 'constants', 'models']
mode = self._parse_mode(mode=mode, allowed=allowed_modes, single=True)
if mode == 'all':
vals = set(self.keys(mode='props'))
if mode == 'constants':
if hasattr(self, 'models'):
temp = set(self.keys(mode='props'))
vals = temp.difference(self.models.keys())
else:
vals = set(self.keys(mode='props'))
if mode == 'models':
if hasattr(self, 'models'):
temp = set(self.keys(mode='props'))
vals = temp.intersection(self.models.keys())
else:
logger.warning('Object does not have a models attribute')
vals = set()
# Deal with hidden props
hide = set([i for i in self.keys() if i.split('.')[1].startswith('_')])
vals = vals.difference(hide)
# Remove values of the wrong element
temp = set([i for i in vals if i.split('.')[0] not in element])
vals = set(vals).difference(temp)
# Convert to nice list for printing
vals = PrintableList(list(vals))
# Repeat for associated objects if deep is True
if deep:
if self._isa('phase'):
for item in self.project.find_physics(phase=self):
vals += item.props(element=element, mode=mode, deep=False)
if self._isa('network'):
for item in self.project.geometries().values():
vals += item.props(element=element, mode=mode, deep=False)
return vals
@property
def Np(self):
r"""A shortcut to query the total number of pores on the object"""
return np.shape(self.get('pore.all'))[0]
@property
def Nt(self):
r"""A shortcut to query the total number of throats on the object"""
return np.shape(self.get('throat.all'))[0]
@property
def Ps(self):
r"""A shortcut to get a list of all pores on the object"""
return np.arange(0, self.Np)
@property
def Ts(self):
r"""A shortcut to get a list of all throats on the object"""
return np.arange(0, self.Nt)
def _tomask(self, indices, element):
r"""
This is a generalized version of tomask that accepts a string of
'pore' or 'throat' for programmatic access.
"""
element = self._parse_element(element, single=True)
indices = self._parse_indices(indices)
N = np.shape(self[element + '.all'])[0]
ind = np.array(indices, ndmin=1)
mask = np.zeros((N, ), dtype=bool)
mask[ind] = True
return mask
def to_mask(self, pores=None, throats=None):
r"""
Convert a list of pore or throat indices into a boolean mask of the
correct length.
Parameters
----------
pores : array_like
List of pore indices. Only one of these can be specified at a
time, and the returned result will be of the corresponding
length.
throats : array_like
List of throat indices. Only one of these can be specified at
a time, and the returned result will be of the corresponding
length.
Returns
-------
ndarray
A boolean mask of length Np or Nt with ``True`` in the
specified pore or throat locations.
See Also
--------
to_indices
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> mask = pn.to_mask(pores=[0, 10, 20])
>>> sum(mask) # 3 non-zero elements exist in the mask (0, 10 and 20)
3
>>> len(mask) # Mask size is equal to the number of pores in network
125
>>> mask = pn.to_mask(throats=[0, 10, 20])
>>> len(mask) # Mask is now equal to number of throats in network
300
"""
if (pores is not None) and (throats is None):
mask = self._tomask(element='pore', indices=pores)
elif (throats is not None) and (pores is None):
mask = self._tomask(element='throat', indices=throats)
else:
raise Exception('Cannot specify both pores and throats')
return mask
def to_indices(self, mask):
r"""
Converts a boolean mask to a list of pore or throat indices.
Parameters
----------
mask : array_like of booleans
A boolean array with ``True`` at locations where indices are
desired. The appropriate indices are returned based on the length
of mask, which must be either Np or Nt long.
Returns
-------
ndarray
A list of pore or throat indices corresponding to the locations
where the received mask was ``True``.
See Also
--------
to_mask
Notes
-----
This behavior could just as easily be accomplished by using the mask
in ``pn.pores()[mask]`` or ``pn.throats()[mask]``. This method is
just a convenience function and is a complement to ``to_mask``.
"""
if np.amax(mask) > 1:
raise Exception('Received mask does not appear to be boolean')
mask = np.array(mask, dtype=bool)
indices = self._parse_indices(mask)
return indices
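# Usage sketch (assumes a fresh Cubic network; values are illustrative):
#   import openpnm as op
#   pn = op.network.Cubic(shape=[5, 5, 5])
#   mask = pn.to_mask(pores=[0, 10, 20])
#   pn.to_indices(mask)   # -> array([ 0, 10, 20])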
def interleave_data(self, prop):
r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : str
The property name to be retrieved
Returns
-------
ndarray
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible;
however, when data are missing this can be tricky. Data can be
missing in two different ways: a set of pores is not assigned to
any geometry, or the network contains multiple geometries and the
data does not exist on all of them. Float and boolean data are fine,
but missing ints are converted to float when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.to_mask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False]
"""
# Fetch sources list depending on type of self
proj = self.project
if self._isa() in ['network', 'geometry']:
sources = list(proj.geometries().values())
elif self._isa() in ['phase', 'physics']:
sources = list(proj.find_physics(phase=self))
elif self._isa() in ['algorithm', 'base']:
sources = [self]
else:
raise Exception('Unrecognized object type, cannot find dependents')
# Get generalized element and array length
element = self._parse_element(prop.split('.')[0], single=True)
N = self.project.network._count(element)
# Attempt to fetch the requested array from each object
arrs = [obj.get(prop, None) for obj in sources]
# Check for missing sources, and add None to arrs if necessary
if N > sum([obj._count(element) for obj in sources]):
arrs.append(None)
# Obtain list of locations for inserting values
locs = [self._get_indices(element, item.name) for item in sources]
if np.all([item is None for item in arrs]): # prop not found anywhere
raise KeyError(prop)
# Let's start by handling the easy cases first
if not any([a is None for a in arrs]):
# All objs present and array found on all objs
shape = list(arrs[0].shape)
shape[0] = N
types = [a.dtype for a in arrs]
if len(set(types)) == 1:
# All types are the same
temp_arr = np.ones(shape, dtype=types[0])
for vals, inds in zip(arrs, locs):
temp_arr[inds] = vals
return temp_arr # Return early because it's just easier
if all([a.dtype in [float, int, bool] for a in arrs]):
# All types are numeric, make float
temp_arr = np.ones(shape, dtype=float)
for vals, inds in zip(arrs, locs):
temp_arr[inds] = vals
return temp_arr # Return early because it's just easier
# Now handle the complicated cases
# Check the general type of each array
atype = []
for a in arrs:
if a is not None:
t = a.dtype.name
if t.startswith('int') or t.startswith('float'):
atype.append('numeric')
elif t.startswith('bool'):
atype.append('boolean')
else:
atype.append('other')
if not all([item == atype[0] for item in atype]):
raise Exception('The array types are not compatible')
dummy_val = {'numeric': np.nan, 'boolean': False, 'other': None}
# Create an empty array of the right type and shape
for item in arrs:
if item is not None:
if len(item.shape) == 1:
temp_arr = np.zeros((N, ), dtype=item.dtype)
else:
temp_arr = np.zeros((N, item.shape[1]), dtype=item.dtype)
temp_arr.fill(dummy_val[atype[0]])
sizes = [np.size(a) for a in arrs]
# Convert int arrays to float IF NaNs are expected
if temp_arr.dtype.name.startswith('int') and \
(np.any([i is None for i in arrs]) or np.sum(sizes) != N):
temp_arr = temp_arr.astype(float)
temp_arr.fill(np.nan)
# Fill new array with values in the corresponding locations
for vals, inds in zip(arrs, locs):
if vals is not None:
temp_arr[inds] = vals
else:
temp_arr[inds] = dummy_val[atype[0]]
return temp_arr
def interpolate_data(self, propname, mode='mean'):
r"""
Determines a pore (or throat) property as the average of its
neighboring throats (or pores)
Parameters
----------
propname: str
The dictionary key to the values to be interpolated.
mode : str
The method used for interpolation. Options are 'mean' (default),
'min', and 'max'.
Returns
-------
vals : ndarray
An array containing interpolated pore (or throat) data
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 1, 1])
>>> pn['pore.value'] = [1, 2, 3]
>>> pn.interpolate_data('pore.value')
array([1.5, 2.5])
"""
from openpnm.models.misc import from_neighbor_throats, from_neighbor_pores
if propname.startswith('throat'):
values = from_neighbor_throats(target=self, prop=propname, mode=mode)
elif propname.startswith('pore'):
values = from_neighbor_pores(target=self, prop=propname, mode=mode)
if hasattr(self[propname], 'units'):
values *= self[propname].units
return values
def get_conduit_data(self, prop, mode='mean'):
r"""
Combines requested data into a single 3-column array.
Parameters
----------
prop : str
The dictionary key to the property of interest
mode : str
How interpolation should be performed for missing values. If
values are present for both pores and throats, then this
argument is ignored. The ``interpolate_data`` method is used.
Options are:
**'mean'** (default):
Finds the mean value of the neighboring pores (or throats)
**'min'**
Finds the minimum of the neighboring pores (or throats)
**'max'**
Finds the maximum of the neighboring pores (or throats)
Returns
-------
conduit_data : ndarray
An Nt-by-3 array containing the requested property for each
pore-throat-pore conduit, with columns ordered pore 1, throat, pore 2.
"""
try:
T = self['throat.' + prop]
try:
P1, P2 = self['pore.' + prop][self.network.conns].T
except KeyError:
P = self.interpolate_data(propname='throat.'+prop, mode=mode)
P1, P2 = P[self.network.conns].T
except KeyError:
P1, P2 = self['pore.' + prop][self.network.conns].T
T = self.interpolate_data(propname='pore.'+prop, mode=mode)
return np.vstack((P1, T, P2)).T
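# Usage sketch (hypothetical property name; assumes 'pore.diameter' and/or
# 'throat.diameter' already exist, e.g. added by a geometry model):
#   vals = obj.get_conduit_data('diameter')   # Nt-by-3: pore 1, throat, pore 2
#   # whichever of the pore/throat arrays is missing is filled by
#   # interpolate_data using the requested mode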
def _count(self, element=None):
r"""
Returns the number of pores or throats on the object, depending on
the requested ``element``.
Parameters
----------
element : str, optional
Can be either 'pore', 'pores', 'throat' or 'throats', which
specifies which count to return.
Returns
-------
int
The number of pores or throats on the object, as requested by
``element``.
See Also
--------
num_pores
num_throats
Notes
-----
The ability to send plurals is useful for some types of 'programmatic'
access. For instance, the standard argument for locations is pores
or throats. If these are bundled up in a **kwargs dict then you can
just use the dict key in count() without removing the 's'.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> pn._count('pore')
125
>>> pn._count('throat')
300
"""
element = self._parse_element(element=element, single=True)
temp = np.size(self.__getitem__(element+'.all'))
return temp
def show_hist(self,
props=['pore.diameter', 'throat.diameter', 'throat.length'],
bins=20, fontsize=14, **kwargs):
r"""
Shows a quick plot of key property distributions.
Parameters
----------
props : str or List[str]
The pore and/or throat properties to be plotted as histograms. By
default this function will show 'pore.diameter', 'throat.diameter',
and 'throat.length'.
bins : int or array_like
The number of bins to use when generating the histogram. If an
array is given, it is used as the bin edges instead.
fontsize : int
Sets the font size temporarily. The default size of matplotlib is
10, which is too small for many screens. This function uses a
default of 14 and restores the matplotlib setting afterwards.
Note that you can override the matplotlib setting globally with
``matplotlib.rcParams['font.size'] = 22``.
Notes
-----
Other keyword arguments are passed to the ``matplotlib.pyplot.hist``
function.
"""
import matplotlib.pyplot as plt
temp = plt.rcParams['font.size']
plt.rcParams['font.size'] = fontsize
if isinstance(props, str):
props = [props]
N = len(props)
color = plt.cm.tab10(range(10))
if N <= 3:
r, c = 1, N
elif N == 4:
r, c = 2, 2
else:
r, c = N // 3 + 1, 3
fig, ax = plt.subplots(r, c, figsize=(3*c, 3*r))
axs = np.array(ax).flatten()
i = None
for i, _ in enumerate(props):
try:
# Update kwargs with some default values
if 'edgecolor' not in kwargs.keys():
kwargs.update({'edgecolor': 'k'})
if 'facecolor' not in kwargs:
kwargs.update({'facecolor': color[np.mod(i, 10)]})
axs[i].hist(self[props[i]], bins=bins, **kwargs)
axs[i].set_xlabel(props[i])
except KeyError:
pass
# Hide unpopulated subplots from the grid
for j in range(i + 1, len(axs)):
axs[j].set_axis_off()
plt.rcParams['font.size'] = temp
plt.tight_layout(h_pad=0.9, w_pad=0.9)
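# Usage sketch (assumes size data exist, e.g. from a StickAndBall geometry):
#   import openpnm as op
#   pn = op.network.Cubic(shape=[10, 10, 10])
#   geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
#   geo.show_hist()                               # default three properties
#   geo.show_hist(props='pore.volume', bins=50)   # one property, finer bins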
def _parse_indices(self, indices):
r"""
This private method accepts a list of pores or throats and returns a
properly structured Numpy array of indices.
Parameters
----------
indices : int or array_like
This argument can accept numerous different data types including
boolean masks, integers and arrays.
Returns
-------
A Numpy array of indices.
Notes
-----
This method should only be called by the method that is actually using
the locations, to avoid calling it multiple times.
"""
if indices is None:
indices =
|
np.array([], ndmin=1, dtype=int)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from itertools import product, chain
from operator import add, sub
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.piece import Piece
from annotation.direction import (Direction, get_eight_directions,
get_cross_directions)
from ..naive_long import WhiteNaiveLongEffectLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/02'
class TestWhiteLongEffectHi(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_effect1(self):
"""
Test that the long-range effect of HI (the rook) is present.
Cases where another piece blocks the effect are not considered.
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
ph = tf.placeholder(tf.int32, shape=shape)
for direction in chain(get_eight_directions(),
[Direction.RIGHT_DOWN_DOWN,
Direction.LEFT_DOWN_DOWN]):
if direction in (Direction.RIGHT_DOWN_DOWN,
Direction.LEFT_DOWN_DOWN):
# A long-range effect in a knight-move direction is impossible, so an error is expected
with self.assertRaises(ValueError):
WhiteNaiveLongEffectLayer(
direction=direction, data_format=self.data_format,
use_cudnn=self.use_cudnn
)(ph)
continue
white_effect = WhiteNaiveLongEffectLayer(
direction=direction, data_format=self.data_format,
use_cudnn=self.use_cudnn
)(ph)
# Handling the channel axis is tedious, so squeeze the dimensions down
white_effect = tf.squeeze(white_effect)
with self.test_session() as sess:
for i, j in product(range(9), repeat=2):
with self.subTest(direction=direction, i=i, j=j):
board[:] = Piece.EMPTY
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.WHITE_HI
else:
board[0, i, j, 0] = Piece.WHITE_HI
effect = sess.run(white_effect, feed_dict={ph: board})
self.assertTupleEqual(effect.shape, (9, 9))
if direction not in get_cross_directions():
# No square should have an effect
self.assertFalse(np.any(effect))
continue
if direction == Direction.RIGHT:
edge = i == 0
elif direction == Direction.LEFT:
edge = i == 8
elif direction == Direction.UP:
edge = j == 0
elif direction == Direction.DOWN:
edge = j == 8
else:
raise ValueError(direction)
if edge:
# The piece is on the edge of the board, so there is no effect inside the board
self.assertFalse(np.any(effect))
continue
if direction == Direction.RIGHT:
self.assertTrue(np.all(effect[:i, j]))
effect[:i, j] = False
elif direction == Direction.LEFT:
self.assertTrue(np.all(effect[i + 1:, j]))
effect[i + 1:, j] = False
elif direction == Direction.UP:
self.assertTrue(np.all(effect[i, :j]))
effect[i, :j] = False
elif direction == Direction.DOWN:
self.assertTrue(np.all(effect[i, j + 1:]))
effect[i, j + 1:] = False
else:
raise ValueError(direction)
self.assertFalse(
|
np.any(effect)
|
numpy.any
|
from ..postprocessing.postprocessing_tools import _get_quality_metric_data, _get_pca_metric_data, \
_get_spike_times_clusters, _get_amp_metric_data
import spikeextractors as se
import numpy as np
def get_spike_times_metrics_data(sorting, sampling_frequency):
'''
Computes and returns the spike times in seconds, along with the
cluster IDs needed for quality metrics.
Parameters
----------
sorting: SortingExtractor
The sorting extractor
sampling_frequency: float
The sampling frequency of the recording
Returns
-------
spike_times: numpy.ndarray (num_spikes x 0)
Spike times in seconds
spike_clusters: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
'''
if not isinstance(sorting, se.SortingExtractor):
raise AttributeError()
if len(sorting.get_unit_ids()) == 0:
raise Exception("No units in the sorting result, can't compute any metric information.")
# spike times.npy and spike clusters.npy
spike_times, spike_clusters = _get_spike_times_clusters(sorting)
spike_times = np.squeeze((spike_times / sampling_frequency))
spike_clusters = np.squeeze(spike_clusters.astype(int))
return spike_times, spike_clusters
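# Usage sketch (assumes the toy dataset shipped with spikeextractors):
#   import spikeextractors as se
#   recording, sorting = se.example_datasets.toy_example()
#   times, clusters = get_spike_times_metrics_data(
#       sorting, sampling_frequency=recording.get_sampling_frequency())
#   # 'times' is in seconds; 'clusters' holds the unit ID of each spike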
def get_pca_metric_data(recording, sorting, n_comp=3, ms_before=1., ms_after=2., dtype=None, max_spikes_per_unit=np.inf,
max_spikes_for_pca=np.inf, recompute_info=True, save_features_props=False,
verbose=False, seed=0):
'''
Computes and returns all data needed to compute all the quality metrics from SpikeMetrics
Parameters
----------
recording: RecordingExtractor
The recording extractor
sorting: SortingExtractor
The sorting extractor
n_comp: int
Number of PCA components (n_compFeatures in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract per unit.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA.
recompute_info: bool
If True, will always re-extract waveforms.
save_features_props: bool
If True, save all features and properties in the sorting extractor.
verbose: bool
If True output is verbose
seed: int
Random seed for reproducibility
Returns
-------
spike_times: numpy.ndarray (num_spikes x 0)
Spike times in seconds
spike_clusters: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features: numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
'''
if not isinstance(recording, se.RecordingExtractor) or not isinstance(sorting, se.SortingExtractor):
raise AttributeError()
if len(sorting.get_unit_ids()) == 0:
raise Exception("No units in the sorting result, can't compute any metric information.")
spike_times, spike_clusters, pc_features, pc_feature_ind = _get_pca_metric_data(recording, sorting, n_comp=n_comp,
ms_before=ms_before,
ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=
max_spikes_per_unit,
max_spikes_for_pca=
max_spikes_for_pca,
recompute_info=recompute_info,
save_features_props=
save_features_props,
verbose=verbose, seed=seed)
return np.squeeze(recording.frame_to_time(spike_times)), np.squeeze(spike_clusters), pc_features, pc_feature_ind
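# Usage sketch (same toy recording/sorting pair as above; shapes illustrative):
#   st, sc, pcs, pc_ind = get_pca_metric_data(recording, sorting, n_comp=3)
#   # pcs    -> (num_spikes, n_comp, num_channels) PCA scores per spike
#   # pc_ind -> (num_units, num_channels) channel indices used for each unit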
def get_amplitude_metric_data(recording, sorting, amp_method='absolute', amp_peak='both', amp_frames_before=3,
amp_frames_after=3, max_spikes_per_unit=np.inf, recompute_info=True,
save_features_props=False, seed=0):
'''
Computes and returns all data needed to compute all the quality metrics from SpikeMetrics
Parameters
----------
recording: RecordingExtractor
The recording extractor
sorting: SortingExtractor
The sorting extractor
dtype: dtype
The numpy dtype of the waveforms
amp_method: str
If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.
amp_peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)
amp_frames_before: int
Frames before peak to compute amplitude
amp_frames_after: int
Frames after peak to compute amplitude
max_spikes_per_unit: int
The maximum number of spikes to extract per unit.
recompute_info: bool
If True, will always re-extract waveforms.
save_features_props: bool
If True, save all features and properties in the sorting extractor.
verbose: bool
If True output is verbose
seed: int
Random seed for reproducibility
Returns
-------
spike_times: numpy.ndarray (num_spikes x 0)
Spike times in seconds
spike_clusters: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
amplitudes: numpy.ndarray (num_spikes x 0)
Amplitude value for each spike time
'''
if not isinstance(recording, se.RecordingExtractor) or not isinstance(sorting, se.SortingExtractor):
raise AttributeError()
if len(sorting.get_unit_ids()) == 0:
raise Exception("No units in the sorting result, can't compute any metric information.")
spike_times, spike_clusters, amplitudes = _get_amp_metric_data(recording, sorting,
amp_method=amp_method,
amp_peak=amp_peak,
amp_frames_before=amp_frames_before,
amp_frames_after=amp_frames_after,
max_spikes_per_unit=max_spikes_per_unit,
save_features_props=save_features_props,
recompute_info=recompute_info,
seed=seed)
return np.squeeze(recording.frame_to_time(spike_times)), np.squeeze(spike_clusters), np.squeeze(amplitudes)
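# Usage sketch (same toy data; absolute amplitudes in uV by default):
#   st, sc, amps = get_amplitude_metric_data(recording, sorting, amp_peak='both')
#   # 'amps' holds one amplitude per spike, aligned with 'st' and 'sc'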
def get_all_metric_data(recording, sorting, n_comp=3, ms_before=1., ms_after=2., dtype=None, amp_method='absolute',
amp_peak='both', amp_frames_before=3, amp_frames_after=3, max_spikes_per_unit=np.inf,
max_spikes_for_pca=np.inf, recompute_info=True, save_features_props=False,
verbose=False, seed=0):
'''
Computes and returns all data needed to compute all the quality metrics from SpikeMetrics
Parameters
----------
recording: RecordingExtractor
The recording extractor
sorting: SortingExtractor
The sorting extractor
n_comp: int
Number of PCA components (n_compFeatures in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
amp_method: str
If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.
amp_peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)
amp_frames_before: int
Frames before peak to compute amplitude
amp_frames_after: int
Frames after peak to compute amplitude
max_spikes_per_unit: int
The maximum number of spikes to extract per unit.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA.
recompute_info: bool
If True, will always re-extract waveforms.
save_features_props: bool
If True, save all features and properties in the sorting extractor.
verbose: bool
If True output is verbose
seed: int
Random seed for reproducibility
Returns
-------
spike_times: numpy.ndarray (num_spikes x 0)
Spike times in seconds
spike_times_amps: numpy.ndarray (num_spikes x 0)
Spike times in seconds for amplitudes
spike_times_pca: numpy.ndarray (num_spikes x 0)
Spike times in seconds for pca
spike_clusters: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
spike_clusters_amps: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time in amplitudes
spike_clusters_pca: numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time in pca
amplitudes: numpy.ndarray (num_spikes x 0)
Amplitude value for each spike time
pc_features: numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
'''
if not isinstance(recording, se.RecordingExtractor) or not isinstance(sorting, se.SortingExtractor):
raise AttributeError()
if len(sorting.get_unit_ids()) == 0:
raise Exception("No units in the sorting result, can't compute any metric information.")
spike_times, spike_times_amps, spike_times_pca, spike_clusters, spike_clusters_amps, spike_clusters_pca, \
amplitudes, pc_features, pc_feature_ind = _get_quality_metric_data(
recording, sorting, n_comp=n_comp,
ms_before=ms_before, ms_after=ms_after,
dtype=dtype, amp_method=amp_method,
amp_peak=amp_peak,
amp_frames_before=amp_frames_before,
amp_frames_after=amp_frames_after,
max_spikes_per_unit=max_spikes_per_unit,
max_spikes_for_pca=max_spikes_for_pca,
recompute_info=recompute_info,
save_features_props=save_features_props,
verbose=verbose, seed=seed)
return np.squeeze(recording.frame_to_time(spike_times)), np.squeeze(recording.frame_to_time(spike_times_amps)), \
np.squeeze(recording.frame_to_time(spike_times_pca)), np.squeeze(spike_clusters), \
|
np.squeeze(spike_clusters_amps)
|
numpy.squeeze
|
import netCDF4 as nc
import numpy as np
import datetime as dt
from matplotlib.ticker import FormatStrFormatter
import cmocean
from salishsea_tools import evaltools as et
import pickle
ig0=112
ig1=112+97
jg0=644
jg1=644+130
fformat0='%Y%m%d'
print('NorthNut defined variables: ig0,ig1,jg0,jg1,fformat0')
with nc.Dataset('/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc') as fmesh:
vmask=fmesh.variables['vmask'][0,:,jg0:jg1,ig0:ig1]
umask=fmesh.variables['umask'][0,:,jg0:jg1,ig0:ig1]
tmask=fmesh.variables['tmask'][0,:,jg0:jg1,ig0:ig1]
fmask=fmesh.variables['fmask'][0,:,jg0:jg1,ig0:ig1]
vmask0=vmask[0,:,:]
umask0=umask[0,:,:]
gdept=fmesh.variables['gdept_0'][0,:,jg0:jg1,ig0:ig1]
gdept_1d=fmesh.variables['gdept_1d'][0,:]
e1t=fmesh.variables['e1t'][0,jg0:jg1,ig0:ig1]
e2t=fmesh.variables['e2t'][0,jg0:jg1,ig0:ig1]
e1f=fmesh.variables['e1f'][0,jg0:jg1,ig0:ig1]
e2f=fmesh.variables['e2f'][0,jg0:jg1,ig0:ig1]
e12t=fmesh.variables['e1t'][0,jg0:jg1,ig0:ig1].astype(float)*\
fmesh.variables['e2t'][0,jg0:jg1,ig0:ig1].astype(float)*\
fmesh.variables['tmask'][0,0,jg0:jg1,ig0:ig1]
e1v=np.copy(fmesh.variables['e1v'][0,jg0:jg1,ig0:ig1]).astype(float)
e2u=np.copy(fmesh.variables['e2u'][0,jg0:jg1,ig0:ig1]).astype(float)
e3t_1d=fmesh.variables['e3t_1d'][0,:]
e3t_0=fmesh.variables['e3t_0'][0,:,jg0:jg1,ig0:ig1].astype(float)
print('NorthNut defined variables: vmask, vmask0, umask, umask0, tmask, fmask, gdept, ',
'gdept_1d, e1t, e2t, e12t, e1f, e2f, e1v, e2u, e3t_1d')
boxCol=(.7,.7,.7)
colL=(.8,.6,0)
colR=(.8,0,0.5)
arrowwidth=1
headwidth=5
headlength=3
alen=3
toff=1
apw=dict(width=arrowwidth, #the width of the arrow in points
headwidth=headwidth, #the width of the base of the arrow head in points
headlength=headlength, #the length of the arrow head in points
shrink=0, #fraction of total length to 'shrink' from both ends
color='w')#edgecolor=boxCol,facecolor='w')
apk=dict(width=arrowwidth, #the width of the arrow in points
headwidth=headwidth, #the width of the base of the arrow head in points
headlength=headlength, #the length of the arrow head in points
shrink=0, #fraction of total length to 'shrink' from both ends
color='k')#edgecolor=boxCol,facecolor='w')
apk2=dict(width=.5, #the width of the arrow in points
headwidth=headwidth, #the width of the base of the arrow head in points
headlength=2, #the length of the arrow head in points
shrink=0, #fraction of total length to 'shrink' from both ends
color='k')#edgecolor=boxCol,facecolor='w')
print('NorthNut defined variables: boxCol, colL, colR, arrowwidth, headwidth, headlength, alen, toff, apw, apk')
def defboxes(k):
# calc transports: boxes in full model coords
boxes=dict()
boxes[0]={'i':(119,132),'j':(735,762)}
boxes[1]={'i':(118,146),'j':(720,735)}
boxes[2]={'i':(118,146),'j':(705,720)}
boxes[3]={'i':(121,150),'j':(690,705)}
boxes[4]={'i':(126,154),'j':(675,690)}
boxes[5]={'i':(129,159),'j':(660,675)}
#boxes[6]={'i':(130,150),'j':(645,660)}
# boxes in subgrid coords:
boxesDIAN=dict()
print('volumes: ')
for el in boxes.keys():
boxesDIAN[el]={'i':[ii-ig0 for ii in boxes[el]['i']],'j':[ii-jg0 for ii in boxes[el]['j']]}
vv=tmask[:k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e12t[boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e3t_0[:k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]
v=np.sum(np.sum(np.sum(vv,2),1),0)
A_north=np.sum(tmask[:k,boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e1t[boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e3t_0[:k,boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]])
A_south=np.sum(tmask[:k,boxesDIAN[el]['j'][0],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e1t[boxesDIAN[el]['j'][0],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e3t_0[:k,boxesDIAN[el]['j'][0],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]])
A_east=np.sum(tmask[:k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][1]]*\
e2t[boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][1]]*\
e3t_0[:k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][1]])
A_floor=np.sum(tmask[k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e12t[boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]])
print(np.shape(tmask))
print(el,'vol:', v,'m3')
print(el,'north face area:',A_north/1e6,'km2')
print(el,'south face area:',A_south/1e6,'km2')
print(el,'east face area:',A_east/1e6,'km2')
print(el,'floor area:',A_floor/1e6,'km2')
return boxes, boxesDIAN
def boxAreas(k):
boxes, boxesDIAN = defboxes(k);
ABoxes=dict()
for el in boxes.keys():
ABoxes[el]=np.sum(tmask[k,boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]*\
e12t[boxesDIAN[el]['j'][0]:boxesDIAN[el]['j'][1],boxesDIAN[el]['i'][0]:boxesDIAN[el]['i'][1]]);
return ABoxes
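# Usage sketch (k is a vertical level index; 26 is only an example value):
#   boxes, boxesDIAN = defboxes(26)   # boxes in full-grid and subgrid coords
#   ABoxes = boxAreas(26)             # horizontal area of each box at level 26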
def defboxesDiscovery():
# calc transports: boxes in full model coords
boxes=dict()
boxes[0]={'i':(116,130),'j':(762,771)}
boxes[1]={'i':(119,133),'j':(753,762)}
boxes[2]={'i':(120,132),'j':(744,753)}
boxes[3]={'i':(119,132),'j':(735,744)}
# boxes in subgrid coords:
boxesDIAN=dict()
for el in boxes.keys():
boxesDIAN[el]={'i':[ii-ig0 for ii in boxes[el]['i']],'j':[ii-jg0 for ii in boxes[el]['j']]}
return boxes, boxesDIAN
def boxcoordsT(box):
xm=0.5*(box['i'][0]+box['i'][1]-1)
ym=0.5*(box['j'][0]+box['j'][1]-1)
x0=box['i'][0]-.5
y0=box['j'][0]-.5
x1=box['i'][1]-1+.5
y1=box['j'][1]-1+.5
return xm, ym, x0, y0, x1, y1
def boxcoordsU(box):
xm=0.5*(box['i'][0]-1+box['i'][1]-1)
ym=0.5*(box['j'][0]+box['j'][1]-1)
x0=box['i'][0]-1
y0=box['j'][0]-.5
x1=box['i'][1]-1
y1=box['j'][1]-1+.5
return xm, ym, x0, y0, x1, y1
def boxcoordsV(box):
xm=0.5*(box['i'][0]+box['i'][1]-1)
ym=0.5*(box['j'][0]-1+box['j'][1]-1)
x0=box['i'][0]-.5
y0=box['j'][0]-1
x1=box['i'][1]-1+.5
y1=box['j'][1]-1
return xm, ym, x0, y0, x1, y1
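# Usage sketch (assumes a matplotlib Axes 'ax'; draws the outline of box 0 on
# the T grid; boxcoordsU and boxcoordsV work the same way on the U and V grids):
#   xm, ym, x0, y0, x1, y1 = boxcoordsT(boxes[0])
#   ax.plot([x0, x1, x1, x0, x0], [y0, y0, y1, y1, y0], color=boxCol)
#   ax.text(xm, ym, '0', ha='center', va='center')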
def makebox(boxcoords):
iii=
|
np.array((boxcoords[2],boxcoords[4],boxcoords[4],boxcoords[2],boxcoords[2]))
|
numpy.array
|