| prompt (string, length 19 to 879k) | completion (string, length 3 to 53.8k) | api (string, length 8 to 59) |
---|---|---|
import sys
sys.path.append('/root/denoise')
import torch
import time
import matplotlib.pyplot as plt
plt.rcParams["legend.loc"] = "upper right"
plt.rcParams['axes.titlesize'] = 'x-large'
plt.rcParams['axes.labelsize'] = 'x-large'
plt.rcParams['legend.fontsize'] = 'x-large'
plt.rcParams['xtick.labelsize'] = 'x-large'
plt.rcParams['ytick.labelsize'] = 'x-large'
from termcolor import cprint
import numpy as np
import os
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from src.utils import pload, pdump, yload, ydump, mkdir
from src.utils import bmtm, bmv, vnorm, fast_acc_integration
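# Note (inferred from call sites below): bmtm(A, B) is the batched product
# A^T @ B, used here to form relative rotations R_i^T R_j, and bmv(M, v) is
# a batched matrix-vector product.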
from src.lie_algebra import SO3
from src.DGADataset import DGADataset
class LearningProcess:
"""
    Manages the entire training and test process.
"""
def __init__(self, params, mode): # , net_class, net_params, address, dt
"""
        - Make sure the model is on the GPU after initialization.
"""
load_weight_path = params['load_weight_path'] # Before loading past parameters
self.params = params
self.weight_path = os.path.join(self.params['result_dir'], 'weights.pt')
self.dict_test_result = {}
self.figsize = (20, 12)
self.dt = 0.005 # (s)
self.id = params['id']
self.predata_dir = params['dataset']['predata_dir']
# self.preprocess()
if mode == 'train':
if not os.path.exists(self.params['result_dir']):
os.makedirs(self.params['result_dir'])
ydump(self.params, params['result_dir'], 'params.yaml')
self.net = params['net_class'](params)
elif mode == 'test':
self.params = yload(params['result_dir'], 'params.yaml')
self.figure_dir = os.path.join('/home/leecw/project/results/DenoiseIMU', 'figures')
self.net = params['net_class'](params)
if load_weight_path is not None:
weights = torch.load(load_weight_path)
else:
weights = torch.load(self.weight_path)
self.net.load_state_dict(weights)
else:
self.params = yload(params['result_dir'], 'params.yaml')
self.figure_dir = os.path.join('/home/leecw/project/results/DenoiseIMU', 'figures')
cprint(' No need to initialize a model', 'yellow')
return
self.net = self.net.cuda()
def preprocess(self):
print('\n# Preprocess ... ')
all_seqs = [*self.params['dataset']['train_seqs'], *self.params['dataset']['test_seqs']]
all_seqs.sort()
dt = 0.005
        dv_normed_windows = [16, 32, 64, 128, 256, 512]
def interpolate(x, t, t_int):
"""
Interpolate ground truth at the sensor timestamps
"""
# vector interpolation
x_int = np.zeros((t_int.shape[0], x.shape[1]))
for i in range(x.shape[1]):
if i in [4, 5, 6, 7]:
continue
x_int[:, i] = np.interp(t_int, t, x[:, i])
# quaternion interpolation
t_int = torch.Tensor(t_int - t[0])
t = torch.Tensor(t - t[0])
qs = SO3.qnorm(torch.Tensor(x[:, 4:8]))
qs = SO3.qinterp(qs, t, t_int)
qs = SO3.qnorm(qs)
x_int[:, 4:8] = qs.numpy()
return x_int
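        # Column layout assumed for the ground-truth array (inferred from the
        # slices used in this function and below): col 0 timestamp,
        # cols 1:4 position, cols 4:8 quaternion (wxyz), cols 8:11 velocity.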
for seq in all_seqs:
_seq_dir = os.path.join(self.params['dataset']['predata_dir'], seq)
if not os.path.exists(_seq_dir):
os.makedirs(_seq_dir)
path_imu = os.path.join(self.params['dataset']['data_dir'], seq, "mav0", "imu0", "data.csv")
imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
path_gt = os.path.join(self.params['dataset']['data_dir'], seq, "mav0", "state_groundtruth_estimate0", "data.csv")
gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)
# time synchronization between IMU and ground truth
t0 = np.max([gt[0, 0], imu[0, 0]])
t_end = np.min([gt[-1, 0], imu[-1, 0]])
# start index
idx0_imu = np.searchsorted(imu[:, 0], t0)
idx0_gt = np.searchsorted(gt[:, 0], t0)
# end index
idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')
# subsample
imu = imu[idx0_imu: idx_end_imu]
gt = gt[idx0_gt: idx_end_gt]
ts = imu[:, 0]/1e9
gt_interpolated = interpolate(gt, gt[:, 0]/1e9, ts)
gt_interpolated[:, 0] = imu[:, 0]
# take ground truth position
p_gt = gt_interpolated[:, 1:4]
p_gt = p_gt - p_gt[0]
            # take ground-truth quaternion pose
q_gt = torch.Tensor(gt_interpolated[:, 4:8]).double()
q_gt = q_gt / q_gt.norm(dim=1, keepdim=True)
Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz')
# interpolate w_gt
N = Rot_gt.shape[0]
rot_tmp = bmtm(Rot_gt[:-1], Rot_gt[1:])
rot_tmp = SO3.dnormalize(rot_tmp.double())
q_tmp = SO3.to_quaternion(rot_tmp)
q_tmp = SO3.qnorm(q_tmp).cpu()
_t = torch.from_numpy(np.linspace(1.0, float(N-1), N-1)).cpu().double()
_t_int = _t[:-1] + 0.5
q_int_tmp = SO3.qinterp(q_tmp.cpu(), _t, _t_int)
q_int_tmp = SO3.qnorm(q_int_tmp)
q_tmp = torch.cat([q_tmp[0].unsqueeze(0), q_int_tmp, q_tmp[-1].unsqueeze(0)])
rot_tmp = SO3.from_quaternion(q_tmp.cuda(), ordering='wxyz')
w_gt = SO3.log(rot_tmp) / 0.005
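            # w_gt is the ground-truth angular velocity: the SO(3) log of the
            # relative rotation between consecutive poses divided by the
            # 0.005 s sample period, with the quaternion midpoint interpolation
            # above re-centering each increment onto an IMU timestamp.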
assert w_gt.shape[0] == q_gt.shape[0]
# convert from numpy
p_gt = torch.Tensor(p_gt).double()
v_gt = torch.tensor(gt_interpolated[:, 8:11]).double().cuda()
imu = torch.Tensor(imu).double()
gt_interpolated = torch.Tensor(gt_interpolated)
            # # take pseudo ground truth acceleration
# print('v_gt:', v_gt.shape, v_gt.dtype, v_gt.device)
# print('a_gt:', a_gt.shape, a_gt.dtype, a_gt.device)
# compute pre-integration factors for all training
mtf = self.params['dataset']['min_train_freq']
dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
dRot_ij = SO3.dnormalize(dRot_ij.cuda())
dxi_ij = SO3.log(dRot_ij).cpu()
_gt_dv_normed = {}
for window_size in dv_normed_windows:
N = v_gt.shape[0]
bread = torch.ones(window_size, N+window_size-1, 3, dtype=v_gt.dtype).cuda()
bread *= v_gt[0, :].expand_as(bread)
for i in range(window_size):
bread[i, window_size-1-i:N+window_size-1-i] = v_gt
bread = bread[:, 0:N]
## Debug
# for i in range(window_size):
# li = bread[:, i, 0].tolist()
# for e in li:
# print('%.6f' % e, end=',')
# print()
##
_mean = bread.mean(dim=0)
_normalized = v_gt - _mean
## Debug
# print('v_gt')
# print(v_gt[0:8, 0].tolist())
# print('_normalized:', _normalized.shape, _normalized.dtype)
# print(_normalized[0:8, 0].tolist())
##
_gt_dv_normed[str(window_size)] = _normalized
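                # A minimal equivalent sketch of the 'bread' construction above,
                # assuming the intent is a trailing-window mean of v_gt
                # left-padded with v_gt[0]:
                #   pad = v_gt[0].expand(window_size - 1, 3)
                #   padded = torch.cat([pad, v_gt], dim=0)                   # (N+W-1, 3)
                #   csum = torch.cat([torch.zeros_like(padded[:1]), padded.cumsum(dim=0)])
                #   _mean = (csum[window_size:] - csum[:-window_size]) / window_size
                #   _normalized = v_gt - _mean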
## SAVE
imu_path = os.path.join(_seq_dir, 'imu.csv')
if not os.path.exists(imu_path):
print('preprocess/%s/imu:'%seq, imu.shape, imu.dtype)
np.savetxt(imu_path, imu, delimiter=',')
q_gt_path = os.path.join(_seq_dir, 'q_gt.csv')
if not os.path.exists(q_gt_path):
print('preprocess/%s/q_gt:'%seq, q_gt.shape, q_gt.dtype)
np.savetxt(q_gt_path, q_gt, delimiter=',')
v_gt_path = os.path.join(_seq_dir, 'v_gt.csv')
if not os.path.exists(v_gt_path):
v_gt = v_gt.cpu().numpy()
print('preprocess/%s/v_gt:'%seq, v_gt.shape, v_gt.dtype)
np.savetxt(v_gt_path, v_gt, delimiter=',')
w_gt_path = os.path.join(self.predata_dir, seq, 'w_gt.csv')
if not os.path.exists(w_gt_path):
w_gt = w_gt.cpu().numpy()
print('preprocess/%s/w_gt:'%seq, w_gt.shape, w_gt.dtype)
np.savetxt(w_gt_path, w_gt, delimiter=',')
a_gt_path = os.path.join(self.predata_dir, seq, 'a_gt.csv')
if not os.path.exists(a_gt_path):
v_gt = torch.tensor(v_gt)
a_gt = (v_gt[1:] - v_gt[:-1]) / (dt)
a_gt = torch.cat([
a_gt[0].unsqueeze(0),
(a_gt[1:] + a_gt[:-1]) / 2.0,
a_gt[-1].unsqueeze(0)
]).cpu().numpy()
print('preprocess/%s/a_gt:'%seq, a_gt.shape, a_gt.dtype)
np.savetxt(a_gt_path, a_gt, delimiter=',')
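            # The block above is a central-difference estimate of acceleration:
            # forward differences of v_gt, with adjacent pairs averaged, so
            # interior samples get (v[i+1] - v[i-1]) / (2 * dt) and the
            # endpoints one-sided estimates.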
w_mean_path = os.path.join(self.predata_dir, seq, 'w_mean.csv')
w_std_path = os.path.join(self.predata_dir, seq, 'w_std.csv')
a_mean_path = os.path.join(self.predata_dir, seq, 'a_mean.csv')
a_std_path = os.path.join(self.predata_dir, seq, 'a_std.csv')
if not os.path.exists(w_mean_path):
gap_dict = torch.load('/home/leecw/project/results/Figures/gap_dist.pt')
seq_dict = gap_dict[seq]
w_mean = seq_dict['w_mean'].cpu().numpy()
w_std = seq_dict['w_std'].cpu().numpy()
a_mean = seq_dict['a_mean'].cpu().numpy()
a_std = seq_dict['a_std'].cpu().numpy()
np.savetxt(w_mean_path, w_mean, delimiter=',')
np.savetxt(w_std_path, w_std, delimiter=',')
np.savetxt(a_mean_path, a_mean, delimiter=',')
np.savetxt(a_std_path, a_std, delimiter=',')
print('preprocess/%s/gap_dist:'%seq)
dw_16_gt_path = os.path.join(self.predata_dir, seq, 'dw_16_gt.csv')
if not os.path.exists(dw_16_gt_path):
dw_16_gt = bmtm(Rot_gt[:-16], Rot_gt[16:])
dw_16_gt = SO3.dnormalize(dw_16_gt.double())
dw_16_gt = SO3.log(dw_16_gt)
dw_16_gt = dw_16_gt.cpu().numpy()
print('preprocess/%s/dw_16_gt:'%seq, dw_16_gt.shape, dw_16_gt.dtype)
np.savetxt(dw_16_gt_path, dw_16_gt, delimiter=',')
dw_32_gt_path = os.path.join(self.predata_dir, seq, 'dw_32_gt.csv')
if not os.path.exists(dw_32_gt_path):
dw_32_gt = bmtm(Rot_gt[:-32], Rot_gt[32:])
dw_32_gt = SO3.dnormalize(dw_32_gt.double())
dw_32_gt = SO3.log(dw_32_gt)
dw_32_gt = dw_32_gt.cpu().numpy()
print('preprocess/%s/dw_32_gt:'%seq, dw_32_gt.shape, dw_32_gt.dtype)
np.savetxt(dw_32_gt_path, dw_32_gt, delimiter=',')
dv_16_gt_path = os.path.join(self.predata_dir, seq, 'dv_16_gt.csv')
if not os.path.exists(dv_16_gt_path):
dv_16_gt = v_gt[16:] - v_gt[:-16]
dv_16_gt = dv_16_gt.cpu().numpy()
print('preprocess/%s/dv_16_gt:'%seq, dv_16_gt.shape, dv_16_gt.dtype)
np.savetxt(dv_16_gt_path, dv_16_gt, delimiter=',')
dv_32_gt_path = os.path.join(self.predata_dir, seq, 'dv_32_gt.csv')
if not os.path.exists(dv_32_gt_path):
dv_32_gt = v_gt[32:] - v_gt[:-32]
dv_32_gt = dv_32_gt.cpu().numpy()
print('preprocess/%s/dv_32_gt:'%seq, dv_32_gt.shape, dv_32_gt.dtype)
np.savetxt(dv_32_gt_path, dv_32_gt, delimiter=',')
for key, val in _gt_dv_normed.items():
_path = os.path.join(self.predata_dir, seq, 'dv_normed_%s_gt.csv' % key)
if not os.path.exists(_path):
_item = val.cpu().numpy()
print('preprocess/%s/dv_normed_%s_gt:'%(seq, key), _item.shape, _item.dtype)
np.savetxt(_path, _item, delimiter=',')
# _gt_path = os.path.join(_seq_dir, 'gt.pt')
# _gt_dict = {
# 'gt_interpolated': gt_interpolated.float(),
# 'dw_16': dxi_ij.float(), # the 16-size window's euler angle difference
# 'a_gt': a_gt.float(),
# }
# torch.save(_gt_dict, _gt_path)
# _gt_dv_path = os.path.join(_seq_dir, 'dv.pt')
# _gt_dv_dict = {
# 'dv_normed': {
# '16': _gt_dv_normed['16'].float(),
# '32': _gt_dv_normed['32'].float(),
# '64': _gt_dv_normed['64'].float(),
# '128': _gt_dv_normed['128'].float(),
# '256': _gt_dv_normed['256'].float(),
# '512': _gt_dv_normed['512'].float(),
# }
# }
# torch.save(_gt_dv_dict, _gt_dv_path)
print("--- success ---")
"""
_mean: torch.Size([36381, 3]) torch.float64
_normalized: torch.Size([36381, 3]) torch.float64
ts: <class 'torch.Tensor'> torch.Size([36381]) torch.float32
us: <class 'torch.Tensor'> torch.Size([36381, 6]) torch.float32
dw_16: <class 'torch.Tensor'> torch.Size([36365, 3]) torch.float32
gt_interpolated: <class 'torch.Tensor'> torch.Size([36381, 17]) torch.float32
dv:
16: <class 'torch.Tensor'> torch.Size([36365, 3]) torch.float32
32: <class 'torch.Tensor'> torch.Size([36349, 3]) torch.float32
64: <class 'torch.Tensor'> torch.Size([36317, 3]) torch.float32
dv_normed:
32: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
64: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
512: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
1024: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
128: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
256: <class 'torch.Tensor'> torch.Size([36381, 3]) torch.float32
"""
def train(self):
"""train the neural network. GPU is assumed"""
ydump(self.params, self.params['result_dir'], 'params.yaml')
# define datasets
dataset_train = DGADataset(self.params, 'train')
dataset_val = DGADataset(self.params, 'val')
# Define class
Optimizer = self.params['train']['optimizer_class']
Scheduler = self.params['train']['scheduler_class']
Loss = self.params['train']['loss_class']
# Instantiate optimizer, scheduler and loss.
optimizer = Optimizer(self.net.parameters(), **self.params['train']['optimizer'])
scheduler = Scheduler(optimizer, **self.params['train']['scheduler'])
dataloader = DataLoader(dataset_train, **self.params['train']['dataloader'])
criterion = Loss(self.params)
# remaining training parameters
freq_val = self.params['train']['freq_val']
n_epochs = self.params['train']['n_epochs']
# init net w.r.t dataset
self.net.set_normalized_factors(torch.Tensor(dataset_train.mean_u), torch.Tensor(dataset_train.std_u))
# start tensorboard writer
writer = SummaryWriter(self.params['result_dir'])
start_time = time.time()
best_loss = torch.Tensor([float('Inf')])
# define some function for seeing evolution of training
# def write(epoch, loss_epoch):
# scheduler.step(epoch)
# Training Loop
loss, best_loss = torch.Tensor([10000.0]), torch.Tensor([10000.0])
for epoch in range(1, n_epochs + 1):
loss_epoch = self.loop_train(dataloader, optimizer, criterion)
writer.add_scalar('loss/train', loss_epoch.item(), epoch)
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
scheduler.step(epoch)
# Validate
if epoch % freq_val == 0:
loss = self.loop_val(dataset_val, criterion)
dt = time.time() - start_time
if loss <= best_loss:
cprint('Epoch %4d Loss(val) Decrease - %.2fs' % (epoch, dt), 'blue')
# print(' - gloss: %.4f, acc_loss: %.4f, gap_loss: %.4f' % (gloss_epoch, acc_loss_epoch, gap_loss_epoch))
print(' - current: %.4f' % loss.item())
print(' - best : %.4f' % best_loss.item())
best_loss = loss
self.save_net(epoch, 'best')
else:
cprint('Epoch %4d Loss(val) Increase - %.2fs' % (epoch, dt), 'yellow')
# print(' - gloss: %.4f, acc_loss: %.4f, gap_loss: %.4f' % (gloss_epoch, acc_loss_epoch, gap_loss_epoch))
print(' - current: %.4f' % loss.item())
print(' - best : %.4f' % best_loss.item())
self.save_net(epoch, 'log')
writer.add_scalar('loss/val', loss.item(), epoch)
start_time = time.time()
elif epoch % (freq_val//4) == 0:
print('Epoch %4d Loss(train) %.4f' % (epoch, loss_epoch))
cprint('\n Train is over \n', 'cyan', attrs=['bold'])
cprint('Testing ... ', 'green')
dataset_test = DGADataset(self.params, 'test')
weights = torch.load(self.weight_path)
self.net.load_state_dict(weights)
self.net.cuda()
test_loss = self.loop_val(dataset_test, criterion)
dict_loss = {
'final_loss/val': best_loss.item(),
'final_loss/test': test_loss.item()
}
for key, value in dict_loss.items():
print(' %s: ' % key, value)
ydump(dict_loss, self.params['result_dir'], 'final_loss.yaml')
writer.close()
def test(self):
"""test a network once training is over"""
Loss = self.params['train']['loss_class']
criterion = Loss(self.params)
dataset_test = DGADataset(self.params, 'test')
if not os.path.exists(self.params['test_dir']):
os.makedirs(self.params['test_dir'])
cprint('Test ... ', 'green')
self.loop_test(dataset_test, criterion)
print(' --success')
def analyze(self):
dataset_test = DGADataset(self.params, 'test')
for i, seq in enumerate(dataset_test.sequences):
            cprint('\n# Visualize results on %s ... ' % seq, 'green')
self.seq = seq
if not os.path.exists(os.path.join(self.params['result_dir'], seq)):
os.mkdir(os.path.join(self.params['result_dir'], seq))
## LOAD DATA
seq, us, q_gt, w_gt, dw_16_gt, dw_32_gt, dv_16_gt, dv_32_gt, dv_normed_dict, w_mean, w_std, a_mean, a_std = dataset_test[i]
us = us.cpu()
q_gt = q_gt.cpu()
dv_16_gt = dv_16_gt.cpu()
dv_32_gt = dv_32_gt.cpu()
for w in dv_normed_dict:
dv_normed_dict[w] = dv_normed_dict[w].cpu()
N = us.shape[0]
fname = os.path.join(self.predata_dir, seq, 'v_gt.csv')
v_gt = np.loadtxt(fname, delimiter=',')[:N]
v_gt = torch.from_numpy(v_gt).float()
fname = os.path.join(self.predata_dir, seq, 'a_gt.csv')
a_gt = np.loadtxt(fname, delimiter=',')[:N]
mondict = pload(self.params['result_dir'], 'tests', 'results_%s.p'%seq)
            w_hat = mondict['w_hat']  # needed below for the orientation analysis
            a_hat = mondict['a_hat'].detach()
            loss = mondict['loss']  # float
## Analyze Orientation
self.ts = torch.linspace(0, N * self.dt, N)
rot_gt = SO3.from_quaternion(q_gt.cuda()).cpu()
rpy_gt = SO3.to_rpy(rot_gt.cuda()).cpu()
def rad2deg(x):
return x * (180. / np.pi)
            quat_hat, rot_imu, rot_hat = self.integrate_with_quaternions_superfast(dw_16_gt.shape[0], us, w_hat, q_gt)
rpy_imu = SO3.to_rpy(rot_imu).cpu()
rpy_hat = SO3.to_rpy(rot_hat).cpu()
self.plot_orientation(N, rad2deg(rpy_imu), rad2deg(rpy_hat), rad2deg(rpy_gt))
self.plot_orientation_error(N, rot_imu, rot_hat, rot_gt)
gyro_corrections = (us[:, :3] - w_hat[:N, :])
self.plot_gyro_correction(gyro_corrections)
# ## Analyze Acceleration
# v_hat = fast_acc_integration(a_hat.unsqueeze(0)).squeeze()
# v_gt = v_gt - v_gt[0].expand_as(v_gt)
# print('v_hat:', v_hat.shape, v_hat.device, v_hat.dtype)
# print('v_gt:', v_gt.shape, v_gt.device, v_gt.dtype)
# self.plot_velocity(v_hat.numpy(), v_gt.numpy())
# ## correction
# rot_gt = rot_gt.reshape(us.shape[0], 3, 3) # [N, 3, 3]
# a_raw = bmv(rot_gt, us[:, 3:6]) - torch.Tensor([0., 0., 9.81])
# accel_corrections = (a_raw - a_hat)
# self.plot_accel_correction(accel_corrections.numpy())
## nomred
# for window in self.params['train']['loss']['dv_normed']:
# v_raw = fast_acc_integration(a_raw.unsqueeze(0)).squeeze()
# v_normed_raw = vnorm(v_raw.unsqueeze(0), window_size=window).squeeze()
# v_normed_hat = vnorm(v_hat.unsqueeze(0), window_size=window).squeeze()
# v_normed_gt = vnorm(v_gt.unsqueeze(0), window_size=window).squeeze()
# self.plot_v_normed(v_normed_raw, v_normed_hat, v_normed_gt, window)
# ## Acceleration
# self.plot_acceleration(a_raw, a_hat, a_gt)
# print('--- success ---')
def loop_train(self, dataloader, optimizer, criterion):
"""Forward-backward loop over training data"""
loss_epoch = 0
optimizer.zero_grad()
for seq, us, q_gt, w_gt, dw_16_gt, dw_32_gt, dv_16_gt, dv_32_gt, dv_normed_dict, w_mean, w_std, a_mean, a_std in dataloader:
us = dataloader.dataset.add_noise(us)
q_gt = q_gt.reshape(-1, 4)
rot_gt = SO3.from_quaternion(q_gt.cuda())
rot_gt = rot_gt.reshape(us.shape[0], us.shape[1], 3, 3)
a_hat, w_hat, loss = None, None, None
if self.params['net_version'].startswith('acc'):
a_hat = self.net(us, rot_gt)
loss = criterion(a_hat, dv_16_gt, dv_32_gt, dv_normed_dict)
elif self.params['net_version'] == 'ori_ver1':
w_hat = self.net(us)
gyro16, gyro32 = criterion(w_hat, dw_16_gt)
loss = gyro16 + gyro32
elif self.params['net_version'] == 'ori_ver2':
w_hat = self.net(us)
gyro16, gyro32, gnll = criterion(w_hat, dw_16_gt, w_gt, w_mean, w_std)
loss = gyro16 + gyro32 + gnll * self.params['train']['loss']['ori_gnll_ratio']
loss /= len(dataloader)
loss.backward()
loss_epoch += loss.detach().cpu()
optimizer.step()
return loss_epoch
def loop_val(self, dataset, criterion):
"""Forward loop over validation data"""
loss_epoch = 0.0
gyro16_epoch = 0.0
gyro32_epoch = 0.0
gnll_epoch = 0.0
self.net.eval()
with torch.no_grad():
for i in range(len(dataset)):
seq, us, q_gt, w_gt, dw_16_gt, dw_32_gt, dv_16_gt, dv_32_gt, dv_normed_dict, w_mean, w_std, a_mean, a_std = dataset[i]
rot_gt = SO3.from_quaternion(q_gt.cuda())
rot_gt = rot_gt.reshape(us.shape[0], 3, 3)
a_hat, w_hat, loss = None, None, None
if self.params['net_version'].startswith('acc'):
a_hat = self.net(us.unsqueeze(0), rot_gt.unsqueeze(0))
for key in dv_normed_dict:
dv_normed_dict[key] = dv_normed_dict[key].unsqueeze(0)
loss = criterion(a_hat, dv_16_gt.unsqueeze(0), dv_32_gt.unsqueeze(0), dv_normed_dict)
elif self.params['net_version'] == 'ori_ver1':
w_hat = self.net(us.unsqueeze(0))
gyro16, gyro32 = criterion(w_hat, dw_16_gt.unsqueeze(0))
gyro16_epoch += gyro16.item()
gyro32_epoch += gyro32.item()
loss = gyro16 + gyro32
elif self.params['net_version'] == 'ori_ver2':
w_hat = self.net(us.unsqueeze(0))
gyro16, gyro32, gnll = criterion(w_hat, dw_16_gt.unsqueeze(0), w_gt.unsqueeze(0), w_mean.unsqueeze(0), w_std.unsqueeze(0))
gyro16_epoch += gyro16.item()
gyro32_epoch += gyro32.item()
gnll_epoch += gnll.item()
loss = gyro16 + gyro32 + gnll * self.params['train']['loss']['ori_gnll_ratio']
loss /= len(dataset)
loss_epoch += loss.cpu()
print('Gyro 16 Loss:', gyro16_epoch/len(dataset))
        print('Gyro 32 Loss:', gyro32_epoch/len(dataset))
print('Gaussian NLL Loss:', gnll_epoch/len(dataset))
print('Total Loss:', loss_epoch.item())
self.net.train()
return loss_epoch
def loop_test(self, dataset, criterion):
"""Forward loop over test data"""
loss_epoch = 0.0
gyro16_epoch = 0.0
gyro32_epoch = 0.0
gnll_epoch = 0.0
self.net.eval()
for i in range(len(dataset)):
seq, us, q_gt, w_gt, dw_16_gt, dw_32_gt, dv_16_gt, dv_32_gt, dv_normed_dict, w_mean, w_std, a_mean, a_std = dataset[i]
rot_gt = SO3.from_quaternion(q_gt.cuda())
rot_gt = rot_gt.reshape(us.shape[0], 3, 3)
a_hat, w_hat, loss = None, None, None
if self.params['net_version'] == 'ver1':
a_hat, a_tilde_b = self.net(us.unsqueeze(0), rot_gt.unsqueeze(0), mode='test')
### Plot hist body
x = a_tilde_b.cpu().detach().numpy().squeeze()
fig, ax = plt.subplots(3, 1, figsize=self.figsize, dpi=200)
fig.suptitle('%s a_tilde_body Distribution / %s / %s' % (self.params['net_version'], seq, self.id), fontsize=20)
ax[0].hist(x[:, 0], bins = 50, label='x')
ax[0].set_title("X axis frequency")
ax[0].legend()
ax[1].hist(x[:, 1], bins = 50, label='y')
ax[0].set_title("Y axis frequency")
ax[1].legend()
ax[2].hist(x[:, 2], bins = 50, label='z')
ax[0].set_title("Z axis frequency")
ax[2].legend()
_dir = os.path.join(self.figure_dir, seq, 'tilde_dist')
_path = os.path.join(_dir, self.id + '_b.png')
if not os.path.exists(_dir):
os.makedirs(_dir)
self.savefig(ax, fig, _path)
plt.close(fig)
###
elif self.params['net_version'] == 'ver2':
a_hat, a_tilde_b, a_tilde_w = self.net(us.unsqueeze(0), rot_gt.unsqueeze(0), mode='test')
print('a_hat:', a_hat.shape)
print('a_tilde_b:', a_tilde_b.shape)
print('a_tilde_w:', a_tilde_w.shape)
### Plot hist body
data = a_tilde_b.cpu().detach().numpy().squeeze()
print('data:', data.shape)
x, y, z = data[:, 0], data[:, 1], data[:, 2]
print('x:', x.shape)
print(x.min(), x.max(), y.min(), y.max(), z.min(), z.max())
fig, ax = plt.subplots(3, 1, figsize=self.figsize, dpi=200)
fig.suptitle('%s a_tilde_body Distribution / %s / %s' % (self.params['net_version'], seq, self.id), fontsize=20)
ax[0].hist(x, bins = 100, range=(x.min(), x.max()), label='x')
ax[0].set_title("X axis frequency")
ax[0].legend()
ax[1].hist(y, bins = 100, range=(y.min(), y.max()), label='y')
ax[0].set_title("Y axis frequency")
ax[1].legend()
ax[2].hist(z, bins = 100, range=(z.min(), z.max()), label='z')
ax[0].set_title("Z axis frequency")
ax[2].legend()
_dir = os.path.join(self.figure_dir, seq, 'tilde_dist')
_path = os.path.join(_dir, self.id + '_b.png')
if not os.path.exists(_dir):
os.makedirs(_dir)
self.savefig(ax, fig, _path)
plt.close(fig)
###
### Plot hist world
data = a_tilde_w.cpu().detach().numpy().squeeze()
print('data:', data.shape)
x, y, z = data[:, 0], data[:, 1], data[:, 2]
print('x:', x.shape)
print(x.min(), x.max(), y.min(), y.max(), z.min(), z.max())
fig, ax = plt.subplots(3, 1, figsize=self.figsize, dpi=200)
fig.suptitle('%s a_tilde_world Distribution / %s / %s' % (self.params['net_version'], seq, self.id), fontsize=20)
ax[0].hist(x, bins = 100, range=(x.min(), x.max()), label='x')
ax[0].set_title("X axis frequency")
ax[0].legend()
ax[1].hist(y, bins = 100, range=(y.min(), y.max()), label='y')
ax[0].set_title("Y axis frequency")
ax[1].legend()
ax[2].hist(z, bins = 100, range=(z.min(), z.max()), label='z')
ax[0].set_title("Z axis frequency")
ax[2].legend()
_dir = os.path.join(self.figure_dir, seq, 'tilde_dist')
_path = os.path.join(_dir, self.id + '_w.png')
if not os.path.exists(_dir):
os.makedirs(_dir)
self.savefig(ax, fig, _path)
plt.close(fig)
###
elif self.params['net_version'] == 'ver3':
a_hat, a_tilde_w = self.net(us.unsqueeze(0), rot_gt.unsqueeze(0), mode='test')
### Plot hist world
x = a_tilde_w.cpu().detach().numpy().squeeze()
fig, ax = plt.subplots(3, 1, figsize=self.figsize, dpi=200)
fig.suptitle('%s a_tilde_world Distribution / %s / %s' % (self.params['net_version'], seq, self.id), fontsize=20)
ax[0].hist(x[:, 0], bins = 50, label='x')
ax[0].set_title("X axis frequency")
ax[0].legend()
ax[1].hist(x[:, 1], bins = 50, label='y')
ax[0].set_title("Y axis frequency")
ax[1].legend()
ax[2].hist(x[:, 2], bins = 50, label='z')
ax[0].set_title("Z axis frequency")
ax[2].legend()
_dir = os.path.join(self.figure_dir, seq, 'tilde_dist')
_path = os.path.join(_dir, self.id + '_w.png')
if not os.path.exists(_dir):
os.makedirs(_dir)
self.savefig(ax, fig, _path)
plt.close(fig)
elif self.params['net_version'] == 'ori_ver1':
w_hat = self.net(us.unsqueeze(0))
gyro16, gyro32 = criterion(w_hat, dw_16_gt.unsqueeze(0))
gyro16_epoch += gyro16.item()
gyro32_epoch += gyro32.item()
loss = gyro16 + gyro32
elif self.params['net_version'] == 'ori_ver2':
w_hat = self.net(us.unsqueeze(0))
gyro16, gyro32, gnll = criterion(w_hat, dw_16_gt.unsqueeze(0), w_gt.unsqueeze(0), w_mean.unsqueeze(0), w_std.unsqueeze(0))
gyro16_epoch += gyro16.item()
gyro32_epoch += gyro32.item()
gnll_epoch += gnll.item()
loss = gyro16 + gyro32 + gnll * self.params['train']['loss']['ori_gnll_ratio']
# Plot
def rad2deg(x):
return x * (180. / np.pi)
N = us.shape[0]
if self.params['net_version'].startswith('ori'):
self.seq = seq
self.ts = torch.linspace(0, N * self.dt, N)
q_hat, rot_raw, rot_hat = self.integrate_with_quaternions_superfast(dw_16_gt.shape[0], us.squeeze(), w_hat.detach().squeeze(), q_gt.squeeze())
rpy_imu = SO3.to_rpy(rot_raw).cpu()
rpy_hat = SO3.to_rpy(rot_hat).cpu()
rot_gt = SO3.from_quaternion(q_gt.cuda()).cpu()
rpy_gt = SO3.to_rpy(rot_gt.cuda()).cpu()
self.plot_orientation(N, rad2deg(rpy_imu), rad2deg(rpy_hat), rad2deg(rpy_gt))
            if loss is not None:  # the acc versions ('ver1'..'ver3') only produce plots here
                loss /= len(dataset)
                loss_epoch += loss.cpu()
# for key in dv_normed_dict:
# dv_normed_dict[key] = dv_normed_dict[key].unsqueeze(0)
# loss = criterion(a_hat, dv_16_gt.unsqueeze(0), dv_32_gt.unsqueeze(0), dv_normed_dict)
# self.dict_test_result[seq] = {
# 'w_hat': w_hat[0].cpu(),
# # 'a_hat': a_hat[0].cpu(),
# 'loss': loss.cpu().item(),
# }
# print(' - %s loss:' % seq, loss.cpu().item())
# # for key, value in self.dict_test_result[seq].items():
# # if key == 'loss':
# # continue
# # print(' %s:'%key, type(value), value.shape, value.dtype)
# path_results = os.path.join(self.params['test_dir'], 'results_%s.p'%seq)
# if not os.path.exists(path_results):
# pdump(self.dict_test_result[seq], path_results)
def save_net(self, epoch=None, state='log'):
"""save the weights on the net in CPU"""
self.net.eval().cpu()
if state == 'log':
save_path = os.path.join(self.params['result_dir'], 'ep_%04d.pt' % epoch)
torch.save(self.net.state_dict(), save_path)
elif state == 'best':
save_path = os.path.join(self.params['result_dir'], 'ep_%04d_best.pt' % epoch)
torch.save(self.net.state_dict(), save_path)
torch.save(self.net.state_dict(), self.weight_path)
self.net.train().cuda()
def save_gyro_estimate(self, seq):
net_us = pload(self.params['result_dir'], seq, 'results.p')['hat_xs']
N = net_us.shape[0]
path = os.path.join("/home/leecw/Data/Result/DenoiseIMU/estimate", seq, seq + '_net_us.csv')
header = "time(s),wx,wy,wz"
        x = np.zeros((N, 4))
        # Assumed completion (the column assignment is an assumption): first
        # column holds timestamps, the rest the denoised gyro estimates.
        x[:, 0] = np.arange(N) * self.dt
        x[:, 1:4] = net_us[:, :3]
        np.savetxt(path, x, header=header, delimiter=",", fmt='%1.9f')
def to_open_vins(self, dataset):
"""
Export results to Open-VINS format. Use them eval toolbox available
at https://github.com/rpng/open_vins/
"""
print("open_vins()")
for i, seq in enumerate(dataset.sequences):
self.seq = seq
# get ground truth
self.gt = dataset.load_gt(i)
raw_us, _ = dataset[i]
net_us = pload(self.params['result_dir'], seq, 'results.p')['hat_xs']
N = net_us.shape[0]
net_qs, imu_Rots, net_Rots = self.integrate_with_quaternions_superfast(N, raw_us, net_us)
path = os.path.join(self.params['result_dir'], seq + '.csv')
header = "time(s),tx,ty,tz,qx,qy,qz,qw"
x = np.zeros((net_qs.shape[0], 8))
x[:, 0] = self.gt['ts'][:net_qs.shape[0]]
x[:, [7, 4, 5, 6]] = net_qs
np.savetxt(path, x[::1], header=header, delimiter=",", fmt='%1.9f')
### Save wx, wy, wz csv file
# if seq in ['MH_02_easy', 'MH_04_difficult']:
# print("\t", seq)
# N = net_qs.shape[0]
# print("\t\tnet_qs: ", net_qs.shape)
# print("\t\tnet_us: ", net_us.shape)
# print("\t\timu_Rots: ", imu_Rots.shape)
# print("\t\tnet_Rots: ", net_Rots.shape)
# print("\t\tself.gt['ts']:", self.gt['ts'].shape)
# gt_processed_path = os.path.join("/root/Data/Result/DenoiseIMU/gt", seq, "gt_processed.csv")
# gt_processed = np.loadtxt(gt_processed_path, delimiter=',')
# t_ns = gt_processed[:, 0]
# print("\t\tt_ns:", t_ns.shape) # (29992,) float64
# print("\t\traw_us:", raw_us.shape) # [29952, 6]
# imu_processed_path = os.path.join("/root/Data/Result/DenoiseIMU/estimate", seq, "imu_processed.csv")
# imu_processed = np.loadtxt(imu_processed_path, delimiter=',')
# denoised_path = os.path.join("/root/Data/Result/DenoiseIMU/estimate", seq, "denoised.csv")
# header = "timestamp [ns],w_RS_S_x [rad s^-1],w_RS_S_y [rad s^-1],w_RS_S_z [rad s^-1],a_RS_S_x [m s^-2],a_RS_S_y [m s^-2],a_RS_S_z [m s^-2]"
# denoised = np.zeros((N, 7))
# denoised[:, 0] = imu_processed[:N, 0]
# denoised[:, 1:4] = net_us
# denoised[:, 4:7] = imu_processed[:N, 4:7]
# np.savetxt(denoised_path, denoised, header=header, fmt='%d,%1.9f,%1.9f,%1.9f,%1.9f,%1.9f,%1.9f')
# print("\t\tdenoised is saved in \'%s\'" % denoised_path)
###
### DENOISED IMU DATA
if seq in ['MH_02_easy', 'MH_04_difficult']:
raw_interpolated_imu_path = os.path.join("/root/Data/Result/DenoiseIMU", seq + '_raw_imu_interpolated.csv')
raw_interpolated_imu = np.loadtxt(raw_interpolated_imu_path, dtype=np.float64, delimiter=',')
denoised_interpolated_imu = raw_interpolated_imu[:net_us.shape[0]]
denoised_interpolated_imu_path = os.path.join(self.params['result_dir'], seq, 'denoised_imu.csv')
denoised_interpolated_imu[:,1:4] = np.squeeze(net_us)
header = "time[ns],wx,wy,wz,ax,ay,az"
np.savetxt(denoised_interpolated_imu_path, denoised_interpolated_imu, fmt="%d,%1.9f,%1.9f,%1.9f,%1.9f,%1.9f,%1.9f", header=header)
print("denoied imu data is saved in \'%s\'" % denoised_interpolated_imu_path)
# Save net_us on csv file
# net_us_csv = np.zeros((net_us))
# np.savetxt("/root/Data/Result/DenoiseIMU/estimate/MH_02_easy/net_us.csv", )
imu_rpys_path = os.path.join(self.params['result_dir'], seq, 'raw_rpy.csv')
net_rpys_path = os.path.join(self.params['result_dir'], seq, 'net_rpy.csv')
imu_rpys = SO3.to_rpy(imu_Rots).cpu()
net_rpys = SO3.to_rpy(net_Rots).cpu()
imu_t = self.gt['ts'][:imu_rpys.shape[0]]
imu_t = np.expand_dims(imu_t, axis=1)
net_t = self.gt['ts'][:net_rpys.shape[0]]
net_t = np.expand_dims(net_t, axis=1)
imu_rpys =
|
np.hstack((imu_t, imu_rpys))
|
numpy.hstack
|
import os
import sys
from tqdm import tqdm
import concurrent.futures as futures
import numpy as np
import shutil
import mercantile
from rasterio import open as rasterio_open
from rasterio.vrt import WarpedVRT
from rasterio.enums import Resampling
from rasterio.warp import transform_bounds
from rasterio.transform import from_bounds
from neat_eo.core import load_config, check_classes, make_palette, web_ui, Logs
from neat_eo.tiles import (
tiles_from_csv,
tile_from_xyz,
tile_image_to_file,
tile_label_to_file,
tile_image_from_file,
tile_label_from_file,
)
def add_parser(subparser, formatter_class):
parser = subparser.add_parser("tile", help="Tile a raster, or a rasters coverage", formatter_class=formatter_class)
inp = parser.add_argument_group("Inputs")
inp.add_argument("--rasters", type=str, required=True, nargs="+", help="path to raster files to tile [required]")
inp.add_argument("--cover", type=str, help="path to csv tiles cover file, to filter tiles to tile [optional]")
inp.add_argument("--bands", type=str, help="list of 1-n index bands to select (e.g 1,2,3) [optional]")
out = parser.add_argument_group("Output")
out.add_argument("--zoom", type=int, required=True, help="zoom level of tiles [required]")
out.add_argument("--ts", type=str, default="512,512", help="tile size in pixels [default: 512,512]")
help = "nodata pixel value, used by default to remove coverage border's tile [default: 0]"
out.add_argument("--nodata", type=int, default=0, choices=range(0, 256), metavar="[0-255]", help=help)
help = "Skip tile if nodata pixel ratio > threshold. [default: 100]"
out.add_argument("--nodata_threshold", type=int, default=100, choices=range(0, 101), metavar="[0-100]", help=help)
out.add_argument("--keep_borders", action="store_true", help="keep tiles even if borders are empty (nodata)")
out.add_argument("--format", type=str, help="file format to save images in (e.g jpeg)")
out.add_argument("--out", type=str, required=True, help="output directory path [required]")
lab = parser.add_argument_group("Labels")
lab.add_argument("--label", action="store_true", help="if set, generate label tiles")
lab.add_argument("--config", type=str, help="path to config file [required with --label, if no global config setting]")
perf = parser.add_argument_group("Performances")
perf.add_argument("--workers", type=int, help="number of workers [default: raster files]")
ui = parser.add_argument_group("Web UI")
ui.add_argument("--web_ui_base_url", type=str, help="alternate Web UI base URL")
ui.add_argument("--web_ui_template", type=str, help="alternate Web UI template path")
ui.add_argument("--no_web_ui", action="store_true", help="desactivate Web UI output")
parser.set_defaults(func=main)
def is_nodata(image, nodata, threshold, keep_borders=False):
if not keep_borders:
if (
np.all(image[0, :, :] == nodata)
or np.all(image[-1, :, :] == nodata)
or np.all(image[:, 0, :] == nodata)
or np.all(image[:, -1, :] == nodata)
):
return True # pixel border is nodata, on all bands
    H, W, C = image.shape  # image arrives as H,W,C (after moveaxis in the worker below)
    return np.sum(image == nodata) >= H * W * C * (threshold / 100)
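# Illustrative check (assumes the H,W,C layout produced by the worker below);
# a tile whose top half is nodata fails a 40% threshold:
#   img = np.full((256, 256, 3), 255, dtype=np.uint8)
#   img[:128] = 0
#   is_nodata(img, nodata=0, threshold=40, keep_borders=True)  # True, 50% >= 40%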
def main(args):
assert not (args.label and args.format), "Format option not supported for label, output must be kept as png"
try:
args.bands = list(map(int, args.bands.split(","))) if args.bands else None
except:
raise ValueError("invalid --args.bands value")
if not args.workers:
args.workers = min(os.cpu_count(), len(args.rasters))
if args.label:
config = load_config(args.config)
check_classes(config)
colors = [classe["color"] for classe in config["classes"]]
palette = make_palette(colors)
assert len(args.ts.split(",")) == 2, "--ts expect width,height value (e.g 512,512)"
width, height = list(map(int, args.ts.split(",")))
cover = [tile for tile in tiles_from_csv(os.path.expanduser(args.cover))] if args.cover else None
splits_path = os.path.join(os.path.expanduser(args.out), ".splits")
args.out = os.path.expanduser(args.out)
if os.path.dirname(os.path.expanduser(args.out)):
os.makedirs(args.out, exist_ok=True)
log = Logs(os.path.join(args.out, "log"), out=sys.stderr)
raster = rasterio_open(os.path.expanduser(args.rasters[0]))
args.bands = args.bands if args.bands else raster.indexes
raster.close()
print(
"neo tile {} rasters on bands {}, on CPU with {} workers".format(len(args.rasters), args.bands, args.workers),
file=sys.stderr,
flush=True,
)
skip = []
tiles_map = {}
total = 0
for path in args.rasters:
raster = rasterio_open(os.path.expanduser(path))
assert set(args.bands).issubset(set(raster.indexes)), "Missing bands in raster {}".format(path)
try:
w, s, e, n = transform_bounds(raster.crs, "EPSG:4326", *raster.bounds)
except:
log.log("WARNING: missing or invalid raster projection, SKIPPING: {}".format(path))
skip.append(path)
continue
tiles = [mercantile.Tile(x=x, y=y, z=z) for x, y, z in mercantile.tiles(w, s, e, n, args.zoom)]
tiles = list(set(tiles) & set(cover)) if cover else tiles
total += len(tiles)
for tile in tiles:
tile_key = (str(tile.x), str(tile.y), str(tile.z))
if tile_key not in tiles_map.keys():
tiles_map[tile_key] = []
tiles_map[tile_key].append(path)
raster.close()
assert total, "Nothing left to tile"
    if args.format is not None:
        ext = args.format
    elif args.label or len(args.bands) == 1:
        ext = "png"
    elif len(args.bands) == 3:
        ext = "webp"
    else:
        ext = "tiff"  # covers two bands and more than three bands
tiles = []
progress = tqdm(desc="Coverage tiling", total=total, ascii=True, unit="tile")
with futures.ThreadPoolExecutor(args.workers) as executor:
def worker(path):
if path in skip:
return None
raster = rasterio_open(path)
w, s, e, n = transform_bounds(raster.crs, "EPSG:4326", *raster.bounds)
tiles = [mercantile.Tile(x=x, y=y, z=z) for x, y, z in mercantile.tiles(w, s, e, n, args.zoom)]
tiled = []
for tile in tiles:
if cover and tile not in cover:
continue
w, s, e, n = mercantile.xy_bounds(tile)
warp_vrt = WarpedVRT(
raster,
crs="epsg:3857",
resampling=Resampling.bilinear,
add_alpha=False,
transform=from_bounds(w, s, e, n, width, height),
width=width,
height=height,
)
data = warp_vrt.read(
out_shape=(len(args.bands), width, height), indexes=args.bands, window=warp_vrt.window(w, s, e, n)
)
if data.dtype == "uint16": # GeoTiff could be 16 bits
data = np.uint8(data / 256)
elif data.dtype == "uint32": # or 32 bits
data = np.uint8(data / (256 * 256))
image = np.moveaxis(data, 0, 2) # C,H,W -> H,W,C
tile_key = (str(tile.x), str(tile.y), str(tile.z))
if (
not args.label
and len(tiles_map[tile_key]) == 1
and is_nodata(image, args.nodata, args.nodata_threshold, args.keep_borders)
):
progress.update()
continue
if len(tiles_map[tile_key]) > 1:
out = os.path.join(splits_path, str(tiles_map[tile_key].index(path)))
else:
out = args.out
x, y, z = map(int, tile)
if not args.label:
tile_image_to_file(out, mercantile.Tile(x=x, y=y, z=z), image, ext=ext)
if args.label:
tile_label_to_file(out, mercantile.Tile(x=x, y=y, z=z), palette, args.nodata, image)
if len(tiles_map[tile_key]) == 1:
tiled.append(mercantile.Tile(x=x, y=y, z=z))
progress.update()
raster.close()
return tiled
for tiled in executor.map(worker, args.rasters):
if tiled is not None:
tiles.extend(tiled)
total = sum([1 for tile_key in tiles_map.keys() if len(tiles_map[tile_key]) > 1])
progress = tqdm(desc="Aggregate splits", total=total, ascii=True, unit="tile")
with futures.ThreadPoolExecutor(args.workers) as executor:
def worker(tile_key):
if len(tiles_map[tile_key]) == 1:
return
image = np.zeros((width, height, len(args.bands)), np.uint8)
x, y, z = map(int, tile_key)
for i in range(len(tiles_map[tile_key])):
root = os.path.join(splits_path, str(i))
_, path = tile_from_xyz(root, x, y, z)
if not args.label:
split = tile_image_from_file(path)
if args.label:
split = tile_label_from_file(path)
if len(split.shape) == 2:
split = split.reshape((width, height, 1)) # H,W -> H,W,C
assert image.shape == split.shape, "{}, {}".format(image.shape, split.shape)
image[np.where(image == 0)] += split[
|
np.where(image == 0)
|
numpy.where
|
import sm
import numpy as np
import pylab as pl
def plotSpline(spline, splineB = None):
"""Plots a spline over the full range of times.
The orientations are shown as EulerAnglesYawPitchRoll.
Args:
spline (BsplinePose): a pose describing spline
"""
ap = sm.EulerAnglesYawPitchRoll()
t1 = spline.t_min()
t2 = spline.t_max()
times = np.linspace(t1, t2, 1000)
pos = []
ori = []
for t in times:
T = spline.transformation(t)
pos.append(T[:,3])
ori.append(ap.rotationMatrixToParameters(T[:3,:3]))
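        # T is a 4x4 homogeneous transform: T[:, 3] is the translation column
        # (homogeneous coordinate included), T[:3, :3] the rotation matrix.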
if (splineB is not None):
posB = []
oriB = []
for t in times:
T = splineB.transformation(t)
posB.append(T[:,3])
oriB.append(ap.rotationMatrixToParameters(T[:3,:3]))
posB = np.array(posB)
oriB =
|
np.array(oriB)
|
numpy.array
|
from qutip import *
import numpy as np
import scipy
from scipy import stats
import itertools
import random
import matplotlib.pyplot as plt
import pickle
from time import time
from plot_settings import *
#Pauli matrices
s = [sigmax(), sigmay(), sigmaz()]
#General qubit state, input as list of Bloch vector components, i.e. r = [rx, ry, rz]
def rho(r):
if np.linalg.norm(r) != 1:
r = np.array(r)/
|
np.linalg.norm(r)
|
numpy.linalg.norm
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_cast_float2int(test_case, device, shape):
np_arr = np.random.randn(*shape).astype(np.float32)
input = flow.Tensor(np_arr, dtype=flow.float32, device=flow.device(device))
output = flow.cast(input, flow.int8)
np_out = np_arr.astype(np.int8)
test_case.assertTrue(np.array_equal(output.numpy(), np_out))
def _test_cast_int2float(test_case, device, shape):
np_arr =
|
np.random.randn(*shape)
|
numpy.random.randn
|
import copy
import numpy as np
from numba import jit
@jit
def _compute_trial_expectation(prior, likelihood, transition):
# Forward-backward algorithm, see Rabiner for implementation details
# http://www.cs.ubc.ca/~murphyk/Bayes/rabiner.pdf
t = 0
T = likelihood.shape[1]
num_states = likelihood.shape[0]
# E-step
# alpha1 is the forward probability of seeing the sequence
alpha1 = np.zeros((len(prior), T))
alpha2 = np.zeros((len(prior), T))
scale = np.zeros((len(prior), T))
scale_a = np.ones((T, 1))
score = np.zeros((T, 1))
alpha1[:, 0] = prior * likelihood[:, 0]
alpha1[:, 0] = alpha1[:, 0] / np.sum(alpha1[:, 0], axis = 0)
scale[:, 0] = alpha1[:, 0]
alpha2[:, 0] = prior
for t in range(1, T):
alpha1[:, t] = np.matmul(transition[:, :, t].T, alpha1[:, t - 1])
scale[:, t] = alpha1[:, t] / np.sum(alpha1[:, t], axis = 0)
alpha1[:, t] = alpha1[:, t] * likelihood[:, t]
# Use this scaling component to try to prevent underflow errors
scale_a[t] = np.sum(alpha1[:, t], axis = 0)
alpha1[:, t] = alpha1[:, t] / scale_a[t]
alpha2[:, t] = np.matmul(transition[:, :, t].T, alpha2[:, t - 1])
alpha2[:, t] = alpha2[:, t] / np.sum(alpha2[:, t], axis = 0)
score[t] = np.sum(alpha2[:, t] * likelihood[:, t], axis = 0)
# beta is the backward probability of seeing the sequence
beta = np.zeros((len(prior), T)) # beta(i, t) = Pr(O(t + 1:T) | X(t) = i)
beta[:, -1] = np.ones(len(prior)) / len(prior)
scale_b = np.ones((T, 1))
for t in range(T - 2, -1, -1):
beta[:, t] = np.matmul(transition[:, :, t + 1], (beta[:, t + 1] * likelihood[:, t + 1]))
scale_b[t] = np.sum(beta[:, t], axis = 0)
beta[:, t] = beta[:, t] / scale_b[t]
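    # Note: beta is initialized uniformly (1 / num_states) rather than the
    # textbook all-ones; with the per-step rescaling this differs only by a
    # constant factor, which is harmless provided gamma is normalized
    # downstream.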
# If any of the values are 0, it's defacto an underflow error so set it to eps
alpha1[alpha1 == 0] = np.finfo(float).eps
beta[beta == 0] = np.finfo(float).eps
# gamma is the probability of seeing the sequence, found by combining alpha and beta
gamma = np.exp(np.log(alpha1) +
|
np.log(beta)
|
numpy.log
|
import time
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import cm
from matplotlib.colors import LightSource
ID = 0
R_EXP = 1
R_OBS = 2
P_EXP = 3
P_OBS = 5
class AngularRepresentation:
angle_id_ = 0
position_expected_ = np.array([], dtype=np.double)
position_observed_ = np.array([], dtype=np.double)
range_expected_ = np.array([], dtype=np.double)
range_observed_ = np.array([], dtype=np.double)
def __init__(self, angle_id, range_expected, range_observed, position_expected, position_observed):
self.angle_id_ = angle_id
self.range_expected_ = np.concatenate([self.range_expected_, np.array([range_expected], dtype=np.double)])
self.range_observed_ = np.concatenate([self.range_observed_, np.array([range_observed], dtype=np.double)])
self.position_expected_ = np.array([[position_expected[0]], [position_expected[1]]], dtype=np.double)
self.position_observed_ = np.array([[position_observed[0]], [position_observed[1]]], dtype=np.double)
def add(self, range_expected, range_observed, position_expected, position_observed):
# if hasattr(self, 'range_exp_cutoff'):
# if range_expected > self.range_exp_cutoff:
# return
# if hasattr(self, 'range_obs_cutoff'):
# if range_observed > self.range_obs_cutoff:
# return
assert (np.isnan(range_expected) or np.isnan(range_observed) or np.isnan(position_expected).any() or np.isnan(
position_observed).any()) == False, "read nan values"
self.range_expected_ = np.concatenate([self.range_expected_, np.array([range_expected], dtype=np.double)])
self.position_expected_ = np.concatenate([self.position_expected_, position_expected.reshape(2, 1)], axis=0)
self.range_observed_ = np.concatenate([self.range_observed_, np.array([range_observed], dtype=np.double)])
self.position_observed_ = np.concatenate([self.position_observed_, position_observed.reshape(2, 1)], axis=0)
def set_cutoff(self, range_exp_cutoff=np.finfo(np.double).max, range_obs_cutoff=np.finfo(np.double).max):
self.range_exp_cutoff = min(range_exp_cutoff, range_obs_cutoff)
self.range_obs_cutoff = min(range_exp_cutoff, range_obs_cutoff)
    def normalize_observations(self):
        '''
        Calculates the residual between expected and observed ranges.
        The result is cached in self.normalized_observations_.
        '''
        if not hasattr(self, 'normalized_observations_'):
            self.normalized_observations_ = self.range_expected_ - self.range_observed_
def getIdxArray(self):
if not hasattr(self, 'idx_array_'):
            self.idx_array_ = np.array([self.angle_id_ for _ in range(self.range_observed_.shape[0])], dtype=np.double)
return self.idx_array_
def getMaxExp(self):
if not hasattr(self, 'maxval_exp'):
self.maxval_exp = np.max(self.range_expected_)
return self.maxval_exp
def getMaxObs(self):
if not hasattr(self, 'maxval_obs'):
self.maxval_obs = np.max(self.range_observed_)
return self.maxval_obs
def getMinExp(self):
if not hasattr(self, 'minval_exp'):
self.minval_exp = np.min(self.range_expected_)
return self.minval_exp
def getMinObs(self):
if not hasattr(self, 'minval_obs'):
self.minval_obs = np.min(self.range_observed_)
return self.minval_obs
class StatisticsEvaluator:
def __init__(self):
pass
def parse(file, max_lines=100000000):
lines = []
cntr = 0
for l in file.readlines():
if cntr >= max_lines:
return lines
lelems = l.split(", ")
if len(lelems) != 7:
print("WARNING: line {} inconsistent, 7 entries per line expected, therefore aborting parse operation " % (
len(lines) + 1))
return lines
# print('%s, %s, %s, %s, %s' % (lelems[0], lelems[R_EXP], lelems[R_OBS], lelems[P_EXP], lelems[P_OBS]))
lines.append(lelems)
cntr += 1
return lines
def agglomerate(lines):
'''
    Agglomerates the measurements for each angle, to support statistics on
    how many measurements lie within the boundaries of the expected
    measurements, etc.
:param lines: the individual lines read from result.csv
:return: datastructure containing the agglomerated result for further processing
'''
angle2measurements = {}
for line in lines:
angle_id = np.double(line[ID])
range_exp = np.double(line[R_EXP])
range_obs = np.double(line[R_OBS])
position_exp = np.array([np.double(line[P_EXP]), np.double(line[P_EXP + 1])], dtype=np.double)
position_obs = np.array([np.double(line[P_OBS]), np.double(line[P_OBS + 1])], dtype=np.double)
val = angle2measurements.get(angle_id)
if val is None:
angle2measurements[angle_id] = AngularRepresentation(angle_id,
range_exp,
range_obs,
position_exp,
position_obs)
else:
val.add(range_exp,
range_obs,
position_exp,
position_obs)
return angle2measurements
def eval(angle2meas):
total_meas = len(angle2meas.items())
print("eval: ", total_meas)
cntr = 0
for k, v in angle2meas.items():
v.normalize_observations()
print("", cntr, total_meas)
cntr += 1
# print("%d %lf %lf" % (key, table_range[key][0], table_range[key][1]))
def lookup_table(table, x, y):
v = table.get((x, y))
if v is None:
return 0
return 1.0
def hist3d_thrun_dbg(angle2meas, binsize_xy):
fig = plt.figure()
idxs = np.array([], dtype=np.double)
max_exp_val = np.finfo(np.double).min
min_exp_val = np.finfo(np.double).max
for key, val in angle2meas.items():
max_exp_val = max(val.getMaxExp(), max_exp_val)
min_exp_val = min(val.getMinExp(), min_exp_val)
binrange_y = max_exp_val - min_exp_val
    binshape_xy = (int(np.ceil(binrange_y / binsize_xy[0])), int(np.ceil(binrange_y / binsize_xy[1])))
grid = np.zeros(shape=binshape_xy, dtype=np.double)
X, Y = np.meshgrid(np.arange(0, binshape_xy[0], 1), np.arange(0, binshape_xy[1], 1))
print(max_exp_val)
print(min_exp_val)
print(binshape_xy)
for key, val in angle2meas.items():
        y_exp_idx = ((val.range_expected_ - min_exp_val) / binsize_xy[1]).astype(int)
grid[y_exp_idx, y_exp_idx] += 1
ax = fig.add_subplot(111, projection='3d')
# Construct arrays for the anchor positions of the 16 bars.
# Note: np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten xpos,
# ypos in column-major order. For numpy >= 1.7, we could instead call meshgrid
# with indexing='ij'.
# Construct arrays with the dimensions for the 16 bars.
# ls = LightSource(270, 45)
# rgb = ls.shade(grid, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
ax.plot_surface(X, Y, grid, rstride=1, cstride=1, # facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
plt.show(block=True)
def scatter_plot(angle2meas):
fig = plt.figure()
scatter_xrange = np.array([], dtype=np.double)
scatter_yrange = np.array([], dtype=np.double)
for key, val in angle2meas.items():
scatter_xrange = np.concatenate([scatter_xrange, val.range_observed_])
scatter_yrange = np.concatenate([scatter_yrange, val.range_expected_])
axsc = fig.add_subplot(111)
axsc.scatter(scatter_xrange, scatter_yrange, marker='+')
def hist3d_thrun(angle2meas, binsize_xy, normalize_total=False, normalize_rows=True, medianize=True, switch_axes=False,
compute_ratio=True):
max_obs_val = np.finfo(np.double).min
min_obs_val = np.finfo(np.double).max
max_exp_val = np.finfo(np.double).min
min_exp_val = np.finfo(np.double).max
for key, val in angle2meas.items():
max_obs_val = max(val.getMaxObs(), max_obs_val)
min_obs_val = min(val.getMinObs(), min_obs_val)
max_exp_val = max(val.getMaxExp(), max_exp_val)
min_exp_val = min(val.getMinExp(), min_exp_val)
print("minmax exp")
print(min_exp_val)
print(max_exp_val)
print("-----------")
print("minmax obs")
print(min_obs_val)
print(max_obs_val)
print("-----------")
binrange_x = max_obs_val
binrange_y = max_exp_val
    binshape_xy = (int(np.ceil(binrange_x / binsize_xy[0])), int(np.ceil(binrange_y / binsize_xy[1])))
binshape_xy = (max(binshape_xy[0], binshape_xy[1]), max(binshape_xy[0], binshape_xy[1]))
grid = np.zeros(shape=binshape_xy, dtype=np.double)
if not switch_axes:
X, Y = np.meshgrid(np.arange(0, binshape_xy[0], 1),
|
np.arange(0, binshape_xy[1], 1)
|
numpy.arange
|
import numpy as np
from random import sample
from random import seed as _py_seed
from numpy.random import choice, shuffle
from numpy.random import multinomial, normal, uniform
from numpy.random import seed as _np_seed
__all__ = ["choice","sample","seed","shuffle",
"prob_vector","bimodal","categorical"]
""" Probability functions and distributions useful in embedding and testing
Ising models.
"""
def seed(a):
_py_seed(a)
_np_seed(a)
def prob_vector(N):
vec = [normal(0, 1) for i in range(N)]
mag = sum(x**2 for x in vec) ** .5
return [(x/mag)**2 for x in vec]
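# Example: prob_vector(3) might return something like [0.21, 0.05, 0.74];
# squaring the normalized Gaussian components guarantees non-negative
# entries that sum to 1.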
def bimodal(N, loc1=-1.0,scale1=.25,size1=None,
loc2=+1.0,scale2=.25,size2=None):
if size1 is None:
size1=N//2
if size2 is None:
size2=N-size1
samples1 = normal(loc1,scale1,size1)
samples2 =
|
normal(loc2,scale2,size2)
|
numpy.random.normal
|
#!/usr/bin/env python
#
# Example of one object under gravity with one contactor and a ground
# using the Siconos proposed mechanics API
#
from siconos.mechanics.collision.convexhull import ConvexHull2d
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner
import siconos.numerics as sn
import siconos.kernel as sk
from siconos.mechanics.collision.bullet import SiconosBulletOptions
import numpy
import random
import math
bullet_options = SiconosBulletOptions()
bullet_options.worldScale = 1.0
bullet_options.contactBreakingThreshold = 0.04
bullet_options.dimension = 1
density = 1000.0
def create_grain(io, name, cname, grain_size=0.05, density=1, trans=None,
tob=None):
# Definition of an irregular polyhedron as a convex shape
rd = [math.pi / 2 * random.gauss(0.5, 0.2) for _ in range(16)]
def vert(id1, id2, a, b):
return (a * math.cos(rd[id1]) * math.cos(rd[id2]),
b * math.sin(rd[id1]) * math.cos(rd[id2]))
vertices = [vert(0, 1, 1, 1),
vert(2, 3, 1, -1),
vert(4, 5, -1, 1),
vert(6, 7, -1, -1)]
#print('vertices', vertices)
scale = grain_size / max(
numpy.array(vertices).max(axis=0) - numpy.array(vertices).min(axis=0))
ch2d = ConvexHull2d(vertices)
cm = ch2d.centroid()
# correction of vertices such that 0 is the centroid
vertices = (numpy.array(vertices)[:] - cm[:]) * scale
ch2d = ConvexHull2d(vertices)
cm = ch2d.centroid()
# Definition of a polyhedron as a convex shape
io.add_convex_shape(cname, vertices, insideMargin=0.001 * grain_size)
# computation of inertia and volume
inertia, area = ch2d.inertia(ch2d.centroid())
# print('geometric inertia:', inertia)
# print('volume:', volume)
# print('mass:', volume*density)
# print('inertia:', inertia*density)
io.add_object(name,
[Contactor(cname)],
translation=trans,
#velocity=veloci,
mass=area * density,
time_of_birth=tob,
inertia=inertia * density)
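# Hypothetical usage sketch (names and values illustrative only): inside a
# MechanicsHdf5Runner 'with' block, a single grain could be added via
#   create_grain(io, 'grain_0', 'grain_shape_0', grain_size=0.05,
#                density=density, trans=[0.0, 1.0], tob=0.0)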
def create_grains(io, n_row=5, n_col=5, x_shift=3.0,
grain_size=0.05, top=0, rate=0.01, density=1,
distribution=('uniform', 0.1)):
N = n_row * n_col
dist, rng = distribution
if dist == 'uniform':
sizes = numpy.random.uniform(low=grain_size - rng / 2,
high=grain_size + rng / 2,
size=N)
elif dist == 'double':
sizes = numpy.hstack(
(numpy.random.normal(scale=rng * 0.2,
loc=grain_size - rng / 2,
                                size=N // 2),
numpy.random.normal(scale=rng * 0.2,
loc=grain_size + rng / 2,
                                size=N // 2)))
numpy.random.shuffle(sizes)
# Gives a rock size distribution with two sizes of rocks, with
# the mean between both at grain_size
elif dist == 'exp':
# In a normal distribution, 10-
# and 90-percentiles are loc +/- rng*1.28.
# Let's arrange the 10- and 90-percentiles of the distribution
# to be in the desired range.
sizes = numpy.random.exponential(1, N)
bottom = numpy.percentile(sizes, 10)
top =
|
numpy.percentile(sizes, 90)
|
numpy.percentile
|
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.colors as mcolors
import sklearn
# Designed for sklearn version 0.24.1
def zero_one_normalize(xs):
"""Rescales dataset features to [0,1] range
Args:
xs: Pandas DataFrame
"""
return (xs - xs.min())/(xs.max() - xs.min())
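# Example: for a DataFrame column with values [0, 5, 10] this returns
# [0.0, 0.5, 1.0]; a constant column divides by zero and comes back as NaN.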
def plot_cbar(xs, cmap, fig, ax, label=None, fontsize=None):
normalize = mcolors.Normalize(vmin=xs.min(), vmax=xs.max())
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
scalarmappaple.set_array(xs)
cbar = fig.colorbar(scalarmappaple, ax=ax)
cbar.set_label(label, fontsize=fontsize)
return cbar
def highlight_region_around_x(ax, x_target, xs, spacing=0.5, highlight_color="orange", alpha=0.5):
eps = 1e-8
diffs = xs - x_target
try:
min_pos_diff = np.min(diffs[diffs > eps])
except:
min_pos_diff = 0.0
try:
min_neg_diff = np.max(diffs[diffs < -eps])
except:
min_neg_diff = 0.0
rng_pos = x_target + min_pos_diff*spacing
rng_neg = x_target + min_neg_diff*spacing
ax.axvspan(rng_neg, rng_pos, color=highlight_color, alpha=alpha)
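# With the default spacing=0.5, highlight_region_around_x highlights a band
# reaching halfway to the nearest neighbor in xs on each side of x_target;
# on an evenly spaced grid with step 0.5 that is 0.25 to either side.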
##########################################################################################
# L1/L2 Regularization (logistic) #
##########################################################################################
def check_object(model):
    """ Checks that model is a LogisticRegressionCV fit with l1/l2 penalty
    """
    assert model.penalty in ("l1", "l2"), "penalty must be l1 or l2"
    sklearn.utils.validation.check_is_fitted(model)
def plot_reg_path_coef(model, marker='o', highlight_c="orange", figsize=None, fontsize=None, ax=None):
"""Plots coefs vs penalization strength.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
ax: matplotlib.axes or None, default=None
ax object to plot onto. If None, new ax is created.
Returns
-------
ax (if ax!=None): matplotlib.axes
fig, ax (if ax=None): matplotlib.pyplot.figure, matplotlib.axes
"""
check_object(model)
# create figure if absent
return_fig = False
if not ax:
fig, ax = plt.subplots(1,1, figsize=figsize)
return_fig = True
else:
if figsize:
warnings.warn("ax provided, figsize not updated")
ax.plot(np.log10(model.Cs_), model.coefs_paths_[1].mean(axis=0), marker=marker)
    ymin, ymax = ax.get_ylim()
if highlight_c:
highlight_region_around_x(ax, x_target=np.log10(model.C_[0]), xs=np.log10(model.Cs_), spacing=0.5, highlight_color=highlight_c, alpha=0.5)
ax.set_xlabel('log(C)', fontsize=fontsize)
ax.set_ylabel('Mean Coefficient', fontsize=fontsize)
ax.axis('tight')
if return_fig:
return fig, ax
else:
return ax
def plot_reg_path_perf(model, marker='o', highlight_c="orange", include_n_coef=False, figsize=None, fontsize=None, ax=None):
"""Plots perf vs penalization strength with num of nonzero coefs if specified.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
include_n_coef: bool, default=False
If true, the second plot also includes the number of nonzero
coefficients vs penalization strength on a second axis on the right.
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
ax: matplotlib.axes or None, default=None
ax object to plot onto. If None, new ax is created.
Returns
-------
ax (if ax!=None): matplotlib.axes
fig, ax (if ax=None): matplotlib.pyplot.figure, matplotlib.axes
"""
check_object(model)
# create figure if absent
return_fig = False
if not ax:
fig, ax = plt.subplots(1,1, figsize=figsize)
return_fig = True
else:
if figsize:
warnings.warn("ax provided, figsize not updated")
ax.plot(np.log10(model.Cs_), model.scores_[1].mean(axis=0), label=model.scoring, marker=marker)
    ymin, ymax = ax.get_ylim()
if include_n_coef:
ax_coef = ax.twinx()
ax_coef.set_ylabel("n coef", fontsize=fontsize)
ax_coef.plot(np.log10(model.Cs_), (model.coefs_paths_[1] != 0).any(axis=0).sum(axis=1), marker=marker, label="n coef", color="orange")
ax_coef.axis('tight')
        # add a proxy artist so the legend shows the n coef series in its color
        ax.plot([], [], label="n coef", color="orange")
        ax.legend()
if highlight_c:
highlight_region_around_x(ax, x_target=np.log10(model.C_[0]), xs=np.log10(model.Cs_), spacing=0.5, highlight_color=highlight_c, alpha=0.5)
ax.set_xlabel('log(C)', fontsize=fontsize)
ax.set_ylabel(model.scoring, fontsize=fontsize)
ax.axis('tight')
if return_fig:
return fig, ax
else:
return ax
def plot_reg_path(model, marker='o', highlight_c="orange", include_n_coef=False, figsize=None, fontsize=None):
"""Plots path of an L1/L2 regularized sklearn.linear_model.LogisticRegressionCV.
Produces two adjacent plots.
The first is a plot of mean coefficient values vs penalization strength.
The second is a plot of performance vs penalization strength.
The second plot may include number of nonzero coefs, if specified by parameters.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
include_n_coef: bool, default=False
If true, the second plot also includes the number of nonzero
coefficients vs penalization strength on a second axis on the right.
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
Returns
-------
fig, (ax1, ax2): matplotlib.pyplot.figure and matplotlib.axes for plots.
"""
# Validate object
check_object(model)
# Create subplots
fig, (ax1, ax2) = plt.subplots(1,2, figsize=figsize)
# Create title
fig.suptitle('Mean Logistic Regression Path Over Crossval Folds', fontsize=fontsize)
# First plot
plot_reg_path_coef(model, marker=marker, highlight_c=highlight_c, fontsize=fontsize, ax=ax1)
# Second plot
plot_reg_path_perf(model, marker=marker, highlight_c=highlight_c, include_n_coef=include_n_coef, fontsize=fontsize, ax=ax2)
return fig, (ax1, ax2)
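# Example usage (hypothetical; assumes a LogisticRegressionCV fit with an L1 penalty):
#   model = LogisticRegressionCV(penalty="l1", solver="liblinear", Cs=10).fit(X, y)
#   fig, (ax1, ax2) = plot_reg_path(model, include_n_coef=True, figsize=(12, 4))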
##########################################################################################
# Elastic Net (logistic) #
##########################################################################################
def plot_perf_vs_l1ratio(model, marker='o', highlight_c="orange", cmap=cm.viridis, t=None, figsize=None, fontsize=None, ax=None):
"""Plots path of Elastic Net sklearn.linear_model.LogisticRegressionCV.
Performance is plotted vs penalization strength.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
cmap: matplotlib colormap, default=matplotlib.cm.viridis
Color map for series colors associated with penalization strength
(first plot)
    t: float or None, default=None
Defines lowest plotted performance by linear interpolation between
lowest and highest performance.
t=None => Lowest plotted performance is worst performance above 0.5
t=0 => Lowest plotted performance is the worst performance
t=0.5 => Lowest plotted performance is halfway between worst and best
t=1.0 => Lowest plotted performance is best performance
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
Returns
-------
ax (if ax!=None): matplotlib.axes
fig, ax (if ax=None): matplotlib.pyplot.figure, matplotlib.axes
"""
# create figure if absent
return_fig = False
if not ax:
fig, ax = plt.subplots(1,1, figsize=figsize)
return_fig = True
else:
if figsize:
warnings.warn("ax provided, figsize not updated")
# calculate mean crossval perfs
mean_scores = model.scores_[1].mean(axis=0)
max_score = mean_scores.max()
    if t is not None:
min_score = mean_scores.min()
min_score = (1-t)*min_score + t*max_score
ax.set_ylim([min_score, max_score])
else:
min_score = model.scores_[1].flatten()[model.scores_[1].flatten() > 0.5].min()
ax.set_ylim([min_score, max_score])
log10Cs = np.log10(model.Cs_)
colors = zero_one_normalize(log10Cs)
for col, c, series in zip(colors, log10Cs, mean_scores):
ax.plot(model.l1_ratios, series, marker=marker, label="{0:.4f}".format(c), color=cmap(col))
if highlight_c:
highlight_region_around_x(ax, x_target=model.l1_ratio_[0], xs=model.l1_ratios, spacing=0.5, highlight_color=highlight_c, alpha=0.5)
ax.set_xlabel("l1 ratio", fontsize=fontsize)
ax.set_ylabel(model.scoring, fontsize=fontsize)
if return_fig:
plot_cbar(log10Cs, cmap=cmap, fig=fig, ax=ax, label="log(C)", fontsize=fontsize)
return fig, ax
else:
return ax
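# Example usage (hypothetical; assumes an elastic-net LogisticRegressionCV `enet`
# fit with penalty="elasticnet", solver="saga", and an l1_ratios grid):
#   fig, ax = plot_perf_vs_l1ratio(enet, t=0.25)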
def plot_perf_vs_c(model, marker='o', highlight_c="orange", cmap=cm.viridis, t=None, figsize=None, fontsize=None, ax=None):
"""Plots path of Elastic Net sklearn.linear_model.LogisticRegressionCV.
Performance is plotted vs l1 ratio.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
cmap: matplotlib colormap, default=matplotlib.cm.viridis
Color map for series colors associated with penalization strength
(first plot)
    t: float or None, default=None
Defines lowest plotted performance by linear interpolation between
lowest and highest performance.
        t=None => Lowest plotted performance is worst performance above 0.5
t=0 => Lowest plotted performance is the worst performance
t=0.5 => Lowest plotted performance is halfway between worst and best
t=1.0 => Lowest plotted performance is best performance
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
Returns
-------
ax (if ax!=None): matplotlib.axes
fig, ax (if ax=None): matplotlib.pyplot.figure, matplotlib.axes
"""
# create figure if absent
return_fig = False
if not ax:
fig, ax = plt.subplots(1,1, figsize=figsize)
return_fig = True
else:
if figsize:
warnings.warn("ax provided, figsize not updated")
# calculate mean crossval perfs
mean_scores = model.scores_[1].mean(axis=0)
max_score = mean_scores.max()
    if t is not None:
min_score = mean_scores.min()
min_score = (1-t)*min_score + t*max_score
ax.set_ylim([min_score, max_score])
else:
min_score = model.scores_[1].flatten()[model.scores_[1].flatten() > 0.5].min()
ax.set_ylim([min_score, max_score])
colors = zero_one_normalize(model.l1_ratios)
for col, l1ratio, series in zip(colors, model.l1_ratios, mean_scores.T):
        ax.plot(np.log10(model.Cs_), series, marker=marker, label="{0:.2f}".format(l1ratio), color=cmap(col))
    # tail mirrors plot_perf_vs_l1ratio (the original extract truncates here)
    if highlight_c:
        highlight_region_around_x(ax, x_target=np.log10(model.C_[0]), xs=np.log10(model.Cs_), spacing=0.5, highlight_color=highlight_c, alpha=0.5)
    ax.set_xlabel('log(C)', fontsize=fontsize)
    ax.set_ylabel(model.scoring, fontsize=fontsize)
    if return_fig:
        plot_cbar(np.asarray(model.l1_ratios), cmap=cmap, fig=fig, ax=ax, label="l1 ratio", fontsize=fontsize)
        return fig, ax
    else:
        return ax
"""
Copyright: <NAME>, 2013
Code adapted from the Mark Paskin Matlab version
from http://openslam.informatik.uni-freiburg.de/data/svn/tjtf/trunk/matlab/ralign.m
modified by <NAME>
"""
import numpy as np
def estimate_SIM3_umeyama(X, Y):
    """Estimates a similarity transform (scale, rotation, translation)
    from X (source) to Y (target) with Umeyama's method.
    Args:
        X (np.ndarray): Nx3 array of source points
        Y (np.ndarray): Nx3 array of target points
    Returns:
        tuple: (c, R, t) scale factor, 3x3 rotation, 3x1 translation
    """
    n = X.shape[0]
    m = 3
    mx = X.mean(0, keepdims=True)
    my = Y.mean(0, keepdims=True)
    Xc = X - mx
    Yc = Y - my
    sx = np.mean(np.sum(Xc*Xc, axis=1))
    sy = np.mean(np.sum(Yc*Yc, axis=1))
    Sxy = (Xc.T @ Yc) / n
    U, D, VT = np.linalg.svd(Sxy, full_matrices=True, compute_uv=True)
    r = np.linalg.matrix_rank(Sxy)  # np.rank was intended here; ndim is always 2
    d = np.linalg.det(Sxy)
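    # Hypothetical completion (the original extract truncates here): the remaining
    # Umeyama (1991) steps, under the convention Sxy = Xc.T @ Yc / n used above.
    S = np.eye(m)
    if d < 0:
        S[m - 1, m - 1] = -1.0            # flip one axis to avoid a reflection
    R = (U @ S @ VT).T                    # rotation mapping the source frame to the target frame
    c = np.trace(np.diag(D) @ S) / sx     # isotropic scale
    t = my.T - c * (R @ mx.T)             # 3x1 translation
    return c, R, t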
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2015 <NAME>
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A library of simple models of vision
Simple usage::
import glob
from psychopy_ext import models
ims = glob.glob('Example_set/*.jpg') # get all jpg images
hmax = models.HMAX()
# if you want to see how similar your images are to each other
hmax.compare(ims)
# or to simply get the output and use it further
out = hmax.run(ims)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import sys, os, glob, itertools, warnings, inspect, argparse, imp
import tempfile, shutil
import pickle
from collections import OrderedDict
import numpy as np
import scipy.ndimage
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
import matlab_wrapper
import sklearn.manifold
import sklearn.preprocessing, sklearn.metrics, sklearn.cluster
import sklearn.svm, sklearn.cross_validation  # used by linear_clf below
import skimage.feature, skimage.data
from psychopy_ext import stats, plot, report, utils
try:
    imp.find_module('caffe')
    HAS_CAFFE = True
except ImportError:
    try:
        os.environ['CAFFE']
        # put Python bindings in the path
        sys.path.insert(0, os.path.join(os.environ['CAFFE'], 'python'))
        HAS_CAFFE = True
    except KeyError:
        HAS_CAFFE = False
if HAS_CAFFE:
# Suppress GLOG output for python bindings
GLOG_minloglevel = os.environ.pop('GLOG_minloglevel', None)
os.environ['GLOG_minloglevel'] = '5'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
HAS_CAFFE = True
# Turn GLOG output back on for subprocess calls
if GLOG_minloglevel is None:
del os.environ['GLOG_minloglevel']
else:
os.environ['GLOG_minloglevel'] = GLOG_minloglevel
class Model(object):
def __init__(self, model, labels=None, verbose=True, *args, **kwargs):
self.name = ALIASES[model]
self.nice_name = NICE_NAMES[model]
self.safename = self.name
self.labels = labels
self.args = args
self.kwargs = kwargs
self.verbose = verbose
def download_model(self, path=None):
"""Downloads and extracts a model
:Kwargs:
path (str, default: '')
Where model should be extracted
"""
self._setup()
if self.model.model_url is None:
print('Model {} is already available'.format(self.nice_name))
elif self.model.model_url == 'manual':
print('WARNING: Unfortunately, you need to download {} manually. '
'Follow the instructions in the documentation.'.format(self.nice_name))
else:
print('Downloading and extracting {}...'.format(self.nice_name))
if path is None:
path = os.getcwd()
text = raw_input('Where do you want the model to be extracted? '
'(default: {})\n'.format(path))
if text != '': path = text
outpath, _ = utils.extract_archive(self.model.model_url,
folder_name=self.safename, path=path)
if self.name == 'phog':
with open(os.path.join(outpath, 'anna_phog.m')) as f:
text = f.read()
with open(os.path.join(outpath, 'anna_phog.m'), 'wb') as f:
s = 'dlmwrite(s,p);'
f.write(text.replace(s, '% ' + s, 1))
print('Model {} is available here: {}'.format(self.nice_name, outpath))
print('If you want to use this model, either give this path when '
'calling the model or add it to your path '
'using {} as the environment variable.'.format(self.safename.upper()))
def _setup(self):
if not hasattr(self, 'model'):
if self.name in CAFFE_MODELS:
self.model = CAFFE_MODELS[self.name](model=self.name, *self.args, **self.kwargs)
else:
self.model = KNOWN_MODELS[self.name](*self.args, **self.kwargs)
self.model.labels = self.labels
self.isflat = self.model.isflat
self.model.verbose = self.verbose
def run(self, *args, **kwargs):
self._setup()
return self.model.run(*args, **kwargs)
def train(self, *args, **kwargs):
self._setup()
return self.model.train(*args, **kwargs)
def test(self, *args, **kwargs):
self._setup()
return self.model.test(*args, **kwargs)
def predict(self, *args, **kwargs):
self._setup()
return self.model.predict(*args, **kwargs)
def gen_report(self, *args, **kwargs):
self._setup()
return self.model.gen_report(*args, **kwargs)
class _Model(object):
def __init__(self, labels=None):
self.name = 'Model'
self.safename = 'model'
self.isflat = False
self.labels = labels
self.model_url = None
def gen_report(self, test_ims, train_ims=None, html=None):
print('input images:', test_ims)
print('processing:', end=' ')
if html is None:
            html = report.Report(path=os.getcwd())  # report path (`reppath` was undefined in the original)
html.open()
close_html = True
else:
close_html = False
resps = self.run(test_ims=test_ims, train_ims=train_ims)
html.writeh('Dissimilarity', h=1)
dis = dissimilarity(resps)
plot_data(dis, kind='dis')
html.writeimg('dis', caption='Dissimilarity across stimuli'
'(blue: similar, red: dissimilar)')
html.writeh('MDS', h=1)
mds_res = mds(dis)
plot_data(mds_res, kind='mds', icons=test_ims)
html.writeimg('mds', caption='Multidimensional scaling')
if self.labels is not None:
html.writeh('Linear separability', h=1)
            lin = linear_clf(dis, self.labels)
plot_data(lin, kind='linear_clf', chance=1./len(np.unique(self.labels)))
html.writeimg('lin', caption='Linear separability')
if close_html:
html.close()
def run(self, test_ims, train_ims=None, layers='output', return_dict=True):
"""
This is the main function to run the model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- train_ims (str, list, tuple, np.ndarray)
Training images
            - layers ('all'; 'output', 'top', None; str, int;
                list of str or int; default: 'output')
Which layers to record and return. 'output', 'top' and None
return the output layer.
            - return_dict (bool, default: True)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
if train_ims is not None:
self.train(train_ims)
output = self.test(test_ims, layers=layers, return_dict=return_dict)
return output
def train(self, train_ims):
"""
A placeholder for a function for training a model.
If the model is not trainable, then it will default to this function
here that does nothing.
"""
        self.train_ims = self._im2iter(train_ims)
def test(self, test_ims, layers='output', return_dict=True):
"""
A placeholder for a function for testing a model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- layers ('all'; 'output', 'top', None; str, int;
list of str or int; default: 'output')
Which layers to record and return. 'output', 'top' and None
return the output layer.
            - return_dict (bool, default: True)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
self.layers = layers
# self.test_ims = im2iter(test_ims)
def predict(self, ims, topn=5):
"""
A placeholder for a function for predicting a label.
"""
pass
def _setup_layers(self, layers, model_keys):
if self.safename in CAFFE_MODELS:
filt_layers = self._filter_layers()
else:
filt_layers = model_keys
if layers in [None, 'top', 'output']:
self.layers = [filt_layers[-1]]
elif layers == 'all':
self.layers = filt_layers
elif isinstance(layers, (str, unicode)):
self.layers = [layers]
elif isinstance(layers, int):
self.layers = [filt_layers[layers]]
elif isinstance(layers, (list, tuple, np.ndarray)):
if isinstance(layers[0], int):
self.layers = [filt_layers[layer] for layer in layers]
elif isinstance(layers[0], (str, unicode)):
self.layers = layers
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
    def _fmt_output(self, output, layers, return_dict=True):
        self._setup_layers(layers, output.keys())
        if not return_dict:
            output = output[self.layers[-1]]
        return output
def _im2iter(self, ims):
"""
        Converts input into an iterable.
This is used to take arbitrary input value for images and convert them to
an iterable. If a string is passed, a list is returned with a single string
in it. If a list or an array of anything is passed, nothing is done.
Otherwise, if the input object does not have `len`, an Exception is thrown.
"""
if isinstance(ims, (str, unicode)):
out = [ims]
else:
try:
len(ims)
except:
raise ValueError('input image data type not recognized')
else:
try:
ndim = ims.ndim
except:
out = ims
else:
if ndim == 1: out = ims.tolist()
elif self.isflat:
if ndim == 2: out = [ims]
elif ndim == 3: out = ims
else:
raise ValueError('images must be 2D or 3D, got %d '
'dimensions instead' % ndim)
else:
if ndim == 3: out = [ims]
elif ndim == 4: out = ims
else:
raise ValueError('images must be 3D or 4D, got %d '
'dimensions instead' % ndim)
return out
def load_image(self, *args, **kwargs):
return utils.load_image(*args, **kwargs)
def dissimilarity(self, resps, kind='mean_euclidean', **kwargs):
return dissimilarity(resps, kind=kind, **kwargs)
def mds(self, dis, ims=None, ax=None, seed=None, kind='metric'):
return mds(dis, ims=ims, ax=ax, seed=seed, kind=kind)
def cluster(self, *args, **kwargs):
return cluster(*args, **kwargs)
def linear_clf(self, resps, y, clf=None):
return linear_clf(resps, y, clf=clf)
def plot_data(data, kind=None, **kwargs):
    if kind in ['dis', 'dissimilarity']:
        if isinstance(data, dict): data = list(data.values())[0]
        g = sns.heatmap(data, **kwargs)
    elif kind == 'mds':
        g = plot.mdsplot(data, **kwargs)
    elif kind in ['clust', 'cluster']:
        g = sns.factorplot('layer', 'dissimilarity', data=data, kind='point')
    elif kind in ['lin', 'linear_clf']:
        g = sns.factorplot('layer', 'accuracy', data=data, kind='point')
        if 'chance' in kwargs:
            g.ax.axhline(kwargs['chance'], ls='--', c='.2')
    else:
        try:
            g = sns.factorplot(x='layer', y=data.columns[-1], data=data)
        except Exception:
            raise ValueError('Plot kind "{}" not recognized.'.format(kind))
return g
def dissimilarity(resps, kind='mean_euclidean', **kwargs):
"""
Computes dissimilarity between all rows in a matrix.
:Args:
resps (numpy.array)
A NxM array of model responses. Each row contains an
output vector of length M from a model, and distances
are computed between each pair of rows.
:Kwargs:
- kind (str or callable, default: 'mean_euclidean')
Distance metric. Accepts string values or callables recognized
by :func:`~sklearn.metrics.pairwise.pairwise_distances`, and
            also 'mean_euclidean' that normalizes
            Euclidean distance by the square root of the number of
            features (that is, divided by sqrt(M)), as used, e.g., by
            Grill-Spector et al. (1999), Op de Beeck et al. (2001),
            Panis et al. (2011).
.. note:: Up to version 0.6, 'mean_euclidean' was called
'euclidean', and 'cosine' was called 'gaborjet'. Also note
that 'correlation' used to be called 'corr' and is now
returning dissimilarities in the range [0,2] per
scikit-learn convention.
- \*\*kwargs
Keyword arguments for
        :func:`~sklearn.metrics.pairwise.pairwise_distances`
:Returns:
A square NxN matrix, typically symmetric unless otherwise
defined by the metric, and with NaN's in the diagonal.
"""
if kind == 'mean_euclidean':
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric='euclidean', **kwargs) / np.sqrt(x.shape[1])
else:
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric=kind, **kwargs)
if isinstance(resps, (dict, OrderedDict)):
dis = OrderedDict()
for layer, resp in resps.items():
dis[layer] = dis_func(resp)
diag = np.diag_indices(dis[layer].shape[0])
dis[layer][diag] = np.nan
else:
dis = dis_func(resps)
dis[np.diag_indices(dis.shape[0])] = np.nan
return dis
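# Example (hypothetical): pairwise dissimilarity of 10 random response vectors.
#   resps = np.random.rand(10, 128)
#   dis = dissimilarity(resps)                       # 10x10, NaN diagonal
#   dis_corr = dissimilarity(resps, kind='correlation')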
def mds(dis, ims=None, kind='metric', seed=None):
"""
Multidimensional scaling
:Args:
dis
Dissimilarity matrix
:Kwargs:
- ims
Image paths
- seed
A seed if you need to reproduce MDS results
- kind ({'classical', 'metric'}, default: 'metric')
'Classical' is based on MATLAB's cmdscale, 'metric' uses
:func:`~sklearn.manifold.MDS`.
"""
df = []
    if ims is None:
        if isinstance(dis, dict):
            ims = map(str, range(len(list(dis.values())[0])))
        else:
            ims = map(str, range(len(dis)))
    if not isinstance(dis, dict):
        dis = OrderedDict([('', dis)])  # uniform handling below
    for layer_name, this_dis in dis.items():
if kind == 'classical':
vals = stats.classical_mds(this_dis)
else:
mds_model = sklearn.manifold.MDS(n_components=2,
dissimilarity='precomputed', random_state=seed)
this_dis[np.isnan(this_dis)] = 0
vals = mds_model.fit_transform(this_dis)
for im, (x,y) in zip(ims, vals):
imname = os.path.splitext(os.path.basename(im))[0]
df.append([layer_name, imname, x, y])
df = pandas.DataFrame(df, columns=['layer', 'im', 'x', 'y'])
# df = stats.factorize(df)
# if self.layers != 'all':
# if not isinstance(self.layers, (tuple, list)):
# self.layers = [self.layers]
# df = df[df.layer.isin(self.layers)]
# plot.mdsplot(df, ax=ax, icons=icons, zoom=zoom)
return df
def cluster(resps, labels, metric=None, clust=None,
bootstrap=True, stratified=False, niter=1000, ci=95, *func_args, **func_kwargs):
if metric is None:
metric = sklearn.metrics.adjusted_rand_score
struct = labels if stratified else None
n_clust = len(np.unique(labels))
if clust is None:
clust = sklearn.cluster.AgglomerativeClustering(n_clusters=n_clust, linkage='ward')
df = []
def mt(data, labels):
labels_pred = clust.fit_predict(data)
qual = metric(labels, labels_pred)
return qual
print('clustering...', end=' ')
for layer, data in resps.items():
labels_pred = clust.fit_predict(data)
qualo = metric(labels, labels_pred)
if bootstrap:
pct = stats.bootstrap_resample(data1=data, data2=labels,
niter=niter, func=mt, struct=struct, ci=None,
*func_args, **func_kwargs)
for i, p in enumerate(pct):
df.append([layer, qualo, i, p])
else:
pct = [np.nan, np.nan]
df.append([layer, qualo, 0, np.nan])
    df = pandas.DataFrame(df, columns=['layer', 'dissimilarity', 'iter',
                                       'bootstrap'])
# df = stats.factorize(df)
return df
def linear_clf(resps, y, clf=None):
if clf is None: clf = sklearn.svm.LinearSVC
df = []
    n_folds = len(y) // len(np.unique(y))
for layer, resp in resps.items():
# normalize to 0 mean and variance 1 for each feature (column-wise)
resp = sklearn.preprocessing.StandardScaler().fit_transform(resp)
cv = sklearn.cross_validation.StratifiedKFold(y,
n_folds=n_folds, shuffle=True)
# from scikit-learn docs:
# need not match cross_val_scores precisely!!!
preds = sklearn.cross_validation.cross_val_predict(clf(),
resp, y, cv=cv)
for yi, pred in zip(y, preds):
df.append([layer, yi, pred, yi==pred])
df = pandas.DataFrame(df, columns=['layer', 'actual', 'predicted', 'accuracy'])
# df = stats.factorize(df)
return df
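# Example (hypothetical): per-layer linear separability of model responses.
#   df = linear_clf(resps, labels)
#   print(df.groupby('layer')['accuracy'].mean())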
class Pixelwise(_Model):
def __init__(self):
"""
Pixelwise model
The most simple model of them all. Uses pixel values only.
"""
super(Pixelwise, self).__init__()
self.name = 'Pixelwise'
self.safename = 'px'
def test(self, test_ims, layers='output', return_dict=False):
self.layers = [self.safename]
ims = self._im2iter(test_ims)
resps = np.vstack([self.load_image(im).ravel() for im in ims])
resps = self._fmt_output(OrderedDict([(self.safename, resps)]), layers,
return_dict=return_dict)
return resps
class Retinex(_Model):
def __init__(self):
"""
Retinex algorithm
Based on A. Torralba's implementation presented at PAVIS 2014.
.. warning:: Experimental
"""
super(Retinex, self).__init__()
self.name = 'Retinex'
self.safename = 'retinex'
    def gen(self, im, thres=20./256, plot=True, save=False):
        impath = im if isinstance(im, str) else None  # keep the path for saving
        im = self.load_image(im)
# 2D derivative
der = np.array([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
im_paint = np.zeros(im.shape)
im_illum = np.zeros(im.shape)
for chno in range(3):
ch = im[:,:,chno]
outv = scipy.ndimage.convolve(ch, der)
outh = scipy.ndimage.convolve(ch, der.T)
out = np.dstack([outv, outh])
# threshold
paint = np.copy(out)
paint[np.abs(paint) < thres] = 0
illum = np.copy(out)
illum[np.abs(illum) >= thres] = 0
# plt.imshow(paint[:,:,0]); plt.show()
# plt.imshow(paint[:,:,1]); plt.show()
# plt.imshow(illum[:,:,0]); plt.show()
# plt.imshow(illum[:,:,1]); plt.show()
# Pseudo-inverse (using the trick from Weiss, ICCV 2001; equations 5-7)
im_paint[:,:,chno] = self._deconvolve(paint, der)
im_illum[:,:,chno] = self._deconvolve(illum, der)
im_paint = (im_paint - np.min(im_paint)) / (np.max(im_paint) - np.min(im_paint))
im_illum = (im_illum - np.min(im_illum)) / (np.max(im_illum) - np.min(im_illum))
# paintm = scipy.misc.imread('paint2.jpg')
# illumm = scipy.misc.imread('illum2.jpg')
# print np.sum((im_paint-paintm)**2)
# print np.sum((im_illum-illumm)**2)
        if plot:
            plt.subplot(131)
            plt.imshow(im)
            plt.subplot(132)
            plt.imshow(im_paint)
            plt.subplot(133)
            plt.imshow(im_illum)
            plt.show()
        if save:
            name, ext = os.path.splitext(impath)
            scipy.misc.imsave('%s_paint%s' % (name, ext), im_paint)
            scipy.misc.imsave('%s_illum%s' % (name, ext), im_illum)
def _deconvolve(self, out, der):
# der = np.dstack([der, der.T])
d = []
gi = []
for i, deri in enumerate([der, der.T]):
d.append(scipy.ndimage.convolve(out[...,i], np.flipud(np.fliplr(deri))))
gi.append(scipy.ndimage.convolve(deri, np.flipud(np.fliplr(deri)), mode='constant'))
d = np.sum(d, axis=0)
gi = np.sum(gi, axis=0)
        gi = np.pad(gi, (der.shape[0]//2, der.shape[1]//2), mode='constant')
gi = scipy.ndimage.convolve(gi, np.array([[1,0,0], [0,0,0], [0,0,0]]))
mxsize = np.max(out.shape[:2])
g = np.fft.fft2(gi, s=(mxsize*2, mxsize*2))
g[g==0] = 1
h = 1/g
h[g==0] = 0
tr = h * np.fft.fft2(d, s=(mxsize*2,mxsize*2))
ii = np.fft.fftshift(np.real(np.fft.ifft2(tr)))
        n = (gi.shape[0] - 5) // 2
im = ii[mxsize - n : mxsize + out.shape[0] - n,
mxsize - n : mxsize + out.shape[1] - n]
return im
class Zoccolan(_Model):
"""
Based on 10.1073/pnas.0811583106
.. warning:: Not implemented fully
"""
def __init__(self):
super(Zoccolan, self).__init__()
self.name = 'Zoccolan'
self.safename = 'zoccolan'
# receptive field sizes in degrees
#self.rfs = np.array([.6,.8,1.])
#self.rfs = np.array([.2,.35,.5])
self.rfs = [10, 20, 30] # deg visual angle
self.oris = np.linspace(0, np.pi, 12)
self.phases = [0, np.pi]
self.sfs = range(1, 11) # cycles per RF size
self.winsize = [5, 5] # size of each patch on the grid
# window size will be fixed in pixels and we'll adjust degrees accordingly
# self.win_size_px = 300
def get_gabors(self, rf):
        lams = float(rf[0]) / np.array(self.sfs)  # lambda = 1./sf
sigma = rf[0]/2./np.pi
# rf = [100,100]
        gabors = np.zeros((len(self.oris), len(self.phases), len(lams), rf[0], rf[1]))
i = np.arange(-rf[0]/2+1,rf[0]/2+1)
#print i
j = np.arange(-rf[1]/2+1,rf[1]/2+1)
ii,jj = np.meshgrid(i,j)
for o, theta in enumerate(self.oris):
x = ii*np.cos(theta) + jj*np.sin(theta)
y = -ii*np.sin(theta) + jj*np.cos(theta)
for p, phase in enumerate(self.phases):
for s, lam in enumerate(lams):
fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
fxx -= np.mean(fxx)
fxx /= np.linalg.norm(fxx)
#if p==0:
#plt.subplot(len(oris),len(lams),count+1)
#plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
#count+=1
gabors[o,p,s,:,:] = fxx
        # plt.show()  # leftover debug call; the gabor plots above are commented out
return gabors
    def run(self, ims):
        ims = self._im2iter(ims)
        return [self.test(im) for im in ims]
def test(self, im):
field = im.shape
num_tiles = (15,15)#[field[0]/10.,field[0]/10.]
size = (field[0]/num_tiles[0], field[0]/num_tiles[0])
V1 = []#np.zeros( gabors.shape + num_tiles )
# tiled_im = im.reshape((num_tiles[0],size[0],num_tiles[1],size[1]))
# tiled_im = np.rollaxis(tiled_im, 1, start=3)
# flat_im = im.reshape((num_tiles[0],num_tiles[1],-1))
for r, rf in enumerate(self.rfs):
def apply_filter(window, this_filter):
this_resp = np.dot(this_filter,window)/np.linalg.norm(this_filter)
# import pdb; pdb.set_trace()
return np.max((0,this_resp)) # returns at least zero
def filter_bank(this_filter,rf):
#print 'done0'
resp = scipy.ndimage.filters.generic_filter(
im, apply_filter, size=rf,mode='nearest',
extra_arguments = (this_filter,))
# import pdb; pdb.set_trace()
#print 'done1'
ii,jj = np.meshgrid(np.arange(0,field[0],size[0]),
np.arange(0,field[1],size[1]) )
selresp = resp[jj,ii]
# maxresp = scipy.ndimage.filters.maximum_filter(
# resp,
# size = size,
# mode = 'nearest'
# )
return np.ravel(selresp)
gabors = self.get_gabors(rf)
#import pdb; pdb.set_trace()
gabors = gabors.reshape(gabors.shape[:3]+(-1,))
# gabors_norms = np.apply_along_axis(np.linalg.norm, -1, gabors)
# import pdb; pdb.set_trace()
# V1.append( np.apply_along_axis(filter_bank, -1, gabors,rf) )
V1resp = np.zeros(gabors.shape[:-1]+num_tiles)
# import pdb; pdb.set_trace()
for i,wi in enumerate(np.arange(0,field[0]-rf[0],size[0])):
for j,wj in enumerate(np.arange(0,field[1]-rf[1],size[1])):
window = im[wi:wi+rf[0],wj:wj+rf[1]]
resp = np.inner(gabors,np.ravel(window))
resp[resp<0] = 0
V1resp[:,:,:,i,j] = resp #/gabors_norms
# print 'done'
V1.append(V1resp)
return [V1]
class GaborJet(_Model):
def __init__(self, nscales=5, noris=8, imsize=256, grid_size=0):
"""
Python implementation of the Gabor-Jet model from Biederman lab.
A given image is transformed with a
Gabor wavelet and certain values on a grid are chosen for the output.
Further details are in `Xu et al., 2009
<http://dx.doi.org/10.1016/j.visres.2009.08.021>`_.
Original implementation copyright 2004 '<NAME>
<http://geon.usc.edu/GWTgrid_simple.m>`_.
:Kwargs:
- nscales (int, default: 5)
Spatial frequency scales
- noris (int, default: 8)
Orientation spacing; angle = np.pi/noris
- imsize ({128, 256}, default: 256)
The image can only be 128x128 px or 256x256 px size.
If the image has a different size, it will be rescaled
**without** maintaining the original aspect ratio.
- grid_size (int, default: 0)
How many positions within an image to take:
- 0: grid of 10x10
- 1: grid of 12x12
- else: grid of imsize x imsize
"""
super(GaborJet, self).__init__()
self.name = 'GaborJet'
self.safename = 'gaborjet'
self.isflat = True
self.nscales = nscales
self.noris = noris
self.imsize = imsize
# generate the grid
if grid_size == 0:
s = imsize/128.
rangeXY = np.arange(20*s, 110*s+1, 10*s) - 1 # 10x10
elif grid_size == 1:
s = imsize/128.
rangeXY = np.arange(10*s, 120*s+1, 10*s) - 1 # 12x12
else:
rangeXY = np.arange(imsize) # 128x128 or 256x256
self.rangeXY = rangeXY.astype(int)
[xx,yy] = np.meshgrid(rangeXY,rangeXY)
self.grid = xx + 1j*yy
self.grid = self.grid.T.ravel() # transpose just to match MatLab's grid(:) behavior
        self.grid_pos = np.vstack([self.grid.imag, self.grid.real]).T  # (N, 2) row/col positions
def test(self,
test_ims,
cell_type='complex',
sigma=2*np.pi,
layers='magnitudes',
return_dict=False
):
"""
Apply GaborJet to given images.
:Args:
test_ims: str or list of str
Image(s) to process with the model.
:Kwargs:
- cell_type (str, default: 'complex')
Choose between 'complex'(40 output values) and 'simple' (80
values)
- sigma (float, default: 2*np.pi)
Control the size of gaussian envelope
- layers ({'all', 'phases', 'magnitudes'}, default: 'magnitudes')
Not truly layers, but two output possibilities: either Fourier
magnitudes or phases.
        - return_dict (bool, default: False)
            Whether phase and grid positions are returned along with the
            magnitude in a dict. If False, only the magnitude is returned.
:Returns:
Magnitude and, depending on 'return_dict', phase.
"""
mags = []
phases = []
imlist = self._im2iter(test_ims)
for imno, im in enumerate(imlist):
sys.stdout.write("\rRunning %s... %d%%" % (self.name,
100*imno/len(imlist)))
sys.stdout.flush()
im = self.load_image(im, resize=(self.imsize, self.imsize), flatten=True)
mag, phase = self._test(im, cell_type=cell_type, sigma=sigma)
mags.append(mag.ravel())
phases.append(phase.ravel())
sys.stdout.write("\rRunning %s... done\n" % self.name)
output = OrderedDict([('phases', np.array(phases)),
('magnitudes', np.array(mags))])
output = self._fmt_output(output, layers, return_dict=return_dict)
return output
def _test(self, im, cell_type='complex', sigma=2*np.pi):
# FFT of the image
im_freq = np.fft.fft2(im)
        # set up the parameters
kx_factor = 2 * np.pi / self.imsize
ky_factor = 2 * np.pi / self.imsize
# setup space coordinates
xy = np.arange(-self.imsize/2, self.imsize/2).astype(float)
[tx,ty] = np.meshgrid(xy, xy)
tx *= kx_factor
ty *= -ky_factor
        # initialize useful variables
nvars = self.nscales * self.noris
if cell_type == 'complex':
mag = np.zeros((len(self.grid), nvars))
phase = np.zeros((len(self.grid), nvars))
else:
mag = np.zeros((len(self.grid), 2*nvars))
phase = np.zeros((len(self.grid), nvars))
for scale in range(self.nscales):
k0 = np.pi/2 * (1/np.sqrt(2))**scale
for ori in range(self.noris):
ka = np.pi * ori / self.noris
k0x = k0 * np.cos(ka)
k0y = k0 * np.sin(ka)
                # generate a kernel at the specified scale and orientation, with DC at the center
# this is a FFT of a Morlet wavelet (http://en.wikipedia.org/wiki/Morlet_wavelet)
                freq_kernel = 2*np.pi * np.exp(-(sigma/k0)**2/2 * ((k0x-tx)**2 + (k0y-ty)**2))
                # (the full Morlet FFT may also subtract a DC-correction term; only this Gaussian survives in this extract)
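                # Hypothetical continuation (the original extract truncates here):
                # filter in the frequency domain, return to the image domain, and
                # sample the complex response at the grid positions
                # (sketch covers the 'complex' cell_type only).
                filt_im = np.fft.ifft2(im_freq * np.fft.ifftshift(freq_kernel))
                resp = filt_im[self.rangeXY[:, None], self.rangeXY[None, :]].T.ravel()
                mag[:, scale * self.noris + ori] = np.abs(resp)
                phase[:, scale * self.noris + ori] = np.angle(resp)
        return mag, phase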
# Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numbers
import numpy as np
import pytest
import dpctl
import dpctl.memory as dpm
import dpctl.tensor as dpt
from dpctl.tensor import Device
@pytest.mark.parametrize(
"shape",
[
(),
(4,),
(0,),
(0, 1),
(0, 0),
(4, 5),
(2, 5, 2),
(2, 2, 2, 2, 2, 2, 2, 2),
5,
],
)
@pytest.mark.parametrize("usm_type", ["shared", "host", "device"])
def test_allocate_usm_ndarray(shape, usm_type):
try:
q = dpctl.SyclQueue()
except dpctl.SyclCreationError:
pytest.skip("Default SYCL queue could not be created")
X = dpt.usm_ndarray(
shape, dtype="d", buffer=usm_type, buffer_ctor_kwargs={"queue": q}
)
Xnp = np.ndarray(shape, dtype="d")
assert X.usm_type == usm_type
assert X.sycl_context == q.sycl_context
assert X.sycl_device == q.sycl_device
assert X.size == Xnp.size
assert X.shape == Xnp.shape
assert X.shape == X.__sycl_usm_array_interface__["shape"]
def test_usm_ndarray_flags():
assert dpt.usm_ndarray((5,)).flags == 3
assert dpt.usm_ndarray((5, 2)).flags == 1
assert dpt.usm_ndarray((5, 2), order="F").flags == 2
assert dpt.usm_ndarray((5, 1, 2), order="F").flags == 2
assert dpt.usm_ndarray((5, 1, 2), strides=(2, 0, 1)).flags == 1
assert dpt.usm_ndarray((5, 1, 2), strides=(1, 0, 5)).flags == 2
assert dpt.usm_ndarray((5, 1, 1), strides=(1, 0, 1)).flags == 3
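# The asserts above imply a bit layout (inferred from the tests, not from dpctl docs):
# bit 1 = C_CONTIGUOUS, bit 2 = F_CONTIGUOUS, so 3 means both, as for any 1-D array.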
@pytest.mark.parametrize(
"dtype",
[
"u1",
"i1",
"u2",
"i2",
"u4",
"i4",
"u8",
"i8",
"f2",
"f4",
"f8",
"c8",
"c16",
b"float32",
np.dtype("d"),
np.half,
],
)
def test_dtypes(dtype):
Xusm = dpt.usm_ndarray((1,), dtype=dtype)
assert Xusm.itemsize == np.dtype(dtype).itemsize
expected_fmt = (np.dtype(dtype).str)[1:]
actual_fmt = Xusm.__sycl_usm_array_interface__["typestr"][1:]
assert expected_fmt == actual_fmt
@pytest.mark.parametrize("dtype", ["", ">f4", "invalid", 123])
def test_dtypes_invalid(dtype):
with pytest.raises((TypeError, ValueError)):
dpt.usm_ndarray((1,), dtype=dtype)
@pytest.mark.parametrize("dt", ["d", "c16"])
def test_properties(dt):
"""
Test that properties execute
"""
X = dpt.usm_ndarray((3, 4, 5), dtype=dt)
assert isinstance(X.sycl_queue, dpctl.SyclQueue)
assert isinstance(X.sycl_device, dpctl.SyclDevice)
assert isinstance(X.sycl_context, dpctl.SyclContext)
assert isinstance(X.dtype, np.dtype)
assert isinstance(X.__sycl_usm_array_interface__, dict)
assert isinstance(X.T, dpt.usm_ndarray)
assert isinstance(X.imag, dpt.usm_ndarray)
assert isinstance(X.real, dpt.usm_ndarray)
assert isinstance(X.shape, tuple)
assert isinstance(X.strides, tuple)
assert X.usm_type in ("shared", "device", "host")
assert isinstance(X.size, numbers.Integral)
assert isinstance(X.nbytes, numbers.Integral)
assert isinstance(X.ndim, numbers.Integral)
assert isinstance(X._pointer, numbers.Integral)
assert isinstance(X.device, Device)
@pytest.mark.parametrize("func", [bool, float, int, complex])
@pytest.mark.parametrize("shape", [tuple(), (1,), (1, 1), (1, 1, 1)])
@pytest.mark.parametrize("dtype", ["|b1", "|u2", "|f4", "|i8"])
def test_copy_scalar_with_func(func, shape, dtype):
X = dpt.usm_ndarray(shape, dtype=dtype)
Y = np.arange(1, X.size + 1, dtype=dtype).reshape(shape)
X.usm_data.copy_from_host(Y.reshape(-1).view("|u1"))
assert func(X) == func(Y)
@pytest.mark.parametrize(
"method", ["__bool__", "__float__", "__int__", "__complex__"]
)
@pytest.mark.parametrize("shape", [tuple(), (1,), (1, 1), (1, 1, 1)])
@pytest.mark.parametrize("dtype", ["|b1", "|u2", "|f4", "|i8"])
def test_copy_scalar_with_method(method, shape, dtype):
X = dpt.usm_ndarray(shape, dtype=dtype)
Y = np.arange(1, X.size + 1, dtype=dtype).reshape(shape)
X.usm_data.copy_from_host(Y.reshape(-1).view("|u1"))
assert getattr(X, method)() == getattr(Y, method)()
@pytest.mark.parametrize("func", [bool, float, int, complex])
@pytest.mark.parametrize("shape", [(2,), (1, 2), (3, 4, 5), (0,)])
def test_copy_scalar_invalid_shape(func, shape):
X = dpt.usm_ndarray(shape)
with pytest.raises(ValueError):
func(X)
def test_index_noninteger():
import operator
X = dpt.usm_ndarray(1, "d")
with pytest.raises(IndexError):
operator.index(X)
@pytest.mark.parametrize(
"ind",
[
tuple(),
(None,),
(
None,
Ellipsis,
None,
),
(2, 2, None, 3, 4),
(Ellipsis,),
(None, slice(0, None, 2), Ellipsis, slice(0, None, 3)),
(None, slice(1, None, 2), Ellipsis, slice(1, None, 3)),
(None, slice(None, -1, -2), Ellipsis, slice(2, None, 3)),
(
slice(None, None, -1),
slice(None, None, -1),
slice(0, None, 3),
slice(1, None, 2),
),
],
)
def test_basic_slice(ind):
X = dpt.usm_ndarray((2 * 3, 2 * 4, 3 * 5, 2 * 7), dtype="u1")
Xnp = np.empty(X.shape, dtype=X.dtype)
S = X[ind]
Snp = Xnp[ind]
assert S.shape == Snp.shape
assert S.strides == Snp.strides
assert S.dtype == X.dtype
def test_slice_constructor_1d():
Xh = np.arange(37, dtype="i4")
default_device = dpctl.select_default_device()
Xusm = dpt.from_numpy(Xh, device=default_device, usm_type="device")
for ind in [
slice(1, None, 2),
slice(0, None, 3),
slice(1, None, 3),
slice(2, None, 3),
slice(None, None, -1),
slice(-2, 2, -2),
slice(-1, 1, -2),
slice(None, None, -13),
]:
assert np.array_equal(
dpt.asnumpy(Xusm[ind]), Xh[ind]
), "Failed for {}".format(ind)
def test_slice_constructor_3d():
Xh = np.empty((37, 24, 35), dtype="i4")
default_device = dpctl.select_default_device()
Xusm = dpt.from_numpy(Xh, device=default_device, usm_type="device")
for ind in [
slice(1, None, 2),
slice(0, None, 3),
slice(1, None, 3),
slice(2, None, 3),
slice(None, None, -1),
slice(-2, 2, -2),
slice(-1, 1, -2),
slice(None, None, -13),
(slice(None, None, -2), Ellipsis, None, 15),
]:
assert np.array_equal(
dpt.to_numpy(Xusm[ind]), Xh[ind]
), "Failed for {}".format(ind)
@pytest.mark.parametrize("usm_type", ["device", "shared", "host"])
def test_slice_suai(usm_type):
Xh = np.arange(0, 10, dtype="u1")
default_device = dpctl.select_default_device()
Xusm = dpt.from_numpy(Xh, device=default_device, usm_type=usm_type)
for ind in [slice(2, 3, None), slice(5, 7, None), slice(3, 9, None)]:
assert np.array_equal(
dpm.as_usm_memory(Xusm[ind]).copy_to_host(), Xh[ind]
), "Failed for {}".format(ind)
def test_slicing_basic():
Xusm = dpt.usm_ndarray((10, 5), dtype="c16")
Xusm[None]
Xusm[...]
Xusm[8]
Xusm[-3]
with pytest.raises(IndexError):
Xusm[..., ...]
with pytest.raises(IndexError):
Xusm[1, 1, :, 1]
Xusm[:, -4]
with pytest.raises(IndexError):
Xusm[:, -128]
with pytest.raises(TypeError):
Xusm[{1, 2, 3, 4, 5, 6, 7}]
X = dpt.usm_ndarray(10, "u1")
X.usm_data.copy_from_host(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09")
int(
X[X[2]]
) # check that objects with __index__ method can be used as indices
Xh = dpm.as_usm_memory(X[X[2] : X[5]]).copy_to_host()
Xnp = np.arange(0, 10, dtype="u1")
assert np.array_equal(Xh, Xnp[Xnp[2] : Xnp[5]])
def test_ctor_invalid_shape():
with pytest.raises(TypeError):
dpt.usm_ndarray(dict())
def test_ctor_invalid_order():
with pytest.raises(ValueError):
dpt.usm_ndarray((5, 5, 3), order="Z")
def test_ctor_buffer_kwarg():
dpt.usm_ndarray(10, buffer=b"device")
with pytest.raises(ValueError):
dpt.usm_ndarray(10, buffer="invalid_param")
Xusm = dpt.usm_ndarray((10, 5), dtype="c16")
X2 = dpt.usm_ndarray(Xusm.shape, buffer=Xusm, dtype=Xusm.dtype)
assert np.array_equal(
Xusm.usm_data.copy_to_host(), X2.usm_data.copy_to_host()
)
with pytest.raises(ValueError):
dpt.usm_ndarray(10, buffer=dict())
def test_usm_ndarray_props():
Xusm = dpt.usm_ndarray((10, 5), dtype="c16", order="F")
Xusm.ndim
repr(Xusm)
Xusm.flags
Xusm.__sycl_usm_array_interface__
Xusm.device
Xusm.strides
Xusm.real
Xusm.imag
try:
dpctl.SyclQueue("cpu")
except dpctl.SyclQueueCreationError:
pytest.skip("Sycl device CPU was not detected")
Xusm.to_device("cpu")
def test_datapi_device():
X = dpt.usm_ndarray(1)
dev_t = type(X.device)
with pytest.raises(TypeError):
dev_t()
dev_t.create_device(X.device)
dev_t.create_device(X.sycl_queue)
dev_t.create_device(X.sycl_device)
dev_t.create_device(X.sycl_device.filter_string)
dev_t.create_device(None)
X.device.sycl_context
X.device.sycl_queue
X.device.sycl_device
repr(X.device)
def _pyx_capi_fnptr_to_callable(
X, pyx_capi_name, caps_name, fn_restype=ctypes.c_void_p
):
import sys
mod = sys.modules[X.__class__.__module__]
cap = mod.__pyx_capi__.get(pyx_capi_name, None)
if cap is None:
raise ValueError(
"__pyx_capi__ does not export {} capsule".format(pyx_capi_name)
)
# construct Python callable to invoke these functions
cap_ptr_fn = ctypes.pythonapi.PyCapsule_GetPointer
cap_ptr_fn.restype = ctypes.c_void_p
cap_ptr_fn.argtypes = [ctypes.py_object, ctypes.c_char_p]
fn_ptr = cap_ptr_fn(cap, caps_name)
callable_maker_ptr = ctypes.PYFUNCTYPE(fn_restype, ctypes.py_object)
return callable_maker_ptr(fn_ptr)
def test_pyx_capi_get_data():
X = dpt.usm_ndarray(17)[1::2]
get_data_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_data",
b"char *(struct PyUSMArrayObject *)",
fn_restype=ctypes.c_void_p,
)
r1 = get_data_fn(X)
sua_iface = X.__sycl_usm_array_interface__
assert r1 == sua_iface["data"][0] + sua_iface.get("offset") * X.itemsize
def test_pyx_capi_get_shape():
X = dpt.usm_ndarray(17)[1::2]
get_shape_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_shape",
b"Py_ssize_t *(struct PyUSMArrayObject *)",
fn_restype=ctypes.c_void_p,
)
c_longlong_p = ctypes.POINTER(ctypes.c_longlong)
shape0 = ctypes.cast(get_shape_fn(X), c_longlong_p).contents.value
assert shape0 == X.shape[0]
def test_pyx_capi_get_strides():
X = dpt.usm_ndarray(17)[1::2]
get_strides_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_strides",
b"Py_ssize_t *(struct PyUSMArrayObject *)",
fn_restype=ctypes.c_void_p,
)
c_longlong_p = ctypes.POINTER(ctypes.c_longlong)
strides0_p = get_strides_fn(X)
if strides0_p:
strides0_p = ctypes.cast(strides0_p, c_longlong_p).contents
strides0_p = strides0_p.value
assert strides0_p == 0 or strides0_p == X.strides[0]
def test_pyx_capi_get_ndim():
X = dpt.usm_ndarray(17)[1::2]
get_ndim_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_ndim",
b"int (struct PyUSMArrayObject *)",
fn_restype=ctypes.c_int,
)
assert get_ndim_fn(X) == X.ndim
def test_pyx_capi_get_typenum():
X = dpt.usm_ndarray(17)[1::2]
get_typenum_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_typenum",
b"int (struct PyUSMArrayObject *)",
fn_restype=ctypes.c_int,
)
typenum = get_typenum_fn(X)
assert type(typenum) is int
assert typenum == X.dtype.num
def test_pyx_capi_get_flags():
X = dpt.usm_ndarray(17)[1::2]
get_flags_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_flags",
b"int (struct PyUSMArrayObject *)",
fn_restype=ctypes.c_int,
)
flags = get_flags_fn(X)
assert type(flags) is int and flags == X.flags
def test_pyx_capi_get_queue_ref():
X = dpt.usm_ndarray(17)[1::2]
get_queue_ref_fn = _pyx_capi_fnptr_to_callable(
X,
"usm_ndarray_get_queue_ref",
b"DPCTLSyclQueueRef (struct PyUSMArrayObject *)",
fn_restype=ctypes.c_void_p,
)
queue_ref = get_queue_ref_fn(X) # address of a copy, should be unequal
assert queue_ref != X.sycl_queue.addressof_ref()
def _pyx_capi_int(X, pyx_capi_name, caps_name=b"int", val_restype=ctypes.c_int):
import sys
mod = sys.modules[X.__class__.__module__]
cap = mod.__pyx_capi__.get(pyx_capi_name, None)
if cap is None:
raise ValueError(
"__pyx_capi__ does not export {} capsule".format(pyx_capi_name)
)
# construct Python callable to invoke these functions
cap_ptr_fn = ctypes.pythonapi.PyCapsule_GetPointer
cap_ptr_fn.restype = ctypes.c_void_p
cap_ptr_fn.argtypes = [ctypes.py_object, ctypes.c_char_p]
cap_ptr = cap_ptr_fn(cap, caps_name)
val_ptr = ctypes.cast(cap_ptr, ctypes.POINTER(val_restype))
return val_ptr.contents.value
def test_pyx_capi_check_constants():
X = dpt.usm_ndarray(17)[1::2]
cc_flag = _pyx_capi_int(X, "USM_ARRAY_C_CONTIGUOUS")
assert cc_flag > 0 and 0 == (cc_flag & (cc_flag - 1))
fc_flag = _pyx_capi_int(X, "USM_ARRAY_F_CONTIGUOUS")
assert fc_flag > 0 and 0 == (fc_flag & (fc_flag - 1))
w_flag = _pyx_capi_int(X, "USM_ARRAY_WRITEABLE")
assert w_flag > 0 and 0 == (w_flag & (w_flag - 1))
bool_typenum = _pyx_capi_int(X, "UAR_BOOL")
assert bool_typenum == np.dtype("bool_").num
byte_typenum = _pyx_capi_int(X, "UAR_BYTE")
assert byte_typenum == np.dtype(np.byte).num
ubyte_typenum = _pyx_capi_int(X, "UAR_UBYTE")
assert ubyte_typenum == np.dtype(np.ubyte).num
short_typenum = _pyx_capi_int(X, "UAR_SHORT")
assert short_typenum == np.dtype(np.short).num
ushort_typenum = _pyx_capi_int(X, "UAR_USHORT")
assert ushort_typenum == np.dtype(np.ushort).num
int_typenum = _pyx_capi_int(X, "UAR_INT")
assert int_typenum == np.dtype(np.intc).num
uint_typenum = _pyx_capi_int(X, "UAR_UINT")
assert uint_typenum == np.dtype(np.uintc).num
long_typenum = _pyx_capi_int(X, "UAR_LONG")
assert long_typenum == np.dtype(np.int_).num
ulong_typenum = _pyx_capi_int(X, "UAR_ULONG")
assert ulong_typenum == np.dtype(np.uint).num
longlong_typenum = _pyx_capi_int(X, "UAR_LONGLONG")
assert longlong_typenum == np.dtype(np.longlong).num
ulonglong_typenum = _pyx_capi_int(X, "UAR_ULONGLONG")
assert ulonglong_typenum == np.dtype(np.ulonglong).num
half_typenum = _pyx_capi_int(X, "UAR_HALF")
assert half_typenum == np.dtype(np.half).num
float_typenum = _pyx_capi_int(X, "UAR_FLOAT")
    assert float_typenum == np.dtype(np.single).num
import copy
import numpy as np
from scipy import ndimage
import gnomonic_projection as gp
import spherical_coordinates as sc
import polygon
from logger import Logger
log = Logger(__name__)
log.logger.propagate = False
"""
Implements the icosahedron projection and stitching with the gnomonic projection (forward and reverse).
Reference:
[1]: https://mathworld.wolfram.com/GnomonicProjection.html
"""
def get_icosahedron_parameters(triangle_index, padding_size=0.0):
"""
Get icosahedron's tangent face's paramters.
Get the tangent point theta and phi. Known as the theta_0 and phi_0.
The erp image origin as top-left corner
:return the tangent face's tangent point and 3 vertices's location.
"""
# reference: https://en.wikipedia.org/wiki/Regular_icosahedron
radius_circumscribed = np.sin(2 * np.pi / 5.0)
radius_inscribed = np.sqrt(3) / 12.0 * (3 + np.sqrt(5))
radius_midradius = np.cos(np.pi / 5.0)
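    # sanity check for unit edge length: circumscribed ~= 0.951, inscribed ~= 0.756, midradius ~= 0.809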
# the tangent point
theta_0 = None
phi_0 = None
# the 3 points of tangent triangle in spherical coordinate
triangle_point_00_theta = None
triangle_point_00_phi = None
triangle_point_01_theta = None
triangle_point_01_phi = None
triangle_point_02_theta = None
triangle_point_02_phi = None
# triangles' row/col range in the erp image
# erp_image_row_start = None
# erp_image_row_stop = None
# erp_image_col_start = None
# erp_image_col_stop = None
theta_step = 2.0 * np.pi / 5.0
# 1) the up 5 triangles
if 0 <= triangle_index <= 4:
        # tangent point on the inscribed sphere
theta_0 = - np.pi + theta_step / 2.0 + triangle_index * theta_step
phi_0 = np.pi / 2 - np.arccos(radius_inscribed / radius_circumscribed)
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = -np.pi + triangle_index * theta_step
triangle_point_00_phi = np.arctan(0.5)
triangle_point_01_theta = -np.pi + np.pi * 2.0 / 5.0 / 2.0 + triangle_index * theta_step
triangle_point_01_phi = np.pi / 2.0
triangle_point_02_theta = -np.pi + (triangle_index + 1) * theta_step
        triangle_point_02_phi = np.arctan(0.5)
__author__ = '<NAME>'
__version__ = '0.0.3'
__date__ = '27.4.2021'
# load libraries
import os
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn import preprocessing
from tabulate import tabulate
from xgboost import XGBClassifier
# load built package functions
from msl.MetaLearning import *
from plot_metric.functions import MultiClassClassification
from msl.cf_matrix import make_confusion_matrix
#fixing random state
random_state=123
# Load dataset (we just selected 4 classes of digits)
X, Y = load_digits(n_class=4, return_X_y=True)
print(f'Predictors:')
print(f'{X}')
print(f'Outcome:')
print(f'{Y}')
# Add noisy features to make the problem harder
random_state = np.random.RandomState(123)
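# Hypothetical continuation (the original extract truncates here): append Gaussian
# noise columns and split, mirroring the usual sklearn noisy-features recipe.
X = np.hstack([X, random_state.randn(X.shape[0], 10 * X.shape[1])])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5, random_state=123)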
"""
Data Abstraction Layer
----------------------
These classes present a high-level API for handling data from various types of neurophysiology experiments.
They do not implement functionality for reading any particular type of data file, though. Rather, they
provide an abstraction layer that separates analysis and visualization from the idiosyncratic
details of any particular data acquisition system.
Each data acquisition system can provide a set of subclasses to adapt its data formats to this API
(as long as the data are reasonably similar), and analysis tools should rely only on this API to ensure
that they will not need to be rewritten in order to support data collected under different acquisition
systems.
This abstraction layer also helps to enforce good coding practice by separating data representation,
analysis, and visualization.
"""
from __future__ import division
import numpy as np
import scipy.signal
from . import util
from collections import OrderedDict
from .stats import ragged_mean
from .baseline import float_mode
from .filter import downsample
class Container(object):
"""Generic hierarchical container.
This class is the basis for most other classes in the DAL.
"""
def __init__(self):
self._meta = OrderedDict()
@property
def parent(self):
return None
@property
def children(self):
return []
@property
def key(self):
"""Key that uniquely identifies this object among its siblings.
"""
return None
@property
def meta(self):
return self._meta
@property
def all_children(self):
allch = [self]
for ch in self.children:
allch.extend(ch.all_children)
return allch
@property
def all_meta(self):
allmeta = OrderedDict()
for obj in self.path:
m = obj.meta
allmeta.update(m)
return allmeta
@property
def path(self):
obj = self
path = []
while obj is not None:
path.append(obj)
obj = obj.parent
return path[::-1]
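    # Example (hypothetical): walk from any DAL object up to its root container.
    #   for obj in tseries.path:
    #       print(type(obj).__name__, obj.key)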
class Dataset(Container):
"""A generic container for RecordingSequence, SyncRecording, Recording, and TSeries instances that
were acquired together.
The boundaries between one experiment and the next are sometimes ambiguous, but
in general we group multiple recordings into an experiment if they are likely to
be analyzed together. Likewise, recordings that have no causal relationship
to each other probably belong in different Dataset containers. For example,
a series of recordings made on the same cell almost certainly belong in the same
Dataset, whereas recordings made from different pieces of tissue probably
belong in different Datasets.
"""
def __init__(self, data=None, meta=None):
Container.__init__(self)
self._data = data
if meta is not None:
self._meta.update(OrderedDict(meta))
@property
def contents(self):
"""A list of data objects (TSeries, Recording, SyncRecording, RecordingSequence)
directly contained in this experiment.
Grandchild objects are not included in this list.
"""
return self._data[:]
def find(self, type):
return [c for c in self.all_children if isinstance(c, type)]
@property
def all_traces(self):
return self.find(TSeries)
@property
def all_recordings(self):
return self.find(Recording)
@property
def all_sync_recordings(self):
return self.find(SyncRecording)
def meta_table(self, objs):
# collect all metadata
meta = []
for i,o in enumerate(objs):
meta.append(o.all_meta)
# generate a list of common fields (in the correct order)
fields = set(meta[0].keys())
for m in meta[1:]:
fields &= set(m.keys())
order = list(meta[0].keys())
for k in order[:]:
if k not in fields:
order.remove(k)
# transpose
tr = OrderedDict()
for k in order:
tr[k] = [m[k] for m in meta]
# create a table
import pandas
return pandas.DataFrame(tr)
@property
def trace_table(self):
return self.meta_table(self.all_traces)
@property
def parent(self):
"""None
This is a convenience property used for traversing the object hierarchy.
"""
return None
@property
def children(self):
"""Alias for self.contents
This is a convenience property used for traversing the object hierarchy.
"""
return self.contents
class RecordingSequence(Container):
# Acquisition?
# RecordingSet?
# Do we need both SyncRecordingSequence and RecordingSequence ?
# Can multiple RecordingSequence instances refer to the same underlying sequence?
# - Let's say no--otherwise we have to worry about unique identification, comparison, etc.
# - Add ___View classes that slice/dice any way we like.
"""Representation of a sequence of data acquisitions.
For example, this could be a single type of acquisition that was repeated ten times,
or a series of ten acquisitions that varies one parameter across ten values.
Usually the recordings in a sequence all use the same set of devices.
Sequences may be multi-dimensional and may vary more than one parameter.
Items in a sequence are usually SyncRecording instances, but may also be
nested RecordingSequence instances.
"""
@property
def type(self):
"""An arbitrary string representing the type of acquisition.
"""
pass
@property
def shape(self):
"""The array-shape of the sequence.
"""
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, item):
"""Return one item (a SyncRecording instance) from the sequence.
"""
def sequence_params(self):
"""Return a structure that describes the parameters that are varied across each
axis of the sequence.
For example, a two-dimensional sequence might return the following:
[
[param1, param2], # two parameters that vary across the first sequence axis
[], # no parameters vary across the second axis (just repetitions)
[param3], # one parameter that varies across all recordings, regardless of its position along any axis
]
Each parameter must be a key in the metadata for a single recording.
"""
@property
def parent(self):
"""None
This is a convenience property used for traversing the object hierarchy.
"""
return None
@property
def children(self):
"""Alias for self.contents
This is a convenience property used for traversing the object hierarchy.
"""
return self.contents
class SyncRecording(Container):
"""Representation of multiple synchronized recordings.
This is typically the result of recording from multiple devices at the same time
(for example, two patch-clamp amplifiers and a camera).
"""
def __init__(self, recordings=None, parent=None):
self._parent = parent
self._recordings = recordings if recordings is not None else OrderedDict()
Container.__init__(self)
@property
def type(self):
"""An arbitrary string representing the type of acquisition.
"""
pass
@property
def devices(self):
"""A list of the names of devices in this recording.
"""
return list(self._recordings.keys())
def __getitem__(self, item):
"""Return a recording given its device name.
"""
return self._recordings[item]
@property
def recordings(self):
"""A list of the recordings in this syncrecording.
"""
return list(self._recordings.values())
def data(self):
return np.concatenate([self[dev].data()[None, :] for dev in self.devices], axis=0)
@property
def parent(self):
return self._parent
@property
def children(self):
return self.recordings
device_tree = {
'patch clamp amplifier': {
'MultiClamp 700': [
'MultiClamp 700A',
'MultiClamp 700B',
],
},
}
class Recording(Container):
"""Representation of a single continuous data acquisition from a single device,
possibly with multiple channels of data (for example, a recording from a single
patch-clamp headstage with input and output channels, or ).
Each channel is described by a single TSeries instance. Channels are often
recorded with the same timebase, but this is not strictly required.
"""
def __init__(self, channels=None, start_time=None, device_type=None, device_id=None, sync_recording=None, **meta):
Container.__init__(self)
self._meta = OrderedDict([
('start_time', start_time),
('device_type', device_type),
('device_id', device_id),
])
self._meta.update(meta)
if channels is None:
channels = OrderedDict()
else:
channels = OrderedDict(channels)
for k,v in channels.items():
assert isinstance(v, TSeries)
self._channels = channels
self._sync_recording = sync_recording
@property
def device_type(self):
"""A string representing the type of device that generated this recording.
Strings should be described in the global ``device_tree``.
"""
return self._meta['device_type']
@property
def channels(self):
"""A list of channels included in this recording.
"""
return self._channels.keys()
@property
def start_time(self):
"""The starting time (unix epoch) of this recording.
"""
return self._meta['start_time']
@property
def device_id(self):
return self._meta['device_id']
@property
def sync_recording(self):
return self._sync_recording
def time_slice(self, start, stop):
return RecordingView(self, start, stop)
def __getitem__(self, chan):
return self._channels[chan]
def data(self):
return np.concatenate([self[ch].data[:,None] for ch in self.channels], axis=1)
@property
def parent(self):
return self.sync_recording
@property
def children(self):
return [self[k] for k in self.channels]
class RecordingView(Recording):
"""A time-slice of a multi channel recording
"""
def __init__(self, rec, start, stop):
self._parent_rec = rec
self._view_slice = (start, stop)
chans = OrderedDict([(k, rec[k]) for k in rec.channels])
meta = rec.meta.copy()
Recording.__init__(self, channels=chans, sync_recording=rec.sync_recording, **meta)
def __getattr__(self, attr):
return getattr(self._parent_rec, attr)
def __getitem__(self, item):
return self._parent_rec[item].time_slice(*self._view_slice)
@property
def parent(self):
return self._parent_rec
# @property
# def source_indices(self):
# """Return the indices of this view on the original Recording.
# """
# v = self
# start = 0
# while True:
# start += self._view_slice.start
# v = v.parent
# if not isinstance(v, RecordingView):
# break
# return start, start + len(self)
class PatchClampRecording(Recording):
"""Recording from a patch-clamp amplifier.
* Current- or voltage-clamp mode
* Minimum one recorded channel ('primary'), possibly more
* Includes stimulus waveform ('command')
* Stimulus metadata description
* Metadata about amplifier state:
* clamp_mode ('ic', 'i0', or 'vc')
* holding potential (vc only)
* holding_current (ic only)
* bridge_balance (ic only)
* lpf_cutoff
* pipette_offset
Should have at least 'primary' and 'command' channels.
Note: command channel values should _include_ holding potential/current!
"""
def __init__(self, *args, **kwds):
meta = OrderedDict()
extra_meta = ['cell_id', 'clamp_mode', 'patch_mode', 'holding_potential', 'holding_current',
'bridge_balance', 'lpf_cutoff', 'pipette_offset', 'baseline_potential',
'baseline_current', 'baseline_rms_noise', 'stimulus']
for k in extra_meta:
meta[k] = kwds.pop(k, None)
self._baseline_data = None
Recording.__init__(self, *args, **kwds)
self._meta.update(meta)
@property
def cell_id(self):
"""Uniquely identifies the cell attached in this recording.
"""
return self._meta['cell_id']
@property
def clamp_mode(self):
"""The mode of the patch clamp amplifier: 'vc', 'ic', or 'i0'.
"""
return self._meta['clamp_mode']
@property
def patch_mode(self):
"""The state of the membrane patch. E.g. 'whole cell', 'cell attached', 'loose seal', 'bath', 'inside out', 'outside out'
"""
return self._meta['patch_mode']
@property
def stimulus(self):
return self._meta.get('stimulus', None)
@property
def holding_potential(self):
"""The command holding potential if the recording is voltage-clamp, or the
resting membrane potential if the recording is current-clamp.
"""
if self.clamp_mode == 'vc':
return self._meta['holding_potential']
else:
return self.baseline_potential
def rounded_holding_potential(self, increment=5e-3):
"""Return the holding potential rounded to the nearest increment.
The default increment rounds to the nearest 5 mV.
"""
hp = self.holding_potential
if hp is None:
return None
return increment * np.round(hp / increment)
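# Worked example: with the default 5 mV increment, a measured holding
# potential of -0.0682 V rounds to 5e-3 * round(-0.0682 / 5e-3) = -0.070 V.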
@property
def holding_current(self):
"""The steady-state pipette current applied during this recording.
"""
if self.clamp_mode == 'ic':
return self._meta['holding_current']
else:
return self.baseline_current
@property
def nearest_test_pulse(self):
"""The test pulse that was acquired nearest to this recording.
"""
@property
def baseline_regions(self):
"""A list of (start,stop) time pairs that cover regions of the recording
the cell is expected to be in a steady state.
"""
return []
@property
def baseline_data(self):
"""All items in baseline_regions concatentated into a single trace.
"""
if self._baseline_data is None:
data = [self['primary'].time_slice(start,stop).data for start,stop in self.baseline_regions]
if len(data) == 0:
data = np.empty(0, dtype=self['primary'].data.dtype)
else:
data = np.concatenate(data)
self._baseline_data = TSeries(data, sample_rate=self['primary'].sample_rate, recording=self)
return self._baseline_data
@property
def baseline_potential(self):
"""The mode potential value from all quiescent regions in the recording.
See float_mode()
"""
if self.meta['baseline_potential'] is None:
if self.clamp_mode == 'vc':
self.meta['baseline_potential'] = self.meta['holding_potential']
else:
data = self.baseline_data.data
if len(data) == 0:
return None
self.meta['baseline_potential'] = float_mode(data)
return self.meta['baseline_potential']
@property
def baseline_current(self):
"""The mode current value from all quiescent regions in the recording.
See float_mode()
"""
if self.meta['baseline_current'] is None:
if self.clamp_mode == 'ic':
self.meta['baseline_current'] = self.meta['holding_current']
else:
data = self.baseline_data.data
if len(data) == 0:
return None
self.meta['baseline_current'] = float_mode(data)
return self.meta['baseline_current']
@property
def baseline_rms_noise(self):
"""The standard deviation of all data from quiescent regions in the recording.
"""
if self.meta['baseline_rms_noise'] is None:
data = self.baseline_data.data
if len(data) == 0:
return None
self.meta['baseline_rms_noise'] = data.std()
return self.meta['baseline_rms_noise']
def _descr(self):
mode = self.clamp_mode
if mode == 'vc':
hp = self.holding_potential
if hp is not None:
hp = int(np.round(hp*1e3))
extra = "mode=VC holding=%s" % hp
elif mode == 'ic':
hc = self.holding_current
if hc is not None:
hc = int(np.round(hc*1e12))
extra = "mode=IC holding=%s" % hc
else:
extra = "mode=%s" % mode
return extra
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self._descr())
class TSeries(Container):
"""A homogeneous time series data set.
This is a representation of a single stream of data recorded over time. The
data must be representable as a single N-dimensional array where the first
array axis is time.
Examples:
* A membrane potential recording from a single current-clamp headstage
* A video stream from a camera
* A digital trigger waveform
TSeries may specify units, a starting time, and either a sample period / sample rate
or an array of time values, one per sample.
Parameters
----------
data : array | None
Array of data contained in this TSeries.
dt : float | None
Optional value specifying the time difference between any two adjacent samples
in the data; inverse of *sample_rate*. See ``TSeries.dt``.
t0 : float | None
Optional time value of the first sample in the data, relative to *start_time*. Default is 0.
See ``TSeries.t0``.
sample_rate : float | None
Optional value specifying the sampling rate of the data; inverse of *dt*.
See ``TSeries.sample_rate``.
start_time : float | None
Optional value giving the absolute starting time of the TSeries as a unix timestamp
(seconds since epoch). See ``TSeries.start_time``.
time_values : array | None
Optional array of the time values for each sample, relative to *start_time*.
This option can be used to specify data with timepoints that are irregularly sampled,
and cannot be used with *dt*, *sample_rate*, or *t0*.
units : str | None
Optional string specifying the units associated with *data*. It is recommended
to use unscaled SI units (e.g. 'V' instead of 'mV') where possible.
See ``TSeries.units``.
meta :
Any extra keyword arguments are interpreted as custom metadata and added to ``self.meta``.
"""
def __init__(self, data=None, dt=None, t0=None, sample_rate=None, start_time=None, time_values=None, units=None, channel_id=None, recording=None, **meta):
Container.__init__(self)
if data is not None and data.ndim != 1:
raise ValueError("data must be a 1-dimensional array.")
if time_values is not None:
if data is not None and time_values.shape != data.shape:
raise ValueError("time_values must have the same shape as data.")
if dt is not None:
raise TypeError("Cannot specify both time_values and dt.")
if sample_rate is not None:
raise TypeError("Cannot specify both time_values and sample_rate.")
if t0 is not None:
raise TypeError("Cannot specify both time_values and t0.")
if dt is not None and sample_rate is not None:
raise TypeError("Cannot specify both sample_rate and dt.")
self._data = data
self._meta = OrderedDict([
('start_time', start_time),
('dt', dt),
('t0', t0),
('sample_rate', sample_rate),
('units', units),
('channel_id', channel_id),
])
self._meta.update(meta)
self._time_values = time_values
self._generated_time_values = None
self._regularly_sampled = None
self._recording = recording
@property
def data(self):
"""The array of sample values.
"""
return self._data
@property
def start_time(self):
"""The clock time (seconds since epoch) corresponding to the sample
where t=0.
If self.t0 is equal to 0, then start_time is the clock time of the
first sample.
"""
return self._meta['start_time']
@property
def sample_rate(self):
"""The sample rate for this TSeries.
If no sample rate was specified, then this value is calculated from
self.dt.
"""
rate = self._meta['sample_rate']
if rate is not None:
return rate
else:
return 1.0 / self.dt
@property
def dt(self):
"""The time step between samples for this TSeries.
If no time step was specified, then this value is calculated from
self.sample_rate.
If neither dt nor sample_rate was specified, then this value
is calculated as the difference between the first two items in
time_values.
If no timing information was specified at all, then accessing this
property raises TypeError.
"""
# need to be very careful about how we calculate dt and sample rate
# to avoid fp errors.
dt = self._meta['dt']
if dt is not None:
return dt
rate = self._meta['sample_rate']
if rate is not None:
return 1.0 / rate
t = self.time_values
if t is not None:
# assume regular sampling.
# don't cache this value; we want to remember whether the user
# provided dt or samplerate
return t[1] - t[0]
raise TypeError("No sample timing is specified for this trace.")
@property
def t0(self):
"""The value of the first item in time_values.
Setting this property causes the entire array of time values to shift.
"""
t0 = self._meta['t0']
if t0 is not None:
return t0
if self._time_values is not None:
return self._time_values[0]
return 0
@t0.setter
def t0(self, t0):
if self.t0 == t0:
return
if self.has_time_values:
self._time_values = self._time_values + (t0 - self._time_values[0])
else:
self._meta['t0'] = t0
self._generated_time_values = None
@property
def t_end(self):
"""The last time value in this TSeries.
"""
return self.time_at(len(self) - 1)
def time_at(self, index):
"""Return the time at a specified index(es).
Parameters
----------
index : int | array-like
The array index(es) for which time value(s) will be returned.
"""
if not self.has_timing:
raise TypeError("No sample timing is specified for this trace.")
if not np.isscalar(index):
index = np.asarray(index)
if self.has_time_values:
return self.time_values[index]
else:
# Be careful to minimize fp precision errors --
# time * dt != time / sample_rate != time * (1 / sample_rate)
sample_rate = self._meta.get('sample_rate')
if sample_rate is None:
return (index * self.dt) + self.t0
else:
return (index * (1.0 / sample_rate)) + self.t0
def index_at(self, t, index_mode=None):
"""Return the index at specified timepoint(s).
Parameters
----------
t : float | array-like
The time value(s) for which array index(es) will be returned.
index_mode : str
Integer conversion mode: 'round' (default), 'floor', or 'ceil'. This argument is ignored
if self.has_time_values is True.
"""
if not self.has_timing:
raise TypeError("No sample timing is specified for this trace.")
if not np.isscalar(t):
t = np.asarray(t)
if self.has_time_values:
inds1 = np.searchsorted(self.time_values, t)
inds0 = inds1 - 1
# select closest sample
dif1 = abs(self.time_values[np.clip(inds1, 0, len(self)-1)] - t)
dif0 = abs(self.time_values[inds0] - t)
inds = np.where(dif0 < dif1, inds0, inds1)
if np.isscalar(t):
inds = int(inds)
return inds
else:
# Be careful to avoid fp precision errors when converting back to integer index
sample_rate = self._meta.get('sample_rate')
if sample_rate is None:
inds = (t - self.t0) * (1.0 / self.dt)
else:
inds = (t - self.t0) * sample_rate
if index_mode is None or index_mode == 'round':
inds = np.round(inds)
elif index_mode == 'floor':
inds = np.floor(inds)
elif index_mode == 'ceil':
inds = np.ceil(inds)
else:
raise ValueError("index_mode must be 'round', 'ceil', or 'floor'; got %r" % index_mode)
if np.isscalar(t):
return int(inds)
else:
return inds.astype(int)
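# Round-trip sketch for a regularly-sampled trace: time_at and index_at are
# inverses up to rounding. Assuming dt=1e-4 and t0=0:
#
#   ts.time_at(250)                            # -> 0.025
#   ts.index_at(0.025)                         # -> 250 (default 'round' mode)
#   ts.index_at(0.02501, index_mode='floor')   # -> 250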
@property
def time_values(self):
"""An array of sample time values.
Time values are specified in seconds relative to start_time.
If no sample time values were provided for this TSeries, then the array
is automatically generated based on other timing metadata (t0, dt,
sample_rate).
If no timing information at all was specified for this TSeries, then
accessing this property raises TypeError.
"""
if not self.has_timing:
raise TypeError("No sample timing is specified for this trace.")
if self.has_time_values:
return self._time_values
if self._generated_time_values is None:
self._generated_time_values = self.time_at(np.arange(len(self.data)))
return self._generated_time_values
@property
def regularly_sampled(self):
"""Boolean indicating whether the samples in this TSeries have equal
time intervals.
If either dt or sample_rate was specified for this trace, then this
property is True. If only time values were given, then this property
is True if the intervals between samples differ by less than 1%.
If no sample timing was specified for this TSeries, then this property
is False.
"""
if not self.has_timing:
return False
if not self.has_time_values:
return True
if self._regularly_sampled is None:
tvals = self.time_values
dt = np.diff(tvals)
avg_dt = dt.mean()
self._regularly_sampled = bool(np.all(np.abs(dt - avg_dt) < (avg_dt * 0.01)))
return self._regularly_sampled
@property
def has_timing(self):
"""Boolean indicating whether any timing information was specified for
this TSeries.
"""
return (self.has_time_values or
self._meta['dt'] is not None or
self._meta['sample_rate'] is not None)
@property
def has_time_values(self):
"""Boolean indicating whether an array of time values was explicitly
specified for this TSeries.
"""
return self._time_values is not None
def time_slice(self, start, stop, index_mode=None):
"""Return a view of this trace with a specified start/stop time.
Times are given relative to t0, and may be None to specify the
beginning or end of the trace.
Parameters
----------
start : float
Time at the start of the slice
stop : float
Time at the end of the slice (non-inclusive)
index_mode : str
See index_at for a description of this parameter.
"""
i1 = max(0, self.index_at(start, index_mode)) if start is not None else None
i2 = max(0, self.index_at(stop, index_mode)) if stop is not None else None
return self[i1:i2]
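# Usage sketch: extract a 10 ms window starting 5 ms into a trace. The result
# is a view on the parent trace (no data copy); the stop time is non-inclusive.
#
#   window = ts.time_slice(0.005, 0.015)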
def value_at(self, t, interp='linear'):
"""Return the value of this trace at specific timepoints.
By default, values are linearly interpolated from the data array.
Parameters
----------
t : float | array-like
The time value(s) at which data value(s) will be returned.
interp : 'linear' | 'nearest'
If 'linear', then ``numpy.interp`` is used to interpolate values between adjacent samples.
If 'nearest', then the sample nearest to each time value is returned.
Default is 'linear'.
"""
if not np.isscalar(t):
t = np.asarray(t)
if interp == 'linear':
return np.interp(t, self.time_values, self.data)
elif interp == 'nearest':
inds = self.index_at(t)
return self.data[inds]
else:
raise ValueError('unknown interpolation mode "%s"' % interp)
@property
def units(self):
"""Units string for the data in this TSeries.
"""
return self._meta['units']
@property
def shape(self):
"""The shape of the array stored in this TSeries.
"""
return self.data.shape
def __len__(self):
return self.shape[0]
@property
def duration(self):
"""Duration of this TSeries in seconds.
If time values are specified for this trace, then this property
is the difference between the first and last time values.
If only a sample rate or dt are specified, then this returns
``len(self) * dt``.
"""
if self.has_time_values:
return self.time_values[-1] - self.t0
else:
return len(self) * self.dt
@property
def ndim(self):
"""Number of dimensions of the array contained in this TSeries.
"""
return self.data.ndim
@property
def channel_id(self):
"""The name of the Recording channel that contains this TSeries.
For example::
trace = my_recording['primary']
trace.recording # returns my_recording
trace.channel_id # returns 'primary'
"""
return self._meta['channel_id']
@property
def recording(self):
"""The Recording that contains this trace.
"""
return self._recording
def copy(self, data=None, time_values=None, **kwds):
"""Return a copy of this TSeries.
The new TSeries will have the same data, timing information, and metadata
unless otherwise specified in the arguments.
Parameters
----------
data : array | None
If specified, sets the data array for the new TSeries.
time_values : array | None
If specified, sets the time_values array for the new TSeries.
kwds :
All extra keyword arguments will overwrite metadata properties.
These include dt, sample_rate, t0, start_time, units, and
others.
"""
if data is None:
data = self.data.copy()
if time_values is None:
tval = self._time_values
if tval is not None:
tval = tval.copy()
else:
tval = time_values
meta = self._meta.copy()
meta.update(kwds)
return TSeries(data, time_values=tval, recording=self.recording, **meta)
@property
def parent(self):
return self.recording
def __getitem__(self, item):
if isinstance(item, slice):
return TSeriesView(self, item)
else:
raise TypeError("Invalid TSeries slice: %r" % item)
def downsample(self, n=None, f=None):
"""Return a downsampled copy of this trace.
Parameters
----------
n : int
(optional) number of samples to average
f : float
(optional) desired target sample rate
"""
if not self.regularly_sampled:
raise TypeError("downsample requires regularly-sampled data.")
# choose downsampling factor
if None not in (f, n):
raise TypeError("Must specify either n or f (not both).")
if n is None:
if f is None:
raise TypeError("Must specify either n or f.")
n = int(np.round(self.sample_rate / f))
if abs(n - (self.sample_rate / f)) > 1e-6:
raise ValueError("Cannot downsample to %gHz; the resulting downsample factor is not an integer (try TSeries.resample instead)." % f)
if n == 1:
return self
if n <= 0:
raise Exception("Invalid downsampling factor: %d" % n)
# downsample
data = downsample(self.data, n, axis=0)
# handle timing
tvals = self._time_values
if tvals is not None:
tvals = tvals[::n]
dt = self._meta['dt']
if dt is not None:
dt = dt * n
sr = self._meta['sample_rate']
if sr is not None:
sr = float(sr) / n
return self.copy(data=data, time_values=tvals, dt=dt, sample_rate=sr)
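# Usage sketch: average every 10 samples, e.g. reducing a 50 kHz trace to
# 5 kHz. Timing metadata is rescaled consistently (dt*10, sample_rate/10).
#
#   ds = ts.downsample(n=10)    # or equivalently: ts.downsample(f=5000)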
def resample(self, sample_rate):
"""Return a resampled copy of this trace.
Parameters
----------
sample_rate : float
The new sample rate of the returned TSeries
Notes
-----
Lowpass filter followed by linear interpolation to extract new samples.
Uses a bessel filter to avoid ringing artifacts, with cutoff=sample_rate
and order=2 chosen to yield decent antialiasing and minimal blurring.
scipy.resample was avoided due to ringing and edge artifacts.
"""
if self.sample_rate == sample_rate:
return self
if not self.regularly_sampled:
raise TypeError("resample requires regularly-sampled data.")
ns = int(np.round(len(self) * sample_rate / self.sample_rate))
# scipy.resample causes ringing and edge artifacts (freq-domain windowing
# did not seem to help)
# data = scipy.signal.resample(self.data, ns)
# bessel filter gives reasonably good antialiasing with no ringing or edge
# artifacts
from .filter import bessel_filter
filt = bessel_filter(self, cutoff=sample_rate, order=2)
t1 = self.time_values
t2 = np.arange(t1[0], t1[-1], 1.0/sample_rate)
# linearly interpolate the filtered data onto the new time base
data = np.interp(t2, t1, filt.data)
return self.copy(data=data, dt=None, sample_rate=sample_rate)
"""
Created on Thu Oct. 10 2019
Recent changes for version 0.1.1:
1) Instead of giving the optical penetration depth as input, only the
complex refractive index "n" is given. This is a material parameter, so
the input is given in the simulation --> add_layer(.) command.
Now the "LB" and "TMM" sources are initialized almost in the same way.
2) One of the outputs of sim.run() is T. It is now a
3-dimensional array, with dim0 = time; dim1 = space; dim2 = subsystem.
3) The input for the visual class in the v.contour() function should not be
a string but just numbers corresponding to the different systems.
@author: <NAME>
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from bspline import Bspline
from bspline.splinelab import aptknt
import time
from matplotlib.animation import FuncAnimation as movie
from tqdm import tqdm #Progressbar
#==============================================================================
class temperature(object):
def __init__(self):
self.plt_points = 30 #number of points in x grid
self.length = np.array([0,0]) #length of x space,starting from 0
self.Left_BC_Type = 1 #Boundary conditions Default is Neumann
self.Right_BC_Type = 1 #1=> Neumann; 0=> Dirichlet
self.init = lambda x: 300+0*x # initial temperature of probe
self.n = np.array([1,1],dtype=complex) # Initial refractive index air|...|air
self.conductivity = [1] #This gets deleted after initialisation
self.heatCapacity = [1] #those values are just here to make space
self.rho = [1] #Actual values are given, when 'addLayer(length, conductivity,heatCapacity,rho)' is executed
self.collocpts = 12
self.setup = False #first-time setup flag so the matrices are not computed twice
def getProperties(self): #to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Temperature')
#for every layer, a function to calculate the derivative of k(T)
def diff_conductivity(self,phi,num_of_material):
eps =1e-9
dc = (self.conductivity[num_of_material](phi+eps)-self.conductivity[num_of_material](phi))/eps
return(dc)
#Creating the key matrices for B-splines. Those are A0,A1,A2
#A0 => Zero derivative; A1 => 1st order derivative....
#We create the matrices for every layer, with respective length etc.,
#then we put them together into Abig => Boundary and interface conditions are applied here.
def Msetup(self):
#Deleting the first element of the default initialization
#After creating the elements with 'addLayer' we don't need them!
if not self.setup:
self.length = self.length[1:]
self.conductivity = self.conductivity[1:]
self.heatCapacity = self.heatCapacity[1:]
self.rho = self.rho[1:]
self.setup = True
#Length and number of grid points for each respective layer
length = self.length
plt_points = self.plt_points
num_of_points = self.collocpts #Number of points per layer used in the spline for collocation
order = 5 #order of the spline
x = np.array(np.zeros([np.size(length)-1,num_of_points]))
x_plt = np.array(np.zeros([np.size(length)-1,plt_points]))
knot_vector = np.array(np.zeros([np.size(length)-1,num_of_points+order+1]))
basis = np.array(np.zeros(np.size(length)-1))
A0h = []; A1h = []; A2h = []; Ch = [];
LayerMat = np.array([np.zeros((num_of_points,num_of_points))])
#Create all the big matrices A0,A1,A2 & C. C is used to map onto a fine mesh in x-space.
#For every layer we set up splines between the boundaries
for i in range(0,np.size(length)-1):
x[i,:] = np.linspace(length[i], length[i+1] , num_of_points)
x_plt[i,:] = np.linspace(length[i], length[i+1] , plt_points)
knot_vector[i,:] = aptknt(x[i,:], order) #prepare for Spline matrix
basis = Bspline(knot_vector[i,:],order)
A0hinter = basis.collmat(x[i,:], deriv_order = 0); A0hinter[-1,-1] = 1
A1hinter = basis.collmat(x[i,:], deriv_order = 1); A1hinter[-1] = -np.flip(A1hinter[0],0)
A2hinter = basis.collmat(x[i,:], deriv_order = 2); A2hinter[-1,-1] = 1
Chinter = basis.collmat(x_plt[i,:], deriv_order = 0); Chinter[-1,-1] = 1
LayerMat = np.append(LayerMat,np.array([np.dot(A2hinter,np.linalg.inv(A0hinter))]),axis = 0)
A0h = np.append(A0h,A0hinter)
A1h = np.append(A1h,A1hinter)
A2h = np.append(A2h,A2hinter)
Ch = np.append(Ch,Chinter)
#Reshape the long string of appended matrices, such that
#rows: x-points; columns: i-th basis spline
LayerMat = LayerMat[1:,:,:]
A0h = np.reshape(A0h, (-1,num_of_points))
A1h = np.reshape(A1h, (-1,num_of_points))
A2h = np.reshape(A2h, (-1,num_of_points))
Ch = np.reshape(Ch,(-1,num_of_points))
#Ch => More points in x, but same number of basis splines
#Clearing the interface points, to not double count
N = num_of_points
plp = plt_points
interfaces = np.shape(x)[0]-1
sizeA = np.shape(x)[0]*N-interfaces
sizeCb = np.shape(x)[0]*plp-interfaces
Abig = np.zeros([sizeA,sizeA])
A1b = np.zeros([sizeA,sizeA])
A2b = np.zeros([sizeA,sizeA])
Cb = np.zeros([sizeCb,sizeA])
#Clearing the double counts from the space grid
xflat = x.flatten()
x_plt_flat = x_plt.flatten()
#index of double counts
doublec = np.array([np.arange(1,len(length)-1)])*N
doublec_plt = np.array([np.arange(1,len(length)-1)])*plp
xflat = np.delete(xflat,doublec)
x_plt_flat = np.delete(x_plt_flat,doublec_plt)
#Filling the big matrices.
startA = 0; endA = N-1
startC = 0; endC = plp-1
for i in range(0,interfaces+1):
Abig[startA:endA,startA:endA+1] = A0h[startA+i:endA+i,:]
A1b[startA:endA+1,startA:endA+1] = A1h[startA+i:endA+i+1,:]
A2b[startA:endA+1,startA:endA+1] = A2h[startA+i:endA+i+1,:]
Cb[startC:endC+1,startA:endA+1] = Ch[startC+i:endC+i+1,:]
startA += N-1; endA += N-1
startC += plp-1; endC += plp-1
#Create A00 with no interface condition to correctly compute phi in loop
#The copy needs to be done before interface conditions are applied in Abig
A00 = Abig.copy()
A00[-1,-1] = 1;
#Here we make init, conductivity & capacity all functions, in case they are
# given as integers or floats. Also throw warnings if not every layer has a
# conductivity or capacity ============================================
#Making init a function, in case it is given as a scalar
if np.size(self.init) == 1 and isinstance(self.init,(int,float)):
dummy = self.init
self.init = lambda x: dummy + 0*x
if len(length) > 2: #multilayer case
if len(length)-1 != len(self.heatCapacity) or len(length)-1 != len(self.conductivity):
print('--------------------------------------------------------')
print('The number of different layers must match the number of '\
'inputs for Conductivity, heatCapacity, rho.')
print('--------------------------------------------------------')
if np.size(self.conductivity) != interfaces+1:
print('--------------------------------------------------------')
print('Not every layer has been given a conductivity function. '\
'Adjust your input of the conductivity functions with respect to the layers.')
print('--------------------------------------------------------')
if np.size(self.heatCapacity) != interfaces+1:
print('--------------------------------------------------------')
print('Not every layer has been given a heatCapacity function value. '\
'Adjust your input of the heatCapacity functions with respect to the layers.')
print('--------------------------------------------------------')
#Make Functions in case heat capacity/conductivity are given as variables
if not (all(self.conductivity) or all(self.heatCapacity) or all(self.init)):
print('No heatCapacity, conductivity or initial function given.')
print('--------------------------------------------------------')
#make the conductivity always a function
if len(length) >2 or np.size(self.conductivity)>=2:
for j in list(range (0,np.size(self.conductivity))):
if isinstance(self.conductivity[j],(int,float,list)) :
dummy3 = self.conductivity[j]
self.conductivity[j] = (lambda b: lambda a: b+0*a)(dummy3)
#make the heat capacity always a function
for j in list(range (0,np.size(self.heatCapacity))):
if isinstance(self.heatCapacity[j],(int, float,list)) :
dummy4 = self.heatCapacity[j]
self.heatCapacity[j] = (lambda b: lambda a: b+0*a)(dummy4)
else :
if isinstance(self.conductivity[0],(int,float)):
dummy1 = self.conductivity
self.conductivity = [lambda phi: dummy1 + 0*phi]
if isinstance(self.heatCapacity[0],(int,float)):
dummy2 = self.heatCapacity
self.heatCapacity = lambda phi: dummy2 + 0*phi
self.heatCapacity = [self.heatCapacity]
#End of function creation for init(x), conductivity[l](phi), heatCapacity[l](phi)
# with respect to every layer 'l' =====================================
def interconditions(phi,interfaces):
N = num_of_points
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = self.conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = self.conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
#Initial Electron temperature
initphi = self.init(xflat)
initphi_large = self.init(x_plt_flat)
intercon = interconditions(initphi,interfaces)
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for i in range(0,interfaces):
Abig[end_i,start_i:end_i] = intercon[0][i][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon[1][i][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon[0][i][-1] -intercon[1][i][0]
start_i += N-1; end_i += N-1
Abig[-1,-1] = 1 #to correct Cox algorithm
#Now Matrix Abig is completed and interface condition is applied.
#Treating 2 types of boundary conditions: 0=> Dirichlet; 1=> Neumann,
# where the 0th and -1st rows need to be first-order derivatives for flux.
neumannBL = A1b[0].copy();
neumannBR = A1b[-1].copy();
if self.Left_BC_Type == 1: Abig[0] = -neumannBL
if self.Right_BC_Type == 1: Abig[-1] = neumannBR
#Clear for BC! (first and last row need to be cleared to correctly apply BC)
A1b[0] = 0; A2b[0] = 0;
A1b[-1] = 0; A2b[-1] = 0;
#Get initial c coefficients for splines using init (=phi_init)
c = np.dot(np.linalg.inv(A00),self.init(xflat))
#Passed on properties to the simulation class
return(c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h)
def addLayer(self,L,refind,conductivity,heatCapacity,rho):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
self.length = np.append(self.length,self.length[-1]+L)
#Squeeze in the refractive index between the two layers of air: air|...|...|air
self.n = np.concatenate((self.n[:-1],[refind],[self.n[-1]]))
self.conductivity.append(conductivity)
self.heatCapacity.append(heatCapacity)
self.rho = np.append(self.rho,rho)
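#Usage sketch (hypothetical material values): a 20 nm film on a 100 nm layer.
#Lengths accumulate, and each "n" is squeezed between the two air layers:
# t = temperature()
# t.addLayer(20e-9, 2.9+3.0j, lambda T: 90+0*T, lambda T: 0.1*T, 8.9e3)
# t.addLayer(100e-9, 5.6+0.4j, lambda T: 130+0*T, lambda T: 0.2*T, 2.3e3)
# t.n is now [1, 2.9+3j, 5.6+0.4j, 1]; the duplicate leading 0 in t.length
# is dropped later in Msetup().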
#==============================================================================
class simulation(object):
def __init__(self,num_of_temp,source):
self.temp_data = temperature() #import the temperature object
self.num_of_temp = num_of_temp #1 if only electron temp. 2 if electron and lattice temp.
self.start_time = 0 #starting time (can be negative)
self.final_time = 10 #time when simulation stops
self.time_step = [] #can either be given or is automatically calculated in stability
self.left_BC = 0 #function or constant defining the boundary condition
self.right_BC = 0 #on the left or right side of the problem.
self.stability_lim = [270,3000]
self.temp_data_Lat = [] #Default case is without lattice temperature
self.temp_data_Spin = []
if num_of_temp >= 2: #if Lattice temp is considered
self.temp_data_Lat = temperature() #in case also a lattice module is given
self.coupling = [] #Coupling between Electron and Lattice system
self.left_BC_L = 0 #Setting the default to zero flux
self.right_BC_L = 0 #The BC type is indicated in the temperature class
if num_of_temp == 3: #In case spin coupling is also considered
self.temp_data_Spin = temperature()
self.coupling_LS = [] #Coupling between Lattice and Spin system
self.coupling_SE = [] #Coupling between Electron and Spin system
self.left_BC_S = 0 #Default zero flux Neumann boundary conditions
self.right_BC_S = 0 #On both sides
self.source = source #object source can be passed on
#to depict the properties of the object
def getProperties(self):
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Simulation')
def changeInit(self,system,function):
"""
Change the initial condition of every system.
.changeInit(system,function) has 2 input arguments
system --> string "electron" or "lattice" or "spin"
function --> a function handle or a number defining the value of the
system at t=0 over the entire domain x.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
self.temp_data.init = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
self.temp_data_Lat.init = function
if (system == "spin") or (system == "Spin") or (system == 3):
self.temp_data_Spin.init = function
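#Usage sketch (hypothetical values): a 300 K baseline with a hot surface layer.
#'source' is an assumed, previously constructed source object.
# sim = simulation(2, source)
# sim.changeInit("electron", lambda x: 300 + 50*np.exp(-x/20e-9))
# sim.changeInit("lattice", 300) #scalars get wrapped into functions in Msetup()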
def changeBC_Type(self,system,side,BCType):
"""
Function to change the type of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Type(system,side,BCType) has 3 inputs, all of them are strings.
system --> "electron" or "lattice" or "spin". Altenatively: "1", "2", "3"
side --> "left" or "right"
BCType --> "dirichlet" fixing the value/ "neumann" fixing the flux.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Right_BC_Type = 1
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Right_BC_Type = 1
if (system == "spin") or (system == "Spin") or (system == 3):
print("Line 326 Spinsystem")
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Right_BC_Type = 1
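#Usage sketch: fix the electron temperature value on the left edge (Dirichlet)
#and keep a zero-flux (Neumann) condition on the right edge.
# sim.changeBC_Type("electron", "left", "dirichlet")
# sim.changeBC_Type("electron", "right", "neumann")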
def changeBC_Value(self,system,side,function):
"""
Function to change the value of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Value(system,side,function) the first two are strings,
the last one is a function handle or a number.
system --> "electron" or "lattice" or "spin"| Altenatively: "1", "2", "3"
side --> "left" or "right"
function--> function or number fixing the value on the boundaries for all times.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
self.left_BC = function
if side == "right":
self.right_BC = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
self.left_BC_L = function
if side == "right":
self.right_BC_L = function
if (system == "spin") or (system == "Spin") or (system == 3):
if side == "left":
self.left_BC_S = function
if side == "right":
self.right_BC_S = function
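#Usage sketch: a left boundary value that ramps in time and a constant right
#value. changeBC_Type() controls *how* these values are applied (value vs. flux).
# sim.changeBC_Value("electron", "left", lambda t: 300 + 1e12*t)
# sim.changeBC_Value("electron", "right", 300)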
def addSubstrate(self,name = "silicon"):
"""
Automatically create in the silicon substrate using input
parameters, mostly taken from:
Contribution of the electron-phonon interaction
to Lindhard energy partition at low energy in Ge and Si
detectors for astroparticle physics applications, by
<NAME> and <NAME>
Note: Refractive index for 400 nm light!
"""
if (name == "Silicon") or (name =="silicon") or (name =="Si"):
k_el_Si = 130#W/(m*K);
k_lat_Si = lambda T: np.piecewise(T,[T<=120.7,T>120.7],\
[lambda T: 100*(0.09*T**3*(0.016*np.exp(-0.05*T)+np.exp(-0.14*T))),
lambda T: 100*(13*1e3*T**(-1.6))])
rho_Si = 2.32e3#kg/(m**3)
C_el_Si = lambda Te: 150/rho_Si *Te
C_lat_Si = 1.6e6/rho_Si
G_Si = 1e17*18#W/(m**3*K)
#Set three layers of Silicon after each other.
#The space resolution on the Film|Substrate edge is high
#and decreases as one moves in bulk direction
if self.num_of_temp == 2:#Lattice only in the 2T
self.temp_data_Lat.addLayer(20e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100000e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
#In the 1 and 2 temperature case electron always gets appended
self.temp_data.addLayer(20e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100000e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
def addLayer(self,L,n,conductivity,heatCapacity,rho,coupling=0,*args):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
#check all input arguments and make them lists, for the multilayer case
#wrap scalars (int or float) in a list
if not isinstance(conductivity, (list, np.ndarray)):
conductivity = [conductivity]
if not isinstance(heatCapacity, (list, np.ndarray)):
heatCapacity = [heatCapacity]
#do typecheck only for the lattice system in the 2TM-case
if self.num_of_temp == 2:
if np.size(conductivity) < 2 or np.size(heatCapacity) < 2:
print('Lattice parameters are missing.\n Add parameters for the lattice system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
#Only electron-lattice coupling is under consideration
self.coupling = np.append(self.coupling,coupling)
#do typecheck for the Lattice and the Spin system
if self.num_of_temp == 3:
if np.size(conductivity) < 3 or np.size(heatCapacity) < 3 or np.size(coupling) < 3:
print('Input parameters are missing.\n Add parameters for '\
'conductivity/heatCapacity or coupling for Lattice/Spin system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
self.temp_data_Spin.addLayer(L,n,conductivity[2],heatCapacity[2],rho)
#In the 3TM case the coupling input arg is a vector of length 3. Unwrap it:
self.coupling = np.append(self.coupling,coupling[0])
self.coupling_LS = np.append(self.coupling_LS,coupling[1])
self.coupling_SE = np.append(self.coupling_SE,coupling[2])
#For the electronic system always add the parameters!
self.temp_data.addLayer(L,n,conductivity[0],heatCapacity[0],rho)
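#Usage sketch for the two-temperature case (hypothetical gold-like values):
#conductivity/heatCapacity are [electron, lattice] lists, coupling is a scalar.
# s = simulation(2, source)
# s.addLayer(20e-9, 0.2+3.4j,
# [lambda Te: 318*Te/300, 2.5], #k_el(T), k_lat (scalars are wrapped)
# [lambda Te: 71*Te, 2.4e6], #C_el(T), C_lat
# 19.3e3, 2.2e16) #rho, G (E-L coupling)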
def interconditions(self,phi,interfaces,conductivity,N,A1h):
"""
A function which gives back an array where the intereface condition is returned
for the left and right side of the interface. Gets called in the E.E.-loop.
"""
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
def sourceprofile(self,absorptionprofile,timeprofile,xflat,x0,t,N):
#Consider Lambert Beers law in space and different types in time
if (absorptionprofile == "LB") and (self.source.fluence is not 0):
optical_penetration_depth = self.source.ref2delta(self.temp_data.n,self.source.lambda_vac)
if (timeprofile == "Gaussian"):
print('-----------------------------------------------------------')
print('Lambert-Beer absorption law and a Gaussian time profile are applied as source.')
print('-----------------------------------------------------------')
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian)
if (timeprofile == "repGaussian") or (timeprofile == "RepGaussian"):
print('-----------------------------------------------------------')
print('Lambert-Beer absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
'The frequency of the pulse repetition has to be indicated via s.frequency = number (in 1/seconds).')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
#If num_of_pulses is too big to fit in the time range [t0, t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
[ttime,amplitude] = self.source.loadData
#To extract the custom time profile and the scaling factor
[sourcemat,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,optical_penetration_depth[0])
#To get the space profile: Source with different optical penetration depth defined on the xflat gird
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime,scaling)
#Consider Transfer Matrix in space and different types in time
if (absorptionprofile == "TMM") and (self.source.fluence is not 0):
"""
This will implement a transfer matrix approach to local absorption
instead of using the Lambert-Beer law considered in the Gaussian
source type.
"""
#Multiplying by 1e9, since the absorption() function in the source module only works if length is in units of nm!
x0m = x0*1e9#convert the length into nm
if len(x0) != (len(self.temp_data.n)-1):
print('-----------------------------------------------------------')
print('Number of considered layers does not match with given refractive indices.\n'\
'in `temperature.n(Air|Film layer1|Film layer2|...|Air)` only consider the film layers. \n'\
'The refractive index of the substrate gets added automatically later when \n'\
'`simulation.addSubstrate(\'name\')` gets called.')
print('-----------------------------------------------------------')
if (timeprofile == "Gaussian"):
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m)
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a Gaussian time profile are taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a custom time profile are taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if self.source.loadData is False:
print('-----------------------------------------------------------')
print('Import an array containing the data of the custom pulse. '\
'arr[0,:] = time; arr[1,:] = amplitude')
print('-----------------------------------------------------------')
[ttime,amplitude] = self.source.loadData
lam = 1#Lambda does not matter here since the spatial absorption is calculated via TMM
[sourceM,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,lam)
#The createTMM(xgrid,timegrid,length,*args) has customtime as an optional argument
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime,scaling)
if (timeprofile == "RepGaussian") or (timeprofile== "repGaussian"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
#If num_of_pulses is too big to fit in the time range [t0, t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
return(sourceM)
# This is the main Explicit Euler loop where the solution to T(x,t) is calculated.
def run(self):
idealtimestep = self.stability()
if not self.time_step:
self.time_step = idealtimestep
print('-----------------------------------------------------------')
print(' No specific time step has been indicated. \n '\
'The stability region has been calculated and an appropriate timestep has been chosen.\n '\
'Timestep = {idealtimestep:.2e} s'.format(idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if (self.time_step-idealtimestep)/idealtimestep > 0.1:
print('-----------------------------------------------------------')
print('The manually chosen time step of {time_step:.2e} may be too big and could cause instabilities in the simulation.\n '\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if(self.time_step-idealtimestep)/idealtimestep < -0.2:
print('-----------------------------------------------------------')
print('The manually chosen time step of {time_step:.2e} is very small and may cause a long simulation time.\n'\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
#loading simulation-relevant properties from the structural temperature object
[c_E,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
t = np.arange(self.start_time,self.final_time,self.time_step)
#only if the injection would make the time grid smaller, to not move into the unstable regime
if self.source.FWHM:
if (6*self.source.FWHM/200 < idealtimestep):
#inject 200 extra points around pulse to fully capture the shape of the pulse
tinj = np.linspace(self.source.t0 - 3*self.source.FWHM,self.source.t0 + 3*self.source.FWHM,200)
smaller = np.where(t<self.source.t0 - 3*self.source.FWHM)[0]
bigger = np.where(t>self.source.t0 + 3*self.source.FWHM)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
#If a more refined grid is chosen around t0, we inject a fine time grid around t0 to correctly capture the pulse shape
if self.source.adjusted_grid is not False:
if self.source.dt0 == False:
print('-----------------------------------------------------------')
print('The option for an adjusted grid is True, but no interval for a more refined grid has been given. '\
'Indicate dt0 (around which value the time grid should have higher resolution) in the source object.')
print('-----------------------------------------------------------')
if 2*self.source.dt0/self.source.extra_points < idealtimestep:
print('-----------------------------------------------------------')
print('A refined Grid around t0 has been applied')
print('-----------------------------------------------------------')
tinj = np.linspace(self.source.t0-self.source.dt0,self.source.t0+self.source.dt0,self.source.extra_points)
smaller = np.where(t<self.source.t0 - self.source.dt0)[0]
bigger = np.where(t>self.source.t0 + self.source.dt0)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
else:
print('-----------------------------------------------------------')
print('No refined time grid is applied. The timestep is already very small. ' \
'You can use the simulation class with the property self.time_step and '\
'assign it a smaller value than the current time step.')
print('-----------------------------------------------------------')
#Initialize the systems and load the matrices
if self.temp_data_Lat:
if self.temp_data.plt_points != self.temp_data_Lat.plt_points:
self.temp_data_Lat.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c_L,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_L,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
if self.temp_data_Spin:
print("Line 728 Spinsystem")
if self.temp_data.plt_points is not self.temp_data_Spin.plt_points:
self.temp_data_Spin.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the spin system.\n'\
'They are set equal to avoid matrix dimension missmatch.')
print('-----------------------------------------------------------')
if self.temp_data.collocpts is not self.temp_data_Spin.collocpts:
self.temp_data_Spin.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the spin system.\n'\
'They are set equal to avoid matrix dimension missmatch.')
print('-----------------------------------------------------------')
[c_S,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_S,interfaces,LayerMat,A1h] = self.temp_data_Spin.Msetup()
if (self.source.fluence == 0):
print('-----------------------------------------------------------')
print('No source is applied.\n'\
'source.fluence = 0')
print('-----------------------------------------------------------')
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros_like(xmg)
else:
sourceM = self.sourceprofile(self.source.spaceprofile,self.source.timeprofile,xflat,self.temp_data.length,t,N)
#Making the boundary conditions a function of t, in case they are given as scalars
if isinstance(self.left_BC,(int,float)):
dummy = self.left_BC
self.left_BC = lambda t: dummy + 0*t
if isinstance(self.right_BC,(int,float)):
dummy1 = self.right_BC
self.right_BC = lambda t: dummy1 + 0*t
#Making the boundary conditions a matrix for the electron case
BC_E = np.zeros((len(c_E),len(t)))
BC_E[0] = self.left_BC(t)
BC_E[-1] = self.right_BC(t)
#Checking the Lattice system boundary conditions
if self.temp_data_Lat:
if isinstance(self.left_BC_L,(int,float)):
dummy2 = self.left_BC_L
self.left_BC_L = lambda t: dummy2 + 0*t
if isinstance(self.right_BC_L,(int,float)):
dummy3 = self.right_BC_L
self.right_BC_L = lambda t: dummy3 + 0*t
#Making the boundary conditions a matrix for the lattice case
BC_L = np.zeros((len(c_L),len(t)))
BC_L[0] = self.left_BC_L(t)
BC_L[-1] = self.right_BC_L(t)
#Checking the Spin system boundary conditions
#It implies that we consider at least 2 temperatures -> under this "if-tree"
if self.temp_data_Spin:
if isinstance(self.left_BC_S,(int,float)):
dummy4 = self.left_BC_S
self.left_BC_S = lambda t: dummy4 + 0*t
if isinstance(self.right_BC_S,(int,float)):
dummy5 = self.right_BC_S
self.right_BC_S = lambda t: dummy5 + 0*t
            #Making the boundary conditions a matrix for the Spin case
BC_S = np.zeros((len(c_S),len(t)))
BC_S[0] = self.left_BC_S(t)
BC_S[-1] = self.right_BC_S(t)
#Check if the Lattice/Spin and Spin/Electron coupling constants have the right size
if np.size(self.coupling_LS)<np.size(length)-1:
self.coupling_LS = self.coupling_LS*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
                print('Not every layer has a unique Lattice-Spin coupling constant \'G_LS\'.\n'\
                '=> G_LS will be set to the value of the first layer = {:.2e}\n'\
                'for all other layers.'.format(self.coupling_LS[0]))
print('-----------------------------------------------------------')
if np.size(self.coupling_SE)<np.size(length)-1:
self.coupling_SE = self.coupling_SE*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
                print('Not every layer has a unique Spin-Electron coupling constant \'G_SE\'.\n'\
                '=> G_SE will be set to the value of the first layer = {:.2e}\n'\
                'for all other layers.'.format(self.coupling_SE[0]))
print('-----------------------------------------------------------')
#If only the two temperature model is considered I only need to check one coupling constant
if np.size(self.coupling)<np.size(length)-1:
self.coupling = self.coupling*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique coupling constant \'G\'.\n'\
            '=> G will be set to the value of the first layer = {:.2e}\n'\
            'for all other layers.'.format(self.coupling[0]))
print('-----------------------------------------------------------')
# The 3 Temperature Case is being considered
if self.temp_data_Spin:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L #300*np.ones(len(phi_L[0]))
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#Setup arrays for the spin temperature
phi_S = np.zeros((len(t),len(x_plt_flat))); phi_S[0] = initphi_large_S #300*np.ones(len(phi_L[0]))
Flow_1S = np.zeros(len(c_S))
Flow_2S = np.zeros(len(c_S))
dphi_S = np.zeros(len(c_S))
intphi_S = np.zeros(len(c_S))
#General setup for E.E. loop
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1)#correct interface condition with real value for phi
            A00[0] = 1; A00[-1] = 1 #Avoid division by zero in dphi_L! Clear for BC before intphi calc.
Abig_E = np.copy(Abig) #Since Abig can change due to interconditions we split it here
Abig_L = np.copy(Abig) #The interface conditions are applied on every time step
Abig_S = np.copy(Abig) #Every system gets individual matrix
start_EL = time.time()
for i in tqdm(range(1,len(t)),position = 0):
#Calculate Solution at every time step and respective derivatives
phi0_E = np.dot(A00,c_E); phi1_E = np.dot(A1b,c_E); phi2_E = np.dot(A2b,c_E)
phi0_L = np.dot(A00,c_L); phi1_L = np.dot(A1b,c_L); phi2_L = np.dot(A2b,c_L)
phi0_S = np.dot(A00,c_S); phi1_S = np.dot(A1b,c_S); phi2_S = np.dot(A2b,c_S)
#Calculating interface conditions which are applied later
intercon_E = self.interconditions(phi_E[i-1],interfaces,self.temp_data.conductivity,N,A1h)
intercon_L = self.interconditions(phi_L[i-1],interfaces,self.temp_data_Lat.conductivity,N,A1h)
intercon_S = self.interconditions(phi_S[i-1],interfaces,self.temp_data_Spin.conductivity,N,A1h)
startf = 0;endf = N-1
                #Construct all piecewise flows and piecewise dphi. Iterate over layers
for j in range(0,interfaces+1):
#electron: d/dx[k(phi) * d/dx(phi)]
Flow_1E[startf:endf] = self.temp_data.diff_conductivity(phi0_E[startf:endf],j)
Flow_2E[startf:endf] = self.temp_data.conductivity[j](phi0_E[startf:endf])
Flow_1E[startf:endf] *=phi1_E[startf:endf]**2
Flow_2E[startf:endf] *= phi2_E[startf:endf]
#lattice
Flow_1L[startf:endf] = self.temp_data_Lat.diff_conductivity(phi0_L[startf:endf],j)
Flow_2L[startf:endf] = self.temp_data_Lat.conductivity[j](phi0_L[startf:endf])
Flow_1L[startf:endf] *=phi1_L[startf:endf]**2
Flow_2L[startf:endf] *= phi2_L[startf:endf]
#Spin
Flow_1S[startf:endf] = self.temp_data_Spin.diff_conductivity(phi0_S[startf:endf],j)
Flow_2S[startf:endf] = self.temp_data_Spin.conductivity[j](phi0_S[startf:endf])
Flow_1S[startf:endf] *=phi1_S[startf:endf]**2
Flow_2S[startf:endf] *= phi2_S[startf:endf]
#calculate delta phi for electron, lattice and spin
#This is the core of the problem
dphi_E[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0_E)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1E[startf:endf]+Flow_2E[startf:endf]+sourceM[i,startf:endf] +\
self.coupling[j]*(phi0_L[startf:endf]-phi0_E[startf:endf])+self.coupling_SE[j]*(phi0_S[startf:endf]-phi0_E[startf:endf]))
#Lattice time derivative
dphi_L[startf:endf] = 1/(self.temp_data_Lat.heatCapacity[j](phi0_L)[startf:endf]*self.temp_data_Lat.rho[j])*\
(Flow_1L[startf:endf]+Flow_2L[startf:endf] +\
self.coupling[j]*(phi0_E[startf:endf]-phi0_L[startf:endf])+self.coupling_LS[j]*(phi0_S[startf:endf]-phi0_L[startf:endf]))
#Spin system time derivative
dphi_S[startf:endf] = 1/(self.temp_data_Spin.heatCapacity[j](phi0_S)[startf:endf]*self.temp_data_Spin.rho[j])*\
(Flow_1S[startf:endf]+Flow_2S[startf:endf] +\
self.coupling_LS[j]*(phi0_L[startf:endf]-phi0_S[startf:endf])+self.coupling_SE[j]*(phi0_E[startf:endf]-phi0_S[startf:endf]))
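                    #For reference, a sketch of the continuous equations that the
                    #three explicit-Euler updates above discretize (G = coupling,
                    #G_LS = coupling_LS, G_SE = coupling_SE; S is the source term):
                    # C_E(T_E)*rho_E*dT_E/dt = d/dx[k_E(T_E)*dT_E/dx] + S(x,t)
                    #                          + G*(T_L-T_E) + G_SE*(T_S-T_E)
                    # C_L(T_L)*rho_L*dT_L/dt = d/dx[k_L(T_L)*dT_L/dx]
                    #                          + G*(T_E-T_L) + G_LS*(T_S-T_L)
                    # C_S(T_S)*rho_S*dT_S/dt = d/dx[k_S(T_S)*dT_S/dx]
                    #                          + G_LS*(T_L-T_S) + G_SE*(T_E-T_S)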
startf += N-1; endf +=N-1 #Move one layer further
start_i = 0; end_i = N-1
#Apply interface conditions for all layers in every time step, i.e.:
                #filling up Abig with the interface condition in the middle of the grid
for k in range(0,interfaces):
#for the electron system
Abig_E[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig_E[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig_E[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
#for the lattice system
Abig_L[end_i,start_i:end_i] = intercon_L[0][k][:-1]#Lhs interface flow
Abig_L[end_i,end_i+1:end_i+N] = -intercon_L[1][k][1:]#Rhs interface flow
Abig_L[end_i,end_i] = intercon_L[0][k][-1] -intercon_L[1][k][0]
#for the Spin system
Abig_S[end_i,start_i:end_i] = intercon_S[0][k][:-1]#Lhs interface flow
Abig_S[end_i,end_i+1:end_i+N] = -intercon_S[1][k][1:]#Rhs interface flow
Abig_S[end_i,end_i] = intercon_S[0][k][-1] -intercon_S[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step at the boundaries
                #If Neumann BC -> divide by k(T) since BC_Type = 1
                #If Dirichlet BC -> divide by 1 since BC_Type = 0
                Flux_E = BC_E[:,i] #Avoiding 0 in denominator
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
Flux_L = BC_L[:,i]
Flux_L[0] /= self.temp_data_Lat.conductivity[0](c_L[0])**self.temp_data_Lat.Left_BC_Type + 1e-12
Flux_L[-1] /= self.temp_data_Lat.conductivity[-1](c_L[-1])**self.temp_data_Lat.Right_BC_Type + 1e-12
Flux_S = BC_S[:,i]
Flux_S[0] /= self.temp_data_Spin.conductivity[0](c_S[0])**self.temp_data_Spin.Left_BC_Type + 1e-12
Flux_S[-1] /= self.temp_data_Spin.conductivity[-1](c_S[-1])**self.temp_data_Spin.Right_BC_Type + 1e-12
                #Clear for boundary conditions at the edges of the grid
dphi_E[0] = 0; dphi_E[-1] = 0;
phi0_E[0] = 0; phi0_E[-1] = 0;
dphi_L[0] = 0; dphi_L[-1] = 0;
phi0_L[0] = 0; phi0_L[-1] = 0;
dphi_S[0] = 0; dphi_S[-1] = 0;
phi0_S[0] = 0; phi0_S[-1] = 0;
#intermediate phi with low resolution in space according to explicit euler
intphi_E = phi0_E + tstep[i] * dphi_E + Flux_E
intphi_L = phi0_L + tstep[i] * dphi_L + Flux_L
intphi_S = phi0_S + tstep[i] * dphi_S + Flux_S
#Interface condition: Setting the rhs to 0, such that the heat transported (flux = Q = k*d/dx phi)
#from left is what comes out at the right hand side Q_1 -> Q_2
intphi_E[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
intphi_L[condi] = 0
intphi_S[condi] = 0
#electron: use c to map on high resolution x-grid
#since in Abig, k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig_E,intphi_E) # c(t) for every timestep
phi_E[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi_E[i,cnfill] = c_E[condi] #correct the values for phi at interface
#lattice
                c_L = np.linalg.solve(Abig_L,intphi_L) # c(t) for every timestep
import os
import sys
from time import sleep
import numpy as np
from dynamic_graph import plug
from dynamic_graph.sot.core.operator import Selec_of_vector
# from dynamic_graph.sot.torque_control.create_entities_utils import create_estimators
# from dynamic_graph.sot.torque_control.create_entities_utils import create_position_controller
# from dynamic_graph.sot.torque_control.create_entities_utils import create_trajectory_generator
from dynamic_graph.sot.torque_control.create_entities_utils import NJ, addTrace, create_inverse_dynamics
from dynamic_graph.sot.torque_control.filter_differentiator import FilterDifferentiator
from dynamic_graph.sot.torque_control.main import main_v3
from dynamic_graph.sot.torque_control.utils.sot_utils import Bunch, go_to_position, start_sot
from dynamic_graph.tracer_real_time import TracerRealTime
SIM_MODE = False
robot = None # TODO
conf_traj = None # TODO
def get_sim_conf():
import dynamic_graph.sot.torque_control.hrp2.inverse_dynamics_controller_gains as inv_dyn_gains
import dynamic_graph.sot.torque_control.hrp2.base_estimator_sim_conf as base_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_sim_conf as control_manager_conf
import dynamic_graph.sot.torque_control.hrp2.force_torque_estimator_conf as force_torque_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.joint_torque_controller_conf as joint_torque_controller_conf
import dynamic_graph.sot.torque_control.hrp2.joint_pos_ctrl_gains_sim as pos_ctrl_gains
import dynamic_graph.sot.torque_control.hrp2.motors_parameters as motor_params
conf = Bunch()
conf.inv_dyn_gains = inv_dyn_gains
conf.base_estimator = base_estimator_conf
conf.control_manager = control_manager_conf
conf.force_torque_estimator = force_torque_estimator_conf
conf.joint_torque_controller = joint_torque_controller_conf
conf.pos_ctrl_gains = pos_ctrl_gains
conf.motor_params = motor_params
return conf
def get_default_conf():
import dynamic_graph.sot.torque_control.hrp2.inverse_dynamics_controller_gains as inv_dyn_gains
import dynamic_graph.sot.torque_control.hrp2.base_estimator_conf as base_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_conf as control_manager_conf
import dynamic_graph.sot.torque_control.hrp2.force_torque_estimator_conf as force_torque_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.joint_torque_controller_conf as joint_torque_controller_conf
import dynamic_graph.sot.torque_control.hrp2.joint_pos_ctrl_gains as pos_ctrl_gains
import dynamic_graph.sot.torque_control.hrp2.motors_parameters as motor_params
conf = Bunch()
conf.inv_dyn_gains = inv_dyn_gains
conf.base_estimator = base_estimator_conf
conf.control_manager = control_manager_conf
conf.force_torque_estimator = force_torque_estimator_conf
conf.joint_torque_controller = joint_torque_controller_conf
conf.pos_ctrl_gains = pos_ctrl_gains
conf.motor_params = motor_params
return conf
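# A minimal sketch of how the two factories above would typically be selected;
# the toggle itself is an assumption (only SIM_MODE is defined in this script):
#   conf = get_sim_conf() if SIM_MODE else get_default_conf()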
def create_base_encoders(robot):
base_encoders = Selec_of_vector('base_encoders')
plug(robot.device.robotState, base_encoders.sin)
base_encoders.selec(0, NJ + 6)
return base_encoders
def replug_estimator_kin(robot, estimator_fd):
plug(robot.encoders.sout, estimator_fd.x)
plug(estimator_fd.dx, robot.base_estimator.joint_velocities)
# plug(estimator_fd.dx, robot.ff_locator.joint_velocities);
plug(estimator_fd.dx, robot.pos_ctrl.jointsVelocities)
plug(estimator_fd.x_filtered, robot.estimator_ft.q_filtered)
plug(estimator_fd.dx, robot.estimator_ft.dq_filtered)
robot.estimator_ft.ddq_filtered.value = (0., ) * 30
plug(estimator_fd.dx, robot.torque_ctrl.jointsVelocities)
robot.torque_ctrl.jointsAccelerations.value = (0., ) * 30
plug(estimator_fd.dx, robot.inv_dyn_ctrl.jointsVelocities)
plug(estimator_fd.dx, robot.ctrl_manager.dq)
return
def replug_inv_dyn(robot, inv_dyn_ctrl):
plug(robot.ctrl_manager.joints_ctrl_mode_torque, robot.inv_dyn_ctrl.controlledJoints)
return
def setup_velocity_filter(robot, conf, filter_b, filter_a):
main_v3(robot, startSoT=False, go_half_sitting=False, conf=None)
robot.estimator_fd = FilterDifferentiator("fd_filter")
dt = robot.timeStep
robot.estimator_fd.init(dt, NJ, filter_b, filter_a)
robot.inv_dyn_ctrl = create_inverse_dynamics(robot, conf.inv_dyn_gains, conf.motor_params, dt=dt)
replug_inv_dyn(robot, robot.inv_dyn_ctrl)
replug_estimator_kin(robot, robot.estimator_fd)
def wait_for_motion():
while not robot.traj_gen.isTrajectoryEnded():
sleep(1.0)
return
def dump_stop_tracer(tracer):
print("Dumping Tracer")
tracer.stop()
sleep(0.2)
tracer.dump()
sleep(0.2)
tracer.close()
sleep(0.2)
tracer.clear()
sleep(0.2)
return
def conf_velocity_filter():
conf = Bunch()
conf.j_name = "re"
conf.x_f = -1.0
conf.w_min = 0.01
conf.w_max = 0.7
conf.deltaT = 1.0
return conf
def conf_filter_list():
conf = Bunch()
conf.b_list = [
np.array([0.00902094, 0.01800633, 0.00902094]),
np.array([0.00542136, 0.01084273, 0.00542136]),
np.array([0.00355661, 0.00711322, 0.00355661]),
        np.array([0.00208057, 0.00416113, 0.00208057]),
from __future__ import annotations
import numpy as np
from typing import List, Dict
from src.models.metrics import metrics_report, get_metrics
from src.models.transformer import TransformerAutoEncoder
from src.models.datasets import CustomMinMaxScaler
from src.models.utils import load_pickle_file, find_optimal_threshold, classify, create_checkpoint, \
create_experiment_report, save_experiment, get_all_divisors, get_normal_dist
SEED = 160121
np.random.seed(SEED)
EXPERIMENT_PATH = '../../models/TAE-hyperparameters-embeddings-HDFS1.json'
def train_transformer(x_train: List, x_test: List, y_train: np.array, y_test: np.array) -> Dict:
sc = CustomMinMaxScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
model = TransformerAutoEncoder()
n_experiments = 100
embeddings_dim = x_train[0].shape[1]
divisors = get_all_divisors(embeddings_dim)
params = {
'epochs': np.random.choice(np.arange(1, 5), size=n_experiments).tolist(),
'learning_rate': np.random.choice(10 ** np.linspace(-4, -0.5), size=n_experiments).tolist(),
'batch_size': np.random.choice([2 ** i for i in range(3, 8)], size=n_experiments).tolist(),
'input_dim': [embeddings_dim] * n_experiments,
'heads': np.random.choice(divisors, size=n_experiments, p=get_normal_dist(divisors)).tolist(),
'n_encoders': np.random.randint(1, 5, size=n_experiments).tolist(),
'n_decoders': np.random.randint(1, 5, size=n_experiments).tolist(),
'dim_feedforward': np.random.randint(100, 2000, size=n_experiments).tolist(),
'window': np.random.randint(10, 100, size=n_experiments).tolist(),
'dropout': np.random.uniform(0, 0.5, size=n_experiments).tolist()
}
evaluated_hyperparams = random_search((x_train[y_train == 0], x_test, None, y_test), model, params)
return evaluated_hyperparams
def random_search(data_and_labels: tuple, model: TransformerAutoEncoder, params: Dict) -> Dict:
x_train, x_test, _, y_test = data_and_labels
scores = []
for conf in zip(*params.values()):
kwargs = {k: val for k, val in zip(params.keys(), conf)}
model.set_params(**kwargs)
print(f'Model current hyperparameters are: {kwargs}.')
model.fit(x_train)
y_pred = model.predict(x_test) # return reconstruction errors
theta, f1 = find_optimal_threshold(y_test, y_pred)
y_pred = classify(y_pred, theta)
metrics_report(y_test, y_pred)
scores.append(create_experiment_report(get_metrics(y_test, y_pred), kwargs))
# visualize_distribution_with_labels(y_pred, y_test, to_file=False)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))
create_checkpoint({'experiments': scores}, EXPERIMENT_PATH)
return {
'experiments': scores
}
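# A minimal sketch (illustrative values, not from the search space above) of how
# the zip(*params.values()) loop in random_search expands into per-run configs:
#   params = {'epochs': [3, 1], 'batch_size': [32, 64]}
#   list(zip(*params.values())) -> [(3, 32), (1, 64)]
# so each conf collects the i-th sampled value of every hyperparameter.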
if __name__ == '__main__':
X_train = load_pickle_file('../../data/processed/HDFS1/X-train-HDFS1-cv1-1-block.pickle')
X_val = load_pickle_file('../../data/processed/HDFS1/X-val-HDFS1-cv1-1-block.pickle')
y_train = np.load('../../data/processed/HDFS1/y-train-HDFS1-cv1-1-block.npy')
    y_val = np.load('../../data/processed/HDFS1/y-val-HDFS1-cv1-1-block.npy')
import numpy as np
import pickle
import tqdm
from wordfreq import word_frequency
class ElFiltrator:
def __init__(self, dataset_name = '', filtration_type = ''):
self.prop = 0.5
self.filtration_type = filtration_type
# to save
if filtration_type == '':
self.filename = 'content\\scores\\content_scores_{}'.format(dataset_name)
self.logical_filename = 'logical\\scores\\logical_scores_{}'.format(dataset_name)
else:
self.filename = 'content\\scores\\content_scores_{}_{}'.format(dataset_name, filtration_type)
self.logical_filename = 'logical\\scores\\logical_scores_{}_{}'.format(dataset_name, filtration_type)
# To load
self.prompts_filename = 'prompts\\best\\content_best_prompts_{}'.format(dataset_name)
self.keys_filename = 'prompts\\best\\content_best_keys_{}'.format(dataset_name)
self.prompts_scores_filename = 'prompts\\scores\\content_prompts_scores_{}'.format(dataset_name)
self.content_scores_filename = 'content\\scores\\content_scores_{}'.format(dataset_name)
self.logical_scores_filename = 'logical\\scores\\logical_scores_{}'.format(dataset_name)
    def load_content_scores(self, filename):
        with open(filename, 'rb') as savefile:
            self.content_scores_dict = pickle.load(savefile)
    def load_logical_scores(self, filename):
        with open(filename, 'rb') as savefile:
            self.logical_scores_dict = pickle.load(savefile)
    def load_best_prompts(self, filename):
        with open(filename, 'rb') as savefile:
            self.best_prompts_dict = pickle.load(savefile)
    def load_best_keys(self, filename):
        with open(filename, 'rb') as savefile:
            self.best_keys_dict = pickle.load(savefile)
    def load_prompts_scores(self, filename):
        with open(filename, 'rb') as savefile:
            self.prompts_scores_dict = pickle.load(savefile)
def construct_filtered_scores(self, filtered_keys):
"""
Arg:
filtered_keys : list of keys like 'judo---sport'
Returns:
dict -> key from filtered_keys
value content_scores_dict[key]
"""
filtered_content_scores = {}
for key in filtered_keys:
filtered_content_scores[key] = self.content_scores_dict[key]
return filtered_content_scores
    def filtrate(self):
        """
        Filter the content scores according to self.filtration_type and save the result.
        """
self.load_content_scores(self.content_scores_filename)
if self.filtration_type == '':
return
elif self.filtration_type == 'model_freq':
filtered_keys = self.model_freq_filtration()
elif self.filtration_type == 'word_freq':
filtered_keys = self.word_freq_filtration()
else:
raise Exception('This filtration type is not implemented. Please try model_freq or word_freq.')
filtered_content_scores = self.construct_filtered_scores(filtered_keys)
# Save scores
savefile = open(self.filename, 'wb')
pickle.dump(filtered_content_scores, savefile)
savefile.close()
def filtrate_logical(self):
"""
We load the filtrate content scores and just output the logical scores for those pairs.
"""
if self.filtration_type == '':
return
# Load filtered content scores
savefile = open(self.filename, 'rb')
filtered_content_scores = pickle.load(savefile)
savefile.close()
# Content filtered keys
content_filtered_keys = list(filtered_content_scores.keys())
# Load logical scores
self.load_logical_scores(self.logical_scores_filename)
# Go
filtered_logical_scores = {}
for pair_key in tqdm.tqdm(content_filtered_keys, total = len(content_filtered_keys)):
filtered_logical_scores[pair_key] = self.logical_scores_dict[pair_key]
print("{} pairs kept!".format(len(content_filtered_keys)))
# Save scores
savefile = open(self.logical_filename, 'wb')
pickle.dump(filtered_logical_scores, savefile)
savefile.close()
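    # A minimal usage sketch (the dataset name is hypothetical):
    #   f = ElFiltrator(dataset_name='mydata', filtration_type='word_freq')
    #   f.filtrate()          # writes the filtered content scores
    #   f.filtrate_logical()  # writes the matching logical scores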
def model_freq_filtration(self):
"""
Filtrate according to the score given by :
min_{transf} ( min( P[MASK2 = word2 | S*(transf)(MASK1, MASK2)], P[MASK1 = word1 | S*(transf)(MASK1, MASK2)] ) )
where S*(transf) is the optimal vanilla template for the transformation transf
"""
self.load_best_keys(self.keys_filename)
self.load_prompts_scores(self.prompts_scores_filename)
filtration_scores = {} # dict -> key 'judo---sport' ; value filtration score
for pair_key in tqdm.tqdm(self.best_keys_dict.keys(), total = len(list(self.best_keys_dict.keys()))):
transf_scores = []
transf_best_keys = self.best_keys_dict[pair_key]
for transf in transf_best_keys.keys():
vanilla_key = transf_best_keys[transf][0]
transf_scores.append( min(self.prompts_scores_dict[pair_key]['vanilla'][vanilla_key]) )
filtration_scores[pair_key] = min(transf_scores)
values = np.array(list(filtration_scores.values()))
keys = list(filtration_scores.keys())
sorted_idx = np.argsort(values)
filtered_keys = []
for key_idx in sorted_idx[int(len(keys)*self.prop):]:
filtered_keys.append(keys[key_idx])
print("{} pairs kept!".format(len(filtered_keys)))
return filtered_keys
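    # Note: with self.prop = 0.5 the slice sorted_idx[int(len(keys)*self.prop):]
    # keeps the upper half of the pairs ranked by their min-over-transformations
    # score; raising self.prop filters more aggressively.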
def word_freq_filtration(self):
self.load_best_keys(self.keys_filename)
list_of_pair_keys = list(self.best_keys_dict.keys())
filtration_scores = {}
for pair_key in tqdm.tqdm(list_of_pair_keys, total = len(list_of_pair_keys)):
word1, word2 = pair_key.split('---')
filtration_scores[pair_key] = min(self.compute_expression_freq(word1), self.compute_expression_freq(word2))
values = np.array(list(filtration_scores.values()))
keys = list(filtration_scores.keys())
        sorted_idx = np.argsort(values)
        filtered_keys = []
        for key_idx in sorted_idx[int(len(keys)*self.prop):]:
            filtered_keys.append(keys[key_idx])
        print("{} pairs kept!".format(len(filtered_keys)))
        return filtered_keys
"""
Defines the object class that uses a Kepler PRF model to compute apertures and
their metrics.
"""
import os
import warnings
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import patches
from astropy.io import fits
from . import PACKAGEDIR, DATAOUTDIR
from .utils import _make_A_polar
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=sparse.SparseEfficiencyWarning)
class KeplerPRF(object):
"""
Class to load PRF models computed from FFI, to create photometric apertures
"""
def __init__(
self,
prf_ws: np.array,
n_r_knots: int = 5,
n_phi_knots: int = 15,
rmin: float = 0.25,
rmax: float = 5,
):
"""
A KeplerPRF object is build by providing the hyperparameters of the spline
model, and the weights of each basis spline. The hyperparameters allow to
reconstruct the same basis splines while the weights are used at evaluation of
the model in new data.
Parameters
__________
prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
Attributes
----------
        prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
"""
self.prf_ws = prf_ws
self.rmin = rmin
self.rmax = rmax
self.n_r_knots = n_r_knots
self.n_phi_knots = n_phi_knots
@staticmethod
def load_from_file(
quarter: int = 5,
channel: int = 1,
):
"""
Loads a PRF model build from Kepler's FFI for a given quarter and channel.
Note: the file with the PRF models is csv file with a multiindex pandas
DataFrame, the FITS version is in development.
Parameters
----------
channel : int
Channel number of the FFI to be used to model the PRF. Valid values are
between 1 and 84.
quarter : int
Number of the quarter that will be used to model the PRF.
Valid values are between 1 and 17.
Returns
-------
KeplerPRF : KeplerPRF
An object with the PRF model ready to be evaluated in new data.
"""
# load PSF model
fname = "%s/data/ffi_prf_models_v0.1.0.csv" % (PACKAGEDIR)
if not os.path.isfile(fname):
raise FileNotFoundError("No PSF files: ", fname)
try:
tab = pd.read_csv(fname, index_col=0, header=[0, 1])
n_r_knots = int(tab.loc[channel, (str(quarter), "n_r_knots")])
n_phi_knots = int(tab.loc[channel, (str(quarter), "n_phi_knots")])
rmin = int(tab.loc[channel, (str(quarter), "rmin")])
rmax = int(tab.loc[channel, (str(quarter), "rmax")])
prf_ws = tab.loc[channel, str(quarter)].iloc[4:].values
except KeyError:
raise IOError(
"Quarter %i and channel %i has no PRF model data" % (quarter, channel)
)
return KeplerPRF(prf_ws, n_r_knots, n_phi_knots, rmin, rmax)
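    # A minimal usage sketch (quarter/channel values are illustrative; dx and dy
    # would come from the pixel grid around a source):
    #   prf = KeplerPRF.load_from_file(quarter=5, channel=40)
    #   source_model = prf.evaluate_PSF(dx, dy)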
def evaluate_PSF(self, dx, dy):
"""
Function to evaluate the PRF model in a grid of data. THe function returns
a the prediction of the model as normalized flux. The model is evaluated in
pixels up to r < 7 from the location of the source.
Parameters
----------
dx : numpy.ndarray
Distance between pixels (row direction) and source coordinates.
        dy : numpy.ndarray
Distance between pixels (column direction) and source coordinates.
Returns
-------
source_model: scipy.sparse.csr_matrix
            Normalized flux values of the PRF model evaluation in the dx, dy grid
"""
r = np.hypot(dx, dy)
phi = np.arctan2(dy, dx)
source_mask = r <= np.floor(self.rmax)
phi[phi >= np.pi] = np.pi - 1e-6
try:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=self.rmin,
rmax=self.rmax,
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
except ValueError:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=np.percentile(r[source_mask].ravel(), 1),
rmax=np.percentile(r[source_mask].ravel(), 99),
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
source_model = sparse.csr_matrix(r.shape)
m = 10 ** dm.dot(self.prf_ws)
source_model[source_mask] = m
source_model.eliminate_zeros()
# psf_models = source_model.multiply(1 / source_model.sum(axis=1)).tocsr()
return source_model
def diagnose_metrics(self, psf_models, idx=0, ax=None, plot=True):
"""
Function to evaluate the flux metrics for a single source as a function of
the parameter that controls the aperture size.
The flux metrics are computed by taking into account the PSF models of
neighbor sources.
        This function is meant to be used only to generate diagnostics, or as a
        helper for `optimize_aperture()` to precompute the metric values and
        find the optimal aperture in the case of isolated sources, where the
        optimum is the full aperture.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
idx : int
            Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0].
ax : matplotlib.axes
Axis to be used to plot the figure
plot : boolean
Plot the metrics values.
Returns
-------
ax : matplotlib.axes
Figure axes
"""
compl, crowd, cut = [], [], []
for p in range(0, 101, 1):
cut.append(p)
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, p)
).toarray()[0]
crowd.append(self.compute_CROWDSAP(psf_models, mask, idx))
compl.append(self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask))
self.compl = np.array(compl)
self.crowd = np.array(crowd)
self.cut = np.array(cut)
if plot:
if ax is None:
fig, ax = plt.subplots(1)
ax.plot(self.cut, self.compl, label=r"FLFRCSAP")
ax.plot(self.cut, self.crowd, label=r"CROWDSAP")
ax.set_xlabel("Percentile")
ax.set_ylabel("Metric")
ax.legend()
return ax
def create_aperture_mask(self, psf_models, percentile=0, idx=None):
"""
        Function to create the aperture mask of a given source for a given aperture
        size. This function can compute the aperture mask for one or for all
        sources available in psf_models.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
percentile : float
Percentile value that defines the isophote from the distribution of values
in the psf model of the source
idx : int
            Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0]. If None, then it computes the
apertures for all sources in psf_models.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : numpy.ndarray
Flux metric indicating flux completeness for the selected aperture.
crowdeness : numpy.ndarray
Flux metric indicating flux contamination for the selected aperture.
"""
if idx is not None:
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd
else:
masks, completeness, crowdeness = [], [], []
for idx in range(psf_models.shape[0]):
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
masks.append(mask)
completeness.append(
self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
)
crowdeness.append(self.compute_CROWDSAP(psf_models, mask, idx))
return np.array(masks), np.array(completeness), np.array(crowdeness)
def optimize_aperture(
self, psf_models, idx=0, target_complet=0.9, target_crowd=0.9, max_iter=100
):
"""
Function to optimize the aperture mask for a given source. There are two
special cases:
* Isolated sources, the optimal aperture is the full aperture.
        * If optimizing for one single metric.
        For these two cases no actual optimization is performed; instead we use
        the results from `diagnose_metrics()`.
        The optimization is done using scipy's Brent algorithm with a custom
        loss function that uses a Leaky ReLU term to achieve the target value
        for both metrics.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
idx : int
            Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0]. If None, then it computes the
apertures for all sources in psf_models.
target_complet : float
Value of the target completeness metric.
target_crowd : float
Value of the target crowdeness metric.
max_iter : int
            Number of maximum iterations to be performed by the optimizer.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : float
Flux metric indicating flux completeness for the selected aperture.
crowdeness : float
Flux metric indicating flux contamination for the selected aperture.
optimal_percentile : float
Percentile of the normalized flux distribution that defines the isophote.
"""
# Do special cases when optimizing for only one metric
self.diagnose_metrics(psf_models, idx=idx, plot=False)
if target_complet < 0 and target_crowd > 0:
optim_p = self.cut[np.argmax(self.crowd)]
elif target_crowd < 0 and target_complet > 0:
optim_p = self.cut[np.argmax(self.compl)]
# for isolated sources, only need to optimize for completeness, in case of
# asking for 2 metrics
elif target_complet > 0 and target_crowd > 0 and all(self.crowd > 0.99):
optim_p = self.cut[np.argmax(self.compl)]
else:
optim_params = {
"percentile_bounds": [5, 95],
"target_complet": target_complet,
"target_crowd": target_crowd,
"max_iter": max_iter,
"psf_models": psf_models,
"idx": idx,
}
minimize_result = minimize_scalar(
self._goodness_metric_obj_fun,
method="Bounded",
bounds=[5, 95],
options={"maxiter": max_iter, "disp": False},
args=(optim_params),
)
optim_p = minimize_result.x
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, optim_p)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd, optim_p
def _goodness_metric_obj_fun(self, percentile, optim_params):
"""
The objective function to minimize with scipy.optimize.minimize_scalar called
during optimization of the photometric aperture.
Parameters
----------
percentile : int
Percentile of the normalized flux distribution that defines the isophote.
optim_params : dictionary
            Dictionary with the variables needed to evaluate the metric:
psf_models
idx
target_complet
target_crowd
Returns
-------
penalty : int
            Value of the objective function to be used for optimization.
"""
psf_models = optim_params["psf_models"]
idx = optim_params["idx"]
# Find the value where to cut
cut = np.percentile(psf_models[idx].data, int(percentile))
# create "isophot" mask with current cut
mask = (psf_models[idx] > cut).toarray()[0]
# Do not compute and ignore if target score < 0
if optim_params["target_complet"] > 0:
completMetric = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
else:
completMetric = 1.0
# Do not compute and ignore if target score < 0
if optim_params["target_crowd"] > 0:
crowdMetric = self.compute_CROWDSAP(psf_models, mask, idx)
else:
crowdMetric = 1.0
        # Once we hit the target we want to ease back on increasing the metric.
        # However, we don't want to ease back to zero pressure, as that would
        # unconstrain the penalty term and cause the optimizer to run wild.
        # So, use a "Leaky ReLU":
        #   metric' = threshold + (metric - threshold) * leak,
        # with leak factors of 0.001 (completeness) and 0.1 (crowding) below.
if (
optim_params["target_complet"] > 0
and completMetric >= optim_params["target_complet"]
):
completMetric = optim_params["target_complet"] + 0.001 * (
completMetric - optim_params["target_complet"]
)
if (
optim_params["target_crowd"] > 0
and crowdMetric >= optim_params["target_crowd"]
):
crowdMetric = optim_params["target_crowd"] + 0.1 * (
crowdMetric - optim_params["target_crowd"]
)
penalty = -(completMetric + 10 * crowdMetric)
return penalty
# def plot_mean_PSF(self, ax=None):
# """
# Function to plot the PRF model as created from the FFI. This is only for
# illustration purposes.
#
# Parameters
# ----------
# ax : matplotlib.axes
# Matlotlib axis can be provided, if not one will be created and returned
#
# Returns
# -------
# ax : matplotlib.axes
# Matlotlib axis with the figure
# """
# if not hasattr(self, "x_data"):
# raise AttributeError("Class doesn't have attributes to plot PSF model")
#
# if ax is None:
# fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# vmin = -0.5
# vmax = -3
# cax = ax[0].scatter(
# self.x_data,
# self.y_data,
# c=self.f_data,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[0])
# ax[0].set_title("Data mean flux")
# ax[0].set_ylabel("dy")
# ax[0].set_xlabel("dx")
#
# cax = ax[1].scatter(
# self.x_data,
# self.y_data,
# c=self.f_model,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[1])
# ax[1].set_title("Average PSF Model")
# ax[1].set_xlabel("dx")
#
# return ax
def plot_aperture(self, flux, mask=None, ax=None, log=False):
"""
Function to plot the photometric aperture for a given source.
Parameters
----------
flux : numpy.ndarray
Data array with the flux image.
mask : numpy.ndarray
Boolean array with the aperture mask
log : boolean
Plot the image in log or linear scale.
ax : matplotlib.axes
            Matplotlib axis can be provided, if not one will be created and returned
Returns
-------
ax : matplotlib.axes
            Matplotlib axis with the figure
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(5, 5))
pc = ax.pcolor(
flux,
shading="auto",
norm=colors.LogNorm() if log else None,
)
plt.colorbar(pc, label="", fraction=0.038, ax=ax)
ax.set_aspect("equal", adjustable="box")
ax.set_title("")
if mask is not None:
for i in range(flux.shape[0]):
for j in range(flux.shape[1]):
if mask[i, j]:
rect = patches.Rectangle(
xy=(j, i),
width=1,
height=1,
color="red",
fill=False,
hatch="",
)
ax.add_patch(rect)
zoom = np.argwhere(mask == True)
ax.set_ylim(
np.maximum(0, zoom[0, 0] - 3),
np.minimum(zoom[-1, 0] + 3, flux.shape[0]),
)
            ax.set_xlim(
                np.maximum(0, zoom[0, -1] - 3),
                np.minimum(zoom[-1, -1] + 3, flux.shape[1]),
            )
        return ax
from deeprl_hw3 import imitation
import gym
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
def reinforce(env, model):
"""Policy gradient algorithm
Parameters
----------
env: your environment
    model: policy with initialized weights
    Returns
    -------
    None; the model weights are updated in place
    """
# construct graphs
opt = tf.train.AdamOptimizer(learning_rate = 0.01)
Gt = tf.placeholder(tf.float32, shape = [None, 1], name = 'Gt')
At = tf.placeholder(tf.float32, shape = [None, 2], name = 'At')
target = tf.squeeze(model.output)
target1 = tf.reduce_sum(tf.multiply(target, At), axis=[1])
target2 = tf.log(target1)
target3 = -Gt*target2
grad_step = opt.compute_gradients(target3, model.weights)
update_weights = opt.apply_gradients(grad_step)
# start session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(10000):
state_array, action_array, total_rewards_array = generate_episodes(env, model)
sess.run(update_weights, feed_dict = {model.input: state_array, Gt:total_rewards_array, At:action_array})
# set weights
model.set_weights(sess.run(model.weights))
if i%20 == 0:
imitation.test_cloned_policy(env, model, render = False)
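# For reference, the graph built in reinforce() implements the REINFORCE loss
#   L = -sum_t G_t * log pi(a_t | s_t),
# whose gradient is the policy-gradient estimator
#   grad J = E[ G_t * grad log pi(a_t | s_t) ];
# Gt carries the returns and At the one-hot actions collected per episode.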
def generate_episodes(env, model):
"""Generate episodes of states, actions, and total rewards
Parameters
----------
env: your environment
model: policy
Returns
-------
states array
stochastic actions array
total rewards array
"""
# generate episodes with stochastic actions. similar to imitation.py's generate_expert_training_data
state_array = []
action_array = []
total_rewards_array = []
rewards_array = []
state = env.reset()
is_done = False
time_step = 0
while not is_done:
time_step += 1
state_array.append(state)
action_prob = model.predict(np.reshape(state, (-1,4)))
action = np.random.choice(np.arange(len(action_prob[0])), p=action_prob[0]) # stochastic
        one_hot_vec = np.zeros(2)
"""
pgeometry
---------
A collection of useful functions.
For additional options also see
`numpy <http://numpy.scipy.org/>`_ and `matplotlib <http://matplotlib.sourceforge.net/>`_.
:platform: Unix, Windows
Additions:
Copyright 2012-2016 TNO
Original code:
Copyright 2011 <NAME> <<EMAIL>>
@author: eendebakpt
"""
# %% Load necessary packages
import copy
import logging
import math
import os
import pickle
import pkgutil
import re
import subprocess
import sys
import tempfile
import time
import warnings
from functools import wraps
from typing import List, Optional, Union
import numpy
import numpy as np
import scipy.io
import scipy.optimize
import scipy.ndimage.filters as filters
import scipy.ndimage.morphology as morphology
import shapely.geometry
__version__ = '0.7.0'
# %% Load qt functionality
def qtModules(verbose=0):
""" Return list of Qt modules loaded """
_ll = sys.modules.keys()
qq = [x for x in _ll if x.startswith('Py')]
if verbose:
print('qt modules: %s' % str(qq))
return qq
try:
_applocalqt = None
try:
# by default use qtpy to import Qt
import qtpy
_haveqtpy = True
import qtpy.QtCore as QtCore
import qtpy.QtGui as QtGui
import qtpy.QtWidgets as QtWidgets
from qtpy.QtCore import QObject, Signal, Slot
except ImportError:
_haveqtpy = False
warnings.warn('could not import qtpy, not all functionality available')
pass
_ll = sys.modules.keys()
_pyside = len([_x for _x in _ll if _x.startswith('PySide.QtGui')]) > 0
_pyqt4 = len([_x for _x in _ll if _x.startswith('PyQt4.QtGui')]) > 0
_pyqt5 = len([_x for _x in _ll if _x.startswith('PyQt5.QtGui')]) > 0
def slotTest(txt):
""" Helper function for Qt slots """
class slotObject(QtCore.QObject):
def __init__(self, txt):
QObject.__init__(self)
self.txt = txt
@Slot()
def slot(self, v=None):
if v is None:
print('slotTest: %s' % self.txt)
else:
print('slotTest: %s: %s' % (self.txt, str(v)))
s = slotObject(txt)
return s.slot
class signalTest(QObject):
""" Helper function for Qt signals """
s = Signal()
def __init__(self):
QObject.__init__(self)
def go(self):
self.s.emit()
except Exception as ex:
logging.info('pgeometry: load qt: %s' % ex)
print(ex)
print('pgeometry: no Qt found')
# %% Load other modules
try:
import pylab
import pylab as p
except Exception as inst:
print(inst)
print('could not import pylab, not all functionality available...')
pass
try:
import matplotlib
import matplotlib.pyplot as plt
# needed for 3d plot points, do not remove!
try:
from mpl_toolkits.mplot3d import Axes3D
except BaseException:
pass
except ModuleNotFoundError as ex:
warnings.warn(
'could not find matplotlib, not all functionality available...')
plt = None
pass
try:
import skimage.filters
except ModuleNotFoundError as ex:
warnings.warn(
'could not find skimage.filters, not all functionality is available')
pass
try:
import cv2
_haveOpenCV = True
except (ModuleNotFoundError, ImportError):
_haveOpenCV = False
warnings.warn('could not find or load OpenCV, not all functionality is available')
pass
# %% Utils
try:
import resource
def memUsage():
""" Prints the memory usage in MB
Uses the resource module
"""
# http://chase-seibert.github.io/blog/2013/08/03/diagnosing-memory-leaks-python.html
print('Memory usage: %s (mb)' %
((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024., ))
except BaseException:
def memUsage():
print('Memory usage: ? (mb)')
def memory():
""" Return the memory usage in MB
Returns:
float: memory usage in mb
"""
import os
import psutil
process = psutil.Process(os.getpid())
mem = process.memory_info().rss / (1024. * 1024.)
return mem
def list_objects(objectype=None, objectclassname='__123', verbose=1):
""" List all objects in memory of a specific type or with a specific class name
Args:
objectype (None or class)
objectclassname (str)
Returns:
ll (list): list of objects found
"""
import gc
ll = []
for ii, obj in enumerate(gc.get_objects()):
if ii > 1000000:
break
valid = False
if hasattr(obj, '__class__'):
valid = getattr(obj.__class__, '__name__', 'none').startswith(objectclassname)
if objectype is not None and not valid:
if isinstance(obj, objectype):
valid = True
if valid:
if verbose:
print('list_objects: object %s' % (obj, ))
ll.append(obj)
return ll
def package_versions(verbose=1):
""" Report package versions installed """
print('numpy.__version__ %s' % numpy.__version__)
print('scipy.__version__ %s' % scipy.__version__)
print('matplotlib.__version__ %s' % matplotlib.__version__)
try:
import cv2
print('cv2.__version__ %s' % cv2.__version__)
except BaseException:
pass
try:
import qtpy
import qtpy.QtCore
print('qtpy.API_NAME %s' % (qtpy.API_NAME))
print('qtpy.QtCore %s' % (qtpy.QtCore))
print('qtpy.QtCore.__version__ %s' % (qtpy.QtCore.__version__))
except BaseException:
pass
try:
import sip
print('sip %s' % sip.SIP_VERSION_STR)
except BaseException:
pass
def freezeclass(cls):
""" Decorator to freeze a class
This means that no attributes can be added to the class after instantiation.
"""
cls.__frozen = False
def frozensetattr(self, key, value):
if self.__frozen and not hasattr(self, key):
print("Class {} is frozen. Cannot set {} = {}"
.format(cls.__name__, key, value))
else:
object.__setattr__(self, key, value)
def init_decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self.__frozen = True
return wrapper
cls.__setattr__ = frozensetattr
cls.__init__ = init_decorator(cls.__init__)
return cls
def static_var(varname, value):
""" Helper function to create a static variable
Args:
varname (str)
value (anything)
"""
def decorate(func):
setattr(func, varname, value)
return func
return decorate
@static_var("time", {'default': 0})
def tprint(string, dt=1, output=False, tag='default'):
""" Print progress of a loop every dt seconds
Args:
string (str): text to print
dt (float): delta time in seconds
output (bool): if True return whether output was printed or not
tag (str): optional tag for time
Returns:
output (bool)
"""
if (time.time() - tprint.time.get(tag, 0)) > dt:
print(string)
tprint.time[tag] = time.time()
if output:
return True
else:
return
else:
if output:
return False
else:
return
def partiala(method, **kwargs):
""" Function to perform functools.partial on named arguments """
raise Exception('Use functools.partial instead')
def t(x):
return method(x, **kwargs)
return t
def setFontSizes(labelsize=20, fsize=17, titlesize=None, ax=None,):
""" Update font sizes for a matplotlib plot """
if ax is None:
ax = plt.gca()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for x in [ax.xaxis.label, ax.yaxis.label]: # ax.title,
x.set_fontsize(labelsize)
plt.tick_params(axis='both', which='major', labelsize=fsize)
if titlesize is not None:
ax.title.set_fontsize(titlesize)
plt.draw()
def plotCostFunction(fun, x0, fig=None, marker='.', scale=1, c=None):
""" Plot a cost function on specified data points
Example with variation of Booth's function:
>>> fun = lambda x: 2*(x[0]+2*x[1]-7)**2 + (2*x[0]+x[1]-5)**2
>>> plotCostFunction(fun, np.array([1,3]), fig=100, marker='-')
"""
x0 = np.array(x0).astype(float)
nn = x0.size
if fig is not None:
plt.figure(fig)
scale = np.array(scale)
if scale.size == 1:
scale = scale * np.ones(x0.size)
tt = np.arange(-1, 1, 5e-2)
for ii in range(nn):
val = np.zeros(tt.size)
for jj in range(tt.size):
x = x0.copy()
x[ii] += scale[ii] * tt[jj]
val[jj] = fun(x)
if c is None:
plt.plot(tt, val, marker)
else:
plt.plot(tt, val, marker, color=c[ii])
plt.xlabel('Scaled variables')
plt.ylabel('Value of cost function')
class fps_t:
def __init__(self, nn: int = 40):
""" Class for framerate measurements
Args:
nn: number of time measurements to store
Example usage:
>>> fps = fps_t(nn=8)
>>> for kk in range(12):
... fps.addtime(.2*kk)
>>> fps.show()
framerate: 5.000
"""
self.n = nn
self.tt = np.zeros(self.n)
self.x = np.zeros(self.n)
self.ii = 0
def __repr__(self):
ss = 'fps_t: buffer size %d, framerate %.3f [fps]' % (
self.n, self.framerate())
return ss
def addtime(self, t: Optional[float] = None, x: float = 0):
""" Add a timestamp to the object
Args:
t: Timestamp. If None, use `time.perf_counter`
x: Optional value to store with the timestamp
"""
if t is None:
t = time.perf_counter()
self.ii = self.ii + 1
iim = self.ii % self.n
self.tt[iim] = t
self.x[iim] = x
def value(self) -> float:
""" Return mean of current values """
return self.x.mean()
def iim(self) -> float:
""" Return index modulo number of elements """
return self.ii % self.n
def framerate(self) -> float:
""" Return the current framerate """
iim = self.ii % self.n
iimn = (self.ii + 1) % self.n
dt = self.tt[iim] - self.tt[iimn]
if dt == 0:
return np.NaN
fps = float(self.n - 1) / dt
return fps
def loop(self, s: str = ''):
""" Helper function """
self.addtime(time.time())
        self.showloop(s=s)
def showloop(self, dt: float = 2, s: str = ''):
""" Print current framerate in a loop
The print statement is only executed once every `dt` seconds
"""
fps = self.framerate()
if len(s) == 0:
tprint('loop %d: framerate: %.1f [fps]' % (self.ii, fps), dt=dt)
else:
tprint(
'%s: loop %d: framerate: %.1f [fps]' % (s, self.ii, fps), dt=dt)
def show(self):
""" Print the current framerate """
fps = self.framerate()
print('framerate: %.3f' % fps)
def mkdirc(d):
""" Similar to mkdir, but no warnings if the directory already exists """
try:
os.mkdir(d)
except BaseException:
pass
return d
def projectiveTransformation(H, x):
""" Apply a projective transformation to a kxN array
>>> y = projectiveTransformation( np.eye(3), np.random.rand( 2, 10 ))
"""
k = x.shape[0]
kout = H.shape[0] - 1
xx = x.transpose().reshape((-1, 1, k))
    if np.issubdtype(xx.dtype, np.integer):
xx = xx.astype(np.float32)
if xx.size > 0:
ww = cv2.perspectiveTransform(xx, H)
ww = ww.reshape((-1, kout)).transpose()
return ww
else:
return copy.copy(x)
def rottra2mat(rot, tra):
""" create 4x4 matrix from 3x3 rot and 1x3 tra """
out = np.eye(4)
out[0:3, 0:3] = rot
out[0:3, 3] = tra.transpose()
return out
def breakLoop(wk=None, dt=0.001, verbose=0):
""" Break a loop using OpenCV image feedback """
if wk is None:
wk = cv2.waitKey(1)
time.sleep(dt)
wkm = wk % 256
if wkm == 27 or wkm == ord('q') or wk == 1048689:
if verbose:
print('breakLoop: key q pressed, quitting loop')
return True
return False
def hom(x):
""" Create affine to homogeneous coordinates
Args:
x (kxN array): affine coordinates
Returns:
h ( (k+1xN) array): homogeneous coordinates
"""
nx = x.shape[1]
return np.vstack((x, np.ones(nx)))
def dehom(x):
""" Convert homogeneous points to affine coordinates """
return x[0:-1, :] / x[-1, :]
def null(a, rtol=1e-5):
""" Calculate null space of a matrix """
u, s, v = np.linalg.svd(a)
rank = (s > rtol * s[0]).sum()
return rank, v[rank:].T.copy()
def intersect2lines(l1, l2):
""" Calculate intersection between 2 lines
Args:
l1 (array): first line in homogeneous format
        l2 (array): second line in homogeneous format
Returns:
array: intersection in homogeneous format. To convert to affine coordinates use `dehom`
"""
r = null(np.vstack((l1, l2)))
return r[1]
def runcmd(cmd, verbose=0):
""" Run command and return output """
output = subprocess.check_output(cmd, shell=True)
return output
# %% Geometry functions
def angleDiff(x, y):
""" Return difference between two angles in radians modulo 2* pi
>>> d=angleDiff( 0.01, np.pi+0.02)
>>> d=angleDiff( 0.01, 2*np.pi+0.02)
"""
return np.abs(((x - y + np.pi) % (2 * np.pi)) - np.pi)
def angleDiffOri(x, y):
""" Return difference between two angles in radians modulo pi
>>> d=angleDiff( 0.01, np.pi+0.02)
>>> d=angleDiff( 0.01, 2*np.pi+0.02)
"""
return np.abs(((x - y + np.pi / 2) % (np.pi)) - np.pi / 2)
def opencvpose2attpos(rvecs, tvecs):
tvec = np.array(tvecs).flatten()
rvec = np.array(rvecs).flatten()
R, tmp = cv2.Rodrigues(rvec)
att = RBE2euler(R)
pos = -R.transpose().dot(np.array(tvec.reshape((3, 1))))
return att, pos
def opencv2TX(rvecs, tvecs):
""" Convert OpenCV pose to homogenous transform """
T = np.array(np.eye(4))
R = cv2.Rodrigues(rvecs)[0]
T[0:3, 0:3] = R
T[0:3, 3:4] = tvecs
return T
def opencv2T(rvec, tvec):
""" Convert OpenCV pose to homogenous transform """
T = np.array(np.eye(4))
T[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
T[0:3, 3] = tvec
return T
def T2opencv(T):
""" Convert transformation to OpenCV rvec, tvec pair
Example
-------
>>> rvec, tvec = T2opencv(np.eye(4))
"""
rvec = cv2.Rodrigues(T[0:3, 0:3])[0]
tvec = T[0:3, 3]
return rvec, tvec
def euler2RBE(theta):
""" Convert Euler angles to rotation matrix
Example
-------
>>> np.set_printoptions(precision=4, suppress=True)
>>> euler2RBE( [0,0,np.pi/2] )
array([[ 0., -1., 0.],
[ 1., 0., 0.],
[-0., 0., 1.]])
"""
cr = math.cos(theta[0])
sr = math.sin(theta[0])
cp = math.cos(theta[1])
sp = math.sin(theta[1])
cy = math.cos(theta[2])
sy = math.sin(theta[2])
out = np.array([cp * cy, sr * sp * cy - cr * sy, cr * sp * cy + sr * sy,
cp * sy, sr * sp * sy + cr * cy, cr * sp * sy - sr * cy, -sp, sr * cp, cr * cp])
return out.reshape((3, 3))
def RBE2euler(Rbe):
""" Convert rotation matrix to Euler angles """
out = np.zeros([3, 1])
out[0, 0] = math.atan2(Rbe[2, 1], Rbe[2, 2])
out[1, 0] = -math.asin(Rbe[2, 0])
out[2, 0] = math.atan2(Rbe[1, 0], Rbe[0, 0])
return out
# %% Helper functions
def pg_rotation2H(R):
""" Convert rotation matrix to homogenous transform matrix """
X = np.array(np.eye(R.shape[0] + 1))
X[0:-1, 0:-1] = R
return X
def directionMean(vec):
""" Calculate the mean of a set of directions
The initial direction is determined using the oriented direction. Then a non-linear optimization is done.
Args:
vec: List of directions
Returns
Angle of mean of directions
>>> vv=np.array( [[1,0],[1,0.1], [-1,.1]])
>>> a=directionMean(vv)
"""
vec = np.array(vec)
def dist(a, vec):
phi = np.arctan2(vec[:, 0], vec[:, 1])
x = a - phi
x = np.mod(x + np.pi / 2, np.pi) - np.pi / 2
cost = np.linalg.norm(x)
return cost
Nfeval = 1
def callbackF(Xi):
global Nfeval
print(Xi)
print(f'{Nfeval:4d} {Xi[0]: 3.6f}: distance {dist(Xi[0], vec)}')
Nfeval += 1
m = vec.mean(axis=0)
a0 = np.arctan2(m[0], m[1])
def cost_function(a): return dist(a, vec)
r = scipy.optimize.minimize(cost_function, a0, callback=None, options=dict({'disp': False}))
angle = r.x[0]
return angle
def circular_mean(weights, angles):
""" Calculate circular mean of a set of 2D vectors """
x = y = 0.
for angle, weight in zip(angles, weights):
x += math.cos(math.radians(angle)) * weight
y += math.sin(math.radians(angle)) * weight
mean = math.degrees(math.atan2(y, x))
return mean
def dir2R(d, a=None):
""" Convert direction to rotation matrix
Note: numerically not stable near singular points!
Arguments:
        d (numpy array of size 3): target direction (the vector to rotate a onto)
        a (numpy array of size 3): source direction
Returns:
R (3x3 numpy array): matrix R such that R*a = d
Example:
>>> d = np.array([0, 1, 0]); a = np.array([0, -1, 0])
>>> R = dir2R(d, a)
<NAME> <<EMAIL>>
"""
# set target vector
if a is None:
a = np.array([0, 0, 1])
# normalize
b = d.reshape((3, 1)) / np.linalg.norm(d)
a = a.reshape((3, 1))
c = np.cross(a.flat, b.flat)
if np.linalg.norm(c) < 1e-12 and a.T.dot(b) < .01:
# deal with singular case
if(np.linalg.norm(a[1:]) < 1e-4):
R0 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
else:
R0 = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
a = R0.dot(a)
bt = (a + b) / np.linalg.norm(a + b)
R = np.eye(3) - 2 * a.dot(a.T) - 2 * \
(bt.dot(bt.T)).dot(np.eye(3) - 2 * a.dot(a.T))
R = R.dot(R0)
else:
bt = (a + b) / np.linalg.norm(a + b)
R = np.eye(3) - 2 * a.dot(a.T) - 2 * \
(bt.dot(bt.T)).dot(np.eye(3) - 2 * a.dot(a.T))
return R
def frame2T(f):
""" Convert frame into 4x4 transformation matrix """
T = np.array(np.eye(4))
T[0:3, 0:3] = euler2RBE(f[3:7])
T[0:3, 3] = f[0:3].reshape(3, 1)
return T
@static_var("b", np.array(np.zeros((2, 2))))
def rot2D(phi):
""" Return 2x2 rotation matrix from angle
Arguments
---------
phi : float
Angle in radians
Returns
-------
R : array
The 2x2 rotation matrix
Examples
--------
>>> R = rot2D(np.pi)
"""
r = rot2D.b.copy()
c = math.cos(phi)
s = math.sin(phi)
r.itemset(0, c)
r.itemset(1, -s)
r.itemset(2, s)
r.itemset(3, c)
return r
def pg_rotx(phi):
""" Rotate around the x-axis with angle """
c = math.cos(phi)
s = math.sin(phi)
R = np.zeros((3, 3))
R.flat = [1, 0, 0, 0, c, -s, 0, s, c]
return R
def pcolormesh_centre(x, y, im, *args, **kwargs):
""" Wrapper for pcolormesh to plot pixel centres at data points """
dx = np.diff(x)
dy = np.diff(y)
dx = np.hstack((dx[0], dx, dx[-1]))
dy = np.hstack((dy[0], dy, dy[-1]))
xx = np.hstack((x, x[-1] + dx[-1])) - dx / 2
yy = np.hstack((y, y[-1] + dy[-1])) - dy / 2
plt.pcolormesh(xx, yy, im, *args, **kwargs)
def imshowz(im, *args, **kwargs):
""" Show image with interactive z-values """
plt.imshow(im, *args, **kwargs)
sz = im.shape
numrows, numcols = sz[0], sz[1]
def format_coord(x, y):
col = int(x + 0.5)
row = int(y + 0.5)
if col >= 0 and col < numcols and row >= 0 and row < numrows:
z = im[row, col]
try:
if len(z) == 1:
return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
else:
return 'x=%1.4f, y=%1.4f, z=%s' % (x, y, str(z))
except BaseException:
return 'x=%1.4f, y=%1.4f, z=%s' % (x, y, str(z))
else:
return 'x=%1.4f, y=%1.4f' % (x, y)
ax = plt.gca()
ax.format_coord = format_coord
def pg_scaling(scale, cc=None):
""" Create scaling with specified centre
Example
-------
>>> pg_scaling( [1.,2])
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 1.]])
"""
scale = np.array(scale)
scale = np.hstack((scale, 1))
H = np.diag(scale)
if cc is not None:
cc = np.array(cc).flatten()
H = pg_transl2H(cc).dot(H).dot(pg_transl2H(-cc))
return H
def pg_transl2H(tr):
""" Convert translation to homogeneous transform matrix
>>> pg_transl2H( [1,2])
array([[ 1., 0., 1.],
[ 0., 1., 2.],
[ 0., 0., 1.]])
"""
sh = np.array(tr)
H = np.eye(sh.size + 1)
H[0:-1, -1] = sh.flatten()
H = np.array(H)
return H
def setregion(im, subim, pos, mask=None, clip=False):
""" Set region in Numpy image
Arguments
---------
im : Numpy array
image to fill region in
subim : Numpy array
subimage
pos: array
position to place image
mask (None or array): mask to use for the subimage
clip (bool): if True clip the subimage where necessary to fit
"""
h = subim.shape[0]
w = subim.shape[1]
x1 = int(pos[0])
y1 = int(pos[1])
x2 = int(pos[0]) + w
y2 = int(pos[1]) + h
if clip:
x1 = max(x1, 0)
y1 = max(y1, 0)
x2 = min(x2, im.shape[1])
y2 = min(y2, im.shape[0])
w = max(0, x2 - x1)
h = max(0, y2 - y1)
if mask is None:
if len(im.shape) == len(subim.shape):
im[y1:y2, x1:x2, ...] = subim[0:h, 0:w]
else:
im[y1:y2, x1:x2, ...] = subim[0:h, 0:w, np.newaxis]
else:
if len(im.shape) > len(mask.shape):
im[y1:y2, x1:x2] = im[y1:y2, x1:x2] * \
(1 - mask[:, :, np.newaxis]) + (subim * mask[:, :, np.newaxis])
else:
if len(im.shape) == len(subim.shape):
im[y1:y2, x1:x2, ...] = im[y1:y2, x1:x2, ...] * \
(1 - mask[:, :]) + (subim * mask[:, :])
else:
im[y1:y2, x1:x2, ...] = im[y1:y2, x1:x2, ...] * \
(1 - mask[:, :]) + (subim[:, :, np.newaxis] * mask[:, :])
return im
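# Minimal usage sketch for setregion (hypothetical arrays; assumes the
# numpy import at the top of this module):
# canvas = np.zeros((100, 100), dtype=np.uint8)
# patch = 255 * np.ones((10, 10), dtype=np.uint8)
# canvas = setregion(canvas, patch, pos=(5, 5))  # paste patch with its corner at x=5, y=5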
def region2poly(rr):
""" Convert a region (bounding box xxyy) to polygon """
if isinstance(rr, tuple) or isinstance(rr, list):
# x,y,x2,y2 format
rr = np.array(rr).reshape((2, 2)).transpose()
poly = np.array([rr[:, 0:1], np.array([[rr[0, 1]], [rr[1, 0]]]), rr[
:, 1:2], np.array([[rr[0, 0]], [rr[1, 1]]]), rr[:, 0:1]]).reshape((5, 2)).T
return poly
poly = rr.flat[[0, 1, 1, 0, 0, 2, 2, 3, 3, 2]].reshape((2, 5))
return poly
def plotLabels(xx, *args, **kwargs):
""" Plot labels next to points
Args:
xx (2xN array): points to plot
*kwargs: arguments past to plotting function
Example:
>>> xx=np.random.rand(2, 10)
>>> fig=plt.figure(10); plt.clf()
>>> _ = plotPoints(xx, '.b'); _ = plotLabels(xx)
"""
if len(np.array(xx).shape) == 1 and xx.shape[0] == 2:
xx = xx.reshape((2, 1))
if xx.shape[0] > 2 and xx.shape[1] == 2:
xx = xx.T
if len(args) == 0:
v = range(0, xx.shape[1])
lbl = ['%d' % i for i in v]
else:
lbl = args[0]
if isinstance(lbl, int):
lbl = [str(lbl)]
elif isinstance(lbl, str):
lbl = [str(lbl)]
nn = xx.shape[1]
ax = plt.gca()
th = [None] * nn
for ii in range(nn):
lbltxt = str(lbl[ii])
th[ii] = ax.annotate(lbltxt, xx[:, ii], **kwargs)
return th
def plotPoints(xx, *args, **kwargs):
""" Plot 2D or 3D points
Args:
xx (array): array of points to plot
*args: arguments passed to the plot function of matplotlib
**kwargs: arguments passed to the plot function of matplotlib
Example:
>>> plotPoints(np.random.rand(2,10), '.-b')
"""
if xx.shape[0] == 2:
h = plt.plot(xx[0, :], xx[1, :], *args, **kwargs)
elif xx.shape[0] == 3:
h = plt.plot(xx[0, :], xx[1, :], xx[2, :], *args, **kwargs)
elif xx.shape[0] == 1:  # elif keeps earlier matches from falling through to h = None
h = plt.plot(xx[0, :], *args, **kwargs)
else:
h = None
return h
def plot2Dline(line, *args, **kwargs):
""" Plot a 2D line in a matplotlib figure
Args:
line (3x1 array): line to plot
>>> plot2Dline([-1,1,0], 'b')
"""
if np.abs(line[1]) > .001:
xx = plt.xlim()
xx = np.array(xx)
yy = (-line[2] - line[0] * xx) / line[1]
plt.plot(xx, yy, *args, **kwargs)
else:
yy = np.array(plt.ylim())
xx = (-line[2] - line[1] * yy) / line[0]
plt.plot(xx, yy, *args, **kwargs)
# %%
def scaleImage(image, display_min=None, display_max=None):
""" Scale any image into uint8 range
Args:
image (numpy array): input image
display_min (float): value to map to min output range
display_max (float): value to map to max output range
Returns:
image (numpy array): the scaled image
Example:
>>> im=scaleImage(255*np.random.rand( 30,40), 40, 100)
Code modified from: https://stackoverflow.com/questions/14464449/using-numpy-to-efficiently-convert-16-bit-image-data-to-8-bit-for-display-with?noredirect=1&lq=1
"""
image = np.array(image, copy=True)
if display_min is None:
display_min = np.percentile(image, .15)
if display_max is None:
display_max = np.percentile(image, 99.85)
if display_max == display_min:
display_max = np.max(image)
image.clip(display_min, display_max, out=image)
image = image.astype(float)
image -= display_min
image //= (display_max - display_min) / 255.
image = image.astype(np.uint8)  # cast back for both integer and float inputs
return image
def auto_canny(image, sigma=0.33):
""" Canny edge detection with automatic parameter detection
>>> imc=auto_canny(np.zeros( (200,300)).astype(np.uint8))
Arguments
---------
image : array
input image
Returns
-------
edged : array
detected edges
Code from: http://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
"""
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
return edged
# %% Plotting functions
def orthogonal_proj(zfront, zback):
""" see http://stackoverflow.com/questions/23840756/how-to-disable-perspective-in-mplot3d """
a = (zfront + zback) / (zfront - zback)
b = -2 * (zfront * zback) / (zfront - zback)
return numpy.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, a, b],
[0, 0, -1e-9, zback]])
def plotPoints3D(xx, *args, **kwargs):
""" Plot 3D points
Arguments
---------
xx: 3xN array
the 3D data points
Example
-------
>> ax=plotPoints3D(np.random.rand(3, 1) ,'.r', markersize=10, fig=12)
"""
fig = kwargs.get('fig', None)
verbose = kwargs.get('verbose', 0)
if 'fig' in kwargs.keys():
kwargs.pop('fig')
if 'verbose' in kwargs.keys():
kwargs.pop('verbose')
if verbose:
print('plotPoints3D: using fig %s' % fig)
print('plotPoints3D: using args %s' % args)
if fig is None:
ax = p.gca()
else:
fig = p.figure(fig)
ax = fig.gca(projection='3d')
r = ax.plot(np.ravel(xx[0, :]), np.ravel(xx[1, :]),
np.ravel(xx[2, :]), *args, **kwargs)
p.draw()
return ax
# %%
def polyarea(p):
""" Return signed area of polygon
Arguments
---------
p : Nx2 numpy array or list of vertices
vertices of polygon
Returns
-------
area : float
area of polygon
>>> polyarea( [ [0,0], [1,0], [1,1], [0,2]] )
1.5
"""
if len(p) <= 1:
return 0
if isinstance(p, numpy.ndarray):
val = 0
for x in range(len(p)):
x0 = p[x, 0]
y0 = p[x, 1]
xp = x + 1
if xp >= len(p):
xp = 0
x1 = p[xp, 0]
y1 = p[xp, 1]
val += 0.5 * (x0 * y1 - x1 * y0)
return val
def polysegments(p):
""" Helper functions """
if isinstance(p, list):
return zip(p, p[1:] + [p[0]])
else:
return zip(p, np.vstack((p[1:], p[0:1])))
# list input: shoelace formula over consecutive vertex pairs
return 0.5 * abs(sum(x0 * y1 - x1 * y0 for ((x0, y0), (x1, y1)) in polysegments(p)))
def polyintersect(x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
""" Calculate intersection of two polygons
Args:
x1: First polygon. Shape is (N, 2) with N the number of vertices
x2: Second polygon
Returns:
Intersection of both polygons
Raises:
ValueError if the intersection consists of multiple polygons
>>> x1=np.array([(0, 0), (1, 1), (1, 0)] )
>>> x2=np.array([(1, 0), (1.5, 1.5), (.5, 0.5)])
>>> x=polyintersect(x1, x2)
>>> _=plt.figure(10); plt.clf()
>>> plotPoints(x1.T, '.:r' )
>>> plotPoints(x2.T, '.:b' )
>>> plotPoints(x.T, '.-g' , linewidth=2)
"""
p1 = shapely.geometry.Polygon(x1)
p2 = shapely.geometry.Polygon(x2)
p = p1.intersection(p2)
if p.is_empty:
return np.zeros((0, 2))
if isinstance(p, shapely.geometry.multipolygon.MultiPolygon):
raise ValueError('intersection of polygons is not a simple polygon')
intersection_polygon = np.array(p.exterior.coords)
return intersection_polygon
# %%
def opencv_draw_points(bgr, imgpts, drawlabel=True, radius=3, color=(255, 0, 0), thickness=-1, copyimage=True):
""" Draw points on image with opencv
Arguments
---------
bgr : numpy array
image to draw points into
imgpts : array
locations of points to plot
"""
if copyimage:
out = bgr.copy()
else:
out = bgr
fscale = .5 + .5 * (radius * 0.2)
fthickness = int(fscale + 1)
for i, pnt in enumerate(imgpts):
tpnt = tuple(pnt.ravel())
cv2.circle(out, tpnt, radius, color, thickness)
if(drawlabel):
cv2.putText(
out, '%d' % (i + 1), tpnt, cv2.FONT_HERSHEY_SIMPLEX, fscale, color, fthickness)
return out
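# Usage sketch (hypothetical inputs; assumes a BGR uint8 image and an
# (N, 1, 2) integer point array such as cv2.projectPoints returns):
# annotated = opencv_draw_points(bgr_img, imgpts.astype(int), radius=4)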
def enlargelims(factor=1.05):
""" Enlarge the limits of a plot
Args:
factor (float or list of float): Factor to expand the limits of the current plot
Example:
>>> enlargelims(1.1)
"""
if isinstance(factor, float):
factor = [factor, factor]  # same expansion in x and y
xl = plt.xlim()
d = (factor[0] - 1) * (xl[1] - xl[0]) / 2
xl = (xl[0] - d, xl[1] + d)
plt.xlim(xl)
yl = plt.ylim()
d = (factor[1] - 1) * (yl[1] - yl[0]) / 2
yl = (yl[0] - d, yl[1] + d)
plt.ylim(yl)
def finddirectories(p, patt):
""" Get a list of files """
lst = os.listdir(p)
rr = re.compile(patt)
lst = [l for l in lst if re.match(rr, l)]
lst = [l for l in lst if os.path.isdir(os.path.join(p, l))]
return lst
def _walk_calc_progress(progress, root, dirs):
""" Helper function """
prog_start, prog_end, prog_slice = 0.0, 1.0, 1.0
current_progress = 0.0
parent_path, current_name = os.path.split(root)
data = progress.get(parent_path)
if data:
prog_start, prog_end, subdirs = data
i = subdirs.index(current_name)
prog_slice = (prog_end - prog_start) / len(subdirs)
current_progress = prog_slice * i + prog_start
if i == (len(subdirs) - 1):
del progress[parent_path]
if dirs:
progress[root] = (current_progress, current_progress + prog_slice, dirs)
return current_progress
def findfilesR(p, patt, show_progress=False):
""" Get a list of files (recursive)
Args:
p (string): directory
patt (string): pattern to match
show_progress (bool)
Returns:
lst (list of str)
"""
lst = []
rr = re.compile(patt)
progress = {}
for root, dirs, files in os.walk(p, topdown=True):
frac = _walk_calc_progress(progress, root, dirs)
if show_progress:
tprint('findfilesR: %s: %.1f%%' % (p, 100 * frac))
lst += [os.path.join(root, f) for f in files if re.match(rr, f)]
return lst
def signedsqrt(val):
""" Signed square root function
>>> signedsqrt([-4.,4,0])
array([-2., 2., 0.])
>>> signedmin(-10, 5)
-5
"""
val =
|
np.sign(val)
|
numpy.sign
|
from __future__ import division, print_function
import time
import cPickle
import gzip
import sys
import numpy as np
import mlp
reload(mlp)
from mlp import MLP
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix
# Found at: http://scikit-learn.org/stable/auto_examples/model_selection/...
# plot_confusion_matrix.html
"""
We have decided to plot a confusion matrix to see where the mispredictions
occur. The documentation for this function can be found at the link above.
"""
def plot_confusion_matrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
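# Usage sketch (hypothetical label arrays; confusion_matrix is the sklearn
# function imported above):
# cm = confusion_matrix(y_true, y_pred)
# plot_confusion_matrix(cm, classes=range(10))
# plt.show()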
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
x_data = train_set[0]
mean_image = np.mean(x_data, axis=0)
x_data -= mean_image
x_data = x_data / 255
t_data = train_set[1]
nb_data = t_data.shape[0]
one_hot_tdata = np.zeros((nb_data, 10))
one_hot_tdata[np.arange(nb_data), t_data] = 1
K_list = [784, 100, 50, 10]
activation_functions = [MLP.relu] * 2 + [MLP.sigmoid]
diff_activation_functions = [MLP.drelu] * 2
mlp = MLP(K_list,
activation_functions,
diff_activation_functions,
init_seed=5)
if 1:
x_test, t_test = test_set
nb_epochs = 250
for epoch in range(nb_epochs):
initialize_weights = (epoch == 0)
now = time.time()
mlp.train(x_data, one_hot_tdata,
epochs=1,
batch_size=60,
initialize_weights=initialize_weights,
eta=0.01,
beta=0,
method='adam',
print_cost=True)
time_passed = time.time() - now
print(time_passed)
mlp.get_activations_and_units((x_test - mean_image) / 255)
nb_correct = np.sum(np.equal(t_test,
|
np.argmax(mlp.y, axis=1)
|
numpy.argmax
|
'''
captcha reader demo
captcha with 4 or 5 char, random color for every char, random rotate some degree, see `example-images/example-captcha.png`
make it binary, see `example-images/example-binary.png`
use opencv findcontontours to cut out every char image, see `example-images/example-split-*.png`
then use tensorflow to train and read the test images
'''
import tensorflow as tf
from tensorflow import keras
import numpy as np
from PIL import Image
from img.imageGenerator import createImg, TEXT_IMAGE_SIZE, CHAR_POOL, CHAR_INDEX_DIC, CHAR_DIC
from img.imageGrouping import imageSplit
import threading
from threading import Thread
import multiprocessing as mp
def rgb2int(arr):
'''
convert an rgb color array to a single grayscale intensity
using the standard luma weights: [r, g, b] => 0.299*r + 0.587*g + 0.114*b
'''
R = arr[0]
G = arr[1]
B = arr[2]
return R * 299/1000 + G * 587/1000 + B * 114/1000
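# Sanity check of the luma weights above: rgb2int([255, 255, 255]) gives
# 255 * (0.299 + 0.587 + 0.114) = 255, i.e. pure white maps to full intensity.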
def convertToDataArray(img, i, j, ch):
'''
convert image to data array
and resize to 28*28
'''
BG_COLOR = (255, 255, 255)
base = Image.new('RGB', TEXT_IMAGE_SIZE, color = BG_COLOR)
img = img.convert('RGB')  # PIL's convert returns a new image rather than modifying in place
size = img.size
left = int((TEXT_IMAGE_SIZE[0] - size[0]) / 2)
top = int((TEXT_IMAGE_SIZE[1] - size[1]) / 2)
if left < 0: left = 0
if top < 0: top = 0
base.paste(img, box=(left, top))
shouldSave = i < 1
if shouldSave:
img.save(
'example-resized-char-b' + str(i) +
'-' + str(j) + '-' + ch + '.png'
)
base.save(
'example-resized-char' + str(i) +
'-' + str(j) + '-' + ch +'.png'
)
arr = np.array(base)
arr = arr.reshape((TEXT_IMAGE_SIZE[0] * TEXT_IMAGE_SIZE[1], 3))
arr1 = []
for x in range(len(arr)):
a = arr[x]
arr1.append(
rgb2int(a)
)
arr1 = (255 -
|
np.array(arr1)
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import random
from skimage import measure
import torch
from torchvision import utils
def make_numpy_grid(tensor_data):
# tensor_data: b x c x h x w, [0, 1], tensor
tensor_data = tensor_data.detach()
vis = utils.make_grid(tensor_data)
vis = np.array(vis.cpu()).transpose((1,2,0))
if vis.shape[2] == 1:
vis = np.concatenate([vis, vis, vis], axis=2)  # np.stack here would add a spurious 4th axis
return vis
def cpt_ssim(img, img_gt, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
SSIM = measure.compare_ssim(img, img_gt, data_range=1.0)
return SSIM
def cpt_psnr(img, img_gt, PIXEL_MAX=1.0, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
mse = np.mean((img - img_gt) ** 2)
psnr = 20 * np.log10(PIXEL_MAX / np.sqrt(mse))
return psnr
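# Worked example of the PSNR formula above: with PIXEL_MAX = 1.0 and an
# MSE of 0.01, psnr = 20 * log10(1.0 / sqrt(0.01)) = 20 * log10(10) = 20 dB.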
def cpt_cos_similarity(img, img_gt, normalize=False):
if normalize:
img = (img - img.min()) / (img.max() - img.min() + 1e-9)
img_gt = (img_gt - img_gt.min()) / (img_gt.max() - img_gt.min() + 1e-9)
cos_dist = np.sum(img*img_gt) / np.sqrt(np.sum(img**2)*np.sum(img_gt**2) + 1e-9)
return cos_dist
def cpt_batch_psnr(img, img_gt, PIXEL_MAX):
mse = torch.mean((img - img_gt) ** 2)
psnr = 20 * torch.log10(PIXEL_MAX / torch.sqrt(mse))
return psnr
def cpt_batch_classification_acc(predicted, target):
# predicted: b x c, logits [-inf, +inf]
pred_idx = torch.argmax(predicted, dim=1).int()
pred_idx = torch.reshape(pred_idx, [-1])
target = torch.reshape(target, [-1])
return torch.mean((pred_idx.int()==target.int()).float())
def normalize(img, mask=None, p_min=0, p_max=0):
# img: h x w, [0, 1], np.float32
if mask is None:
sorted_arr = np.sort(img, axis=None) # sort the flattened array
else:
sorted_arr = np.sort(img[mask == 1], axis=None) # sort the flattened array
n = len(sorted_arr)
img_min = sorted_arr[int(n*p_min)]
img_max = sorted_arr[::-1][int(n*p_max)]
img_norm = (img - img_min) / (img_max - img_min + 1e-6)
return np.clip(img_norm, a_min=0, a_max=1.0)
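# Note: with the defaults p_min = p_max = 0, normalize reduces to plain
# min-max scaling, since img_min is the smallest pixel (sorted_arr[0]) and
# img_max the largest (sorted_arr[::-1][0]), so the output spans [0, 1].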
def get_sub_pxl_values(img, ys, xs):
# img: h x w x c, [0, 1], np.float32
h, w, c = img.shape
xs0, ys0, xs1, ys1 = xs.astype(int), ys.astype(int), xs.astype(int) + 1, ys.astype(int) + 1
xs1 = np.clip(xs1, a_min=0, a_max=w - 1)
ys1 = np.clip(ys1, a_min=0, a_max=h - 1)
dx = (xs - xs0).astype(np.float32)
dy = (ys - ys0).astype(np.float32)
weight_tl = (1.0 - dx) * (1.0 - dy)
weight_tr = (dx) * (1.0 - dy)
weight_bl = (1.0 - dx) * (dy)
weight_br = (dx) * (dy)
weight_tl = np.expand_dims(weight_tl, axis=-1)
weight_tr = np.expand_dims(weight_tr, axis=-1)
weight_bl = np.expand_dims(weight_bl, axis=-1)
weight_br = np.expand_dims(weight_br, axis=-1)
pxl_values = weight_tl * img[ys0, xs0, :] + \
weight_tr * img[ys0, xs1, :] + \
weight_bl * img[ys1, xs0, :] + \
weight_br * img[ys1, xs1, :]
return pxl_values
class VideoWriter:
def __init__(self, fname='./demo.mp4',
h=760, w=1280,
frame_rate=10, bottom_crop=False,
layout='default', display=True):
self.w = int(w)
self.h = int(h)
self.bottom_crop = bottom_crop
self.layout = layout
self.display = display
self.video_writer = cv2.VideoWriter(
fname, cv2.VideoWriter_fourcc(*'MP4V'), frame_rate,
(self.w, self.h))
def write_frame(self, img_after, img_before=None, idx=None):
if img_after.shape[0] != self.h or img_after.shape[1] != self.w:
img_after = cv2.resize(img_after, (self.w, self.h))
if img_before is not None:
img_before = cv2.resize(img_before, (self.w, self.h))
if self.layout == 'default':
img = img_after
if self.layout == 'transfer':
img =
|
np.zeros_like(img_after)
|
numpy.zeros_like
|
'''The statop module handles calculations where uncertainty has to be handled.
All uncertainties are currently propagated in quadrature, so they take the
form "\sigma_y = \sqrt{\sum_i \left(\frac{\partial y}{\partial x_i}\right)^2 \sigma_{x_i}^2}". The
functionality in this module would be AWESOME if it were transferred to a
class. However, that's a lot of scaffolding I don't want to deal with right
now. But subclassing Quantity may be worth it in the future, when I'm less
invested in the way things are now.
These functions take a set of three arguments for each variable: the value, the
error, and a code indicating whether it's a limit or not. The codes are given
in the exported constants: UPPER_LIMIT_SYMBOL, LOWER_LIMIT_SYMBOL,
DATA_POINT_SYMBOL, and NO_LIMIT_SYMBOL, abbreviated as UPPER, LOWER, DETECTION,
and NA, respectively. By referencing these constants, it should not be
necessary to use the character symbols themselves, except for debugging
purposes.
The syntax for most of these functions will take the form: func(*values,
*errors, *limits). When exceptions to this form occurs, check the docstring.
The function returns a 3-tuple containing the new value, error and limit.
'''
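# Worked example of the quadrature rule above (a sketch, using the add()
# function defined later in this module): summing 1.0 +/- 0.3 and
# 2.0 +/- 0.4 gives an error of sqrt(0.3**2 + 0.4**2) = 0.5, so the
# result is 3.0 +/- 0.5.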
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Column
UPPER_LIMIT_SYMBOL = 'u'
LOWER_LIMIT_SYMBOL = 'l'
DATA_POINT_SYMBOL = 'd'
NO_LIMIT_SYMBOL = 'n'
UPPER = UPPER_LIMIT_SYMBOL
LOWER = LOWER_LIMIT_SYMBOL
NA = NO_LIMIT_SYMBOL
DETECTION = DATA_POINT_SYMBOL
def generate_limit(testlim, length):
'''If testlim is None, generate an array of default limits with length.
If testlim is valid, then it will be returned.
'''
if testlim is None:
testlim = np.array([DETECTION]*length)
return testlim
def invert_limits(limits):
'''Toggles between upper and lower limits.
UPPER and LOWER limits will switch, while valid/unconstrained values will
remain as they were.
'''
newlimits = np.array(limits, subok=True)
upperindices = np.where(limits == UPPER)
lowerindices = np.where(limits == LOWER)
# For the case when limits is not an array, but just a float.
try:
newlimits[upperindices] = LOWER
newlimits[lowerindices] = UPPER
except IndexError:
if upperindices[0].shape[0] == 1:
newlimits = LOWER
elif lowerindices[0].shape[0] == 1:
newlimits = UPPER
elif limits == DETECTION or limits == NA:
newlimits = limits
else:
raise ValueError("Limit is not recognizable")
return newlimits
def combine_limits(lim1, lim2):
'''Combines arrays of limits according to combine_limit.
See combine_limit for the algebra
'''
limitlist = [combine_limit(v1, v2) for (v1, v2) in zip(lim1, lim2)]
if isinstance(lim1, Column) and isinstance(lim2, Column):
newlimits = Column(limitlist)
else:
newlimits = np.array(limitlist)
return newlimits
def combine_inverted_limits(lim1, lim2):
'''This is used for cases where one of the limits needs to be flipped.
This is common in cases like subtraction or division. Basically if one of
the operations is monotonic decreasing.
'''
return combine_limits(lim1, invert_limits(lim2))
def combine_limit(lim1, lim2):
'''Combines limits in a logically valid way.
The set of rules which govern limits are:
u + u -> u
u + l -> n
u + 0 -> u
u + n -> n
l + u -> n
l + l -> l
l + 0 -> l
l + n -> n
0 + u -> u
0 + l -> l
0 + 0 -> 0
0 + n -> n
n + u -> n
n + l -> n
n + 0 -> n
n + n -> n
'''
# Implementation details.
# Utilizing the symmetric property of these will only require cases for:
## u + u -> u
## u + l -> n
## u + 0 -> u
## u + n -> n
## l + l -> l
## l + 0 -> l
## l + n -> n
## 0 + 0 -> 0
## 0 + n -> n
## n + n -> n
# This makes 10 relations
# One easy thing to program is
if lim2 == NA:
return NA
# 6 left
elif lim1 == lim2:
return lim1
# 3 left
elif lim2 == DETECTION:
return lim1
# 1 left
elif lim1 == UPPER and lim2 == LOWER:
return NA
else:
return combine_limit(lim2, lim1)
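# A few spot checks of the limit algebra above (symbols as defined at the
# top of this module):
# combine_limit(UPPER, UPPER) -> UPPER ('u')
# combine_limit(UPPER, LOWER) -> NA ('n')
# combine_limit(DETECTION, LOWER) -> LOWER ('l')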
def subtract(minuend, subtrahend, minuerr, subtraerr,
minulim=None, subtralim=None):
'''Returns statistically subtracted value of two arrays.
This function takes two arrays involving two measurements with errors which
may be upper or lower limits. It then returns a 3-tuple. The first element
is simply the difference of the values. The second is the error of the
difference. And the third represents whether the differences are limits or
not.
If limits are not given, then the third element will simply be limits
indicating all data points are valid.
'''
try:
minulim = generate_limit(minulim, len(minuend))
except TypeError:
minulim = generate_limit(minulim, 1)[0]
try:
subtralim = generate_limit(subtralim, len(subtrahend))
except TypeError:
subtralim = generate_limit(subtralim, 1)[0]
difference, differr, difflim = add(
minuend, -subtrahend, minuerr, subtraerr, minulim,
invert_limits(subtralim))
return (difference, differr, difflim)
def add(augend, addend, augerr, adderr, auglim=None, addlim=None):
'''Returns the statistically summed value of two arrays.
This function takes two arrays involving two measurements with errors. It
then returns a 2-tuple. The first value is simply the sum, and the second
is the error on the sum.
'''
try:
auglim = generate_limit(auglim, len(augend))
except TypeError:
auglim = generate_limit(auglim, 1)[0]
try:
addlim = generate_limit(addlim, len(addend))
except TypeError:
addlim = generate_limit(addlim, 1)[0]
sums = augend + addend
sumerr =
|
np.sqrt(augerr**2 + adderr**2)
|
numpy.sqrt
|
# MIT License
#
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 Udacity
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.nn as nn
import numpy as np
import os
from string import punctuation
from collections import Counter
# IMPORTANT: change this to where 'data' folder is located
HOME = 'deep-learning-v2-pytorch/sentiment-rnn'
def get_path(path):
return os.path.join(HOME, path)
def pad_features(reviews_ints, seq_length):
''' Return features of review_ints, where each review is padded with 0's
or truncated to the input seq_length.
'''
## implement function
features=[]
for review in reviews_ints:
if len(review) < seq_length:
features.append(([0] * (seq_length - len(review))) + review)
elif len(review) > seq_length:
features.append(review[:seq_length])
else:
features.append(review)
features = np.array(features)
return features
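# Quick examples of the padding behaviour (hypothetical inputs):
# pad_features([[1, 2, 3]], seq_length=5) -> array([[0, 0, 1, 2, 3]])
# pad_features([[1, 2, 3]], seq_length=2) -> array([[1, 2]])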
def get_trn_val_tst_split(split_frac, idxs):
|
np.random.shuffle(idxs)
|
numpy.random.shuffle
|
# Core
from io import StringIO
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas_summary import DataFrameSummary
import dateparser
from datetime import datetime
from dateutil.relativedelta import relativedelta
import rapidfuzz.process
from sklearn.model_selection import train_test_split
from wordcloud import WordCloud
from stop_words import get_stop_words
import imageio
from tqdm.notebook import tqdm
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
# Preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import RobustScaler
# Modelling
from keras.models import Model, Sequential
from keras.layers import Dropout, Embedding, Dense, Input, Flatten
from keras.layers.merge import concatenate
from keras.callbacks import Callback
from IPython.display import clear_output
from tensorflow.keras.preprocessing import image
from keras.applications import MobileNetV2
from keras.applications.mobilenet import preprocess_input
from keras.utils import plot_model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
SEED = 69
DATE_PARSER = dateparser.DateDataParser(languages=['es'])
def load_cars_data():
with open('./data/car_model_list.json') as file:
data = json.load(file)
data = data['results']
return pd.DataFrame.from_records(data)
def generate_cars_dict(cars_data):
cars_data_dict = cars_data[['Category', 'Make', 'Model']]
cars_data_dict = {
k.lower().strip(): v[['Category', 'Model']]
for k, v in cars_data_dict.groupby('Make')
}
cars_data_dict = {
make: {(model, category): model.lower()
for model, category in zip(models['Model'], models['Category'])}
for make, models in cars_data_dict.items()
}
return cars_data_dict
def load_dataset():
"""
Loads train and test sets
"""
csv_text = ''
header = ''
with open('./data/dataset.csv') as file:
for i, line in enumerate(file):
if i == 0:
header = line.strip()
elif line.strip() != header:
csv_text += line  # each line from the file already ends with a newline
csv_text = f'{header}\n{csv_text}'
data = pd.read_csv(StringIO(csv_text))
data.drop(data.columns[0], axis=1, inplace=True)
data.drop_duplicates('image_url', inplace=True)
data.drop_duplicates(['ad_title', 'car_desc'], inplace=True)
return train_test_split(data, test_size=0.2, random_state=SEED)
def configure_plotting():
"""
Set plotting aesthetics.
"""
rc_params = {
'savefig.dpi': 300,
'figure.autolayout': False,
'figure.figsize': (10, 6),
'axes.labelsize': 18,
'axes.titlesize': 24,
'font.size': 20,
'font.family': 'serif',
'lines.linewidth': 2.0,
'lines.markersize': 8,
'legend.fontsize': 14
}
sns.set('paper', 'white', font_scale=1.2, rc=rc_params)
def plot_correlation_matrix(df,
methods=['pearson', 'spearman'],
figsize=(20, 8),
annot_size=18):
"""
Plots a correlation matrix with both Pearson and Spearman coefficients
"""
_, axes = plt.subplots(1, len(methods), sharey=True, figsize=figsize)
for ax, method in zip(axes, methods):
ax.set_title(method)
values = df.corr(method)
sns.heatmap(
values,
center=0,
cmap='RdBu',
annot=True,
fmt='.2f',
ax=ax,
annot_kws={'size': annot_size})
plt.tight_layout()
return axes
def render_table(data,
show_columns=True,
show_index=True,
col_width=3.0,
row_height=0.625,
fontsize=16,
header_color='#40466e',
row_colors=('#f1f1f2', 'w'),
edge_color='w',
bbox=(0, 0, 1, 1),
header_columns=0,
ax=None,
**kwargs):
"""
Plot a pd.DataFrame as Matplotlib table.
"""
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array(
[col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
rowLabels = data.index if show_index else None
colLabels = data.columns if show_columns else None
mpl_table = ax.table(
cellText=data.values,
bbox=bbox,
rowLabels=rowLabels,
colLabels=colLabels,
**kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(fontsize)
for k, cell in mpl_table._cells.items():
cell.set_edgecolor(edge_color)
if (k[0] == 0 and show_columns) or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0] % len(row_colors)])
return ax
def get_summary(df, digits=3):
"""Gets a data summary of the dataframe passed as argument"""
summary = DataFrameSummary(df).summary()
summary.iloc[0:9, :] = summary.iloc[0:9, :].astype(float).round(digits)
return summary
def get_missing_values(df):
"""
Returns a dataframe with the percentage of missing values for each
feature in the dataframe passed as argument
"""
missing = 100 * (1 - df.count() / len(df))
missing = missing.apply(lambda x: '%.3f%%' % x)
return pd.DataFrame(missing, columns=['Missing %'])
def get_relative_delta(date_string):
result = DATE_PARSER.get_date_data(date_string).date_obj
return relativedelta(result, datetime.now())  # datetime.now must be called, not passed
def extract_car_info_from_title(title, cars_dict):
brand, model, category = None, None, None
title = title.upper()
split_title = [x for x in title.strip().split(' ') if len(x) >= 2 or x.isalpha() or x.isdigit()]
brand = split_title[0]
if len(split_title) > 1:
model_words = ' '.join(split_title[1:]).split(' ')
model_words = [word for word in model_words if 'CV' not in word and 'KW' not in word]
model = ' '.join(model_words[:2]).strip()
model = model or None
query = brand if brand else title.lower()
match = rapidfuzz.process.extractOne(query, cars_dict.keys(), score_cutoff=0.9)
if match:
models = cars_dict[match[0]]
model_match = rapidfuzz.process.extractOne(model, models, processor=None, score_cutoff=0.9)
if model_match:
_, category = model_match[-1]
return brand, model, category
def rearrange_row(row, cars_dict):
km = None
year = None
engine_type = None
door_num = None
power = None
columns = ['car_km', 'car_year', 'car_engine_type', 'car_door_num', 'car_power']
values = row[columns]
for value in values:
value = value.strip()
if 'kms' in value:
km = float(value.replace('kms', '').replace('.', ''))
if 'CV' in value:
power = int(value.replace('CV', ''))
if value in ['Manual', 'Automático']:
engine_type = value
if 'puertas' in value:
door_num = int(value.replace('puertas', ''))
if value.isdigit():
year = int(value)
creation_date = row['ts'] + get_relative_delta(row['ad_time'])  # assuming an 'ad_time' column; the literal string 'ad_time' cannot be parsed as a date
years_since_manufacturing = creation_date.year - year if year else None
brand, model, category = extract_car_info_from_title(
row['ad_title'], cars_dict)
result = [model, brand, category, row['car_desc'], row['advertizer_type'],
row['image_url'], row['region'], row['car_price'],
years_since_manufacturing, km, engine_type, door_num, power]
index = ['model', 'brand', 'category', 'car_desc', 'advertizer_type',
'image_url', 'region', 'car_price', 'years_since_manufacturing',
'car_km', 'car_engine_type', 'car_door_num', 'car_power']
return pd.Series(result, index)
def tidy_data(data, cars_dict):
result = data.drop(['ad_id', 'ad_type'], axis=1)
result['ts'] = pd.to_datetime(result['ts'])
return result.progress_apply(lambda x: rearrange_row(x, cars_dict), axis=1)
def remove_outliers(data):
return data[
(data['car_km'] < 3e6) &
(data['years_since_manufacturing'] >= 0) &
(data['years_since_manufacturing'] <= 30) &
(data['car_power'] >= 40) &
(data['car_power'] <= 800) &
(data['car_price'] >= 500) &
(data['car_price'] <= 200000)
]
def remove_useless_numerical_variables(data):
return data.drop('car_door_num', axis=1)
def plot_lollipop(data, x, y):
ordered_df = data.sort_values(by=x)
my_range = range(1, len(data.index) + 1)
plt.hlines(y=my_range, xmin=0, xmax=ordered_df[x], color='skyblue')
plt.plot(ordered_df[x], my_range, "o")
plt.yticks(my_range, ordered_df[y])
plt.xlabel(x)
plt.ylabel(y)
def plot_wordcloud(data):
text = ''
stopwords = set(get_stop_words('es'))
for val in data:
if not val:
continue
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
text += " ".join(tokens) + " "
wordcloud = WordCloud(width=800, height=800,
background_color='white',
stopwords=stopwords,
min_font_size=10).generate(text)
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
def download_image_form_url(img_url, folder, filename):
image = imageio.imread(img_url)
imageio.imwrite(folder / filename, image)
def download_images(image_urls, folder, n_jobs=4):
folder = Path(folder)
folder.mkdir(parents=True, exist_ok=True)
images_with_index = zip(image_urls.index, image_urls)
with ThreadPoolExecutor(max_workers=n_jobs) as executor:
results = list(tqdm(executor.map(
lambda x: download_image_form_url(x[1], folder, f'{x[0]}.jpg'),
images_with_index
), total=len(image_urls)))
def load_images(folder, return_filename=False):
folder = Path(folder)
for img in folder.iterdir():
if img.is_file():
result = imageio.imread(img)
if return_filename:
yield result, img.stem
else:
yield result
def render_image(img, title=None, ax=None):
if not ax:
ax = plt.gca()
ax.imshow(img)
ax.set_axis_off()
if title:
ax.set_title(title)
if not ax:
plt.show()
def render_grid(images, titles, rows=3, suptitle=None, figsize=None):
num_imgs = len(images)
cols = num_imgs // rows + num_imgs % rows
figsize = figsize if figsize else (12, rows * 3)
fig = plt.figure(figsize=figsize)
grid_shape = (rows, cols)
gs = fig.add_gridspec(*grid_shape)
fig.tight_layout()
for i, (img, title) in enumerate(zip(images, titles)):
ax = fig.add_subplot(gs[i // cols, i % cols])
render_image(img, title=title, ax=ax)
if suptitle:
plt.suptitle(suptitle)
plt.show()
def encode_for_embeddings(train, test, column):
labels = set(train[column].unique()) | set(test[column].unique())
labels = {k: i + 1 for i, k in enumerate(labels)}
train = train.copy()
test = test.copy()
train.loc[:, column] = train.loc[:, column].apply(lambda x: labels[x])
test.loc[:, column] = test.loc[:, column].apply(lambda x: labels[x])
return train, test
def preprocess_data(train, test):
train = train.drop('car_desc', axis=1)
test = test.drop('car_desc', axis=1)
preprocessed_train = train[~pd.isnull(train['model']) & ~pd.isnull(train['image_url'])]
preprocessed_test = test[~pd.isnull(test['model']) & ~pd.isnull(test['image_url'])]
preprocessed_train, preprocessed_test = encode_for_embeddings(
preprocessed_train, preprocessed_test, 'model')
preprocessed_train, preprocessed_test = encode_for_embeddings(
preprocessed_train, preprocessed_test, 'brand')
preprocessed_train, preprocessed_test = encode_for_embeddings(
preprocessed_train, preprocessed_test, 'region')
preprocessed_train['car_engine_type'] = \
(preprocessed_train['car_engine_type'] == 'Manual').astype(np.uint8)
preprocessed_test['car_engine_type'] = \
(preprocessed_test['car_engine_type'] == 'Manual').astype(np.uint8)
preprocessed_train['advertizer_type'] = \
(preprocessed_train['advertizer_type'] == 'Particular').astype(np.uint8)
preprocessed_test['advertizer_type'] = \
(preprocessed_test['advertizer_type'] == 'Particular').astype(np.uint8)
robust_scaler = RobustScaler()
numerical_columns = ['car_km', 'car_power', 'years_since_manufacturing']
preprocessed_train[numerical_columns] = robust_scaler.fit_transform(
preprocessed_train[numerical_columns])
preprocessed_test[numerical_columns] = robust_scaler.transform(
preprocessed_test[numerical_columns])
imputer = SimpleImputer(missing_values=None, strategy='most_frequent')
preprocessed_train.loc[pd.isnull(preprocessed_train['category']), 'category'] = None
preprocessed_test.loc[pd.isnull(preprocessed_test['category']), 'category'] = None
preprocessed_train['category'] = imputer.fit_transform(
preprocessed_train['category'].values.reshape(-1, 1))
preprocessed_test['category'] = imputer.transform(
preprocessed_test['category'].values.reshape(-1, 1))
preprocessed_train, preprocessed_test = encode_for_embeddings(
preprocessed_train, preprocessed_test, 'category')
return preprocessed_train, preprocessed_test
def create_keras_model(train, test, numerical_input_columns):
input_category = Input(shape=(1,), name='input_category')
input_brand = Input(shape=(1,), name='input_brand')
input_model = Input(shape=(1,), name='input_model')
input_region = Input(shape=(1,), name='input_region')
input_numerical = Input(shape=(len(numerical_input_columns),), name='input_numerical')
num_categories = max(train['category'].max(), test['category'].max())
num_brands = max(train['brand'].max(), test['brand'].max())
num_models = max(train['model'].max(), test['model'].max())
num_region = max(train['region'].max(), test['region'].max())
input_layers = [input_category, input_brand, input_model, input_region]
input_sizes = [num_categories, num_brands, num_models, num_region]
embedding_layers = []
for input_layer, num_labels in zip(input_layers, input_sizes):
out_dim = int(
|
np.sqrt(num_labels)
|
numpy.sqrt
|
import numpy as np
import networkx as nx
import sys
from .. import Agent
class maxWeightFixedAgent(Agent):
def __init__(self, epLen, env_config, alpha):
"""
Args:
epLen: number of steps
func: function used to decide action
env_config: parameters used in initialization of environment
data: all data observed so far
"""
self.data = []
self.epLen = epLen
self.num_cars = env_config['num_cars']
self.alpha = alpha
self.num_nodes = len(env_config['starting_state'])
self.graph = nx.Graph(env_config['edges'])
self.num_nodes = self.graph.number_of_nodes()
self.lengths = self.find_lengths(self.graph, self.num_nodes)
self.gamma = env_config['gamma']
self.d_threshold = env_config['d_threshold']
def find_lengths(self, graph, num_nodes):
"""Find the lengths between each pair of nodes in [graph].
Given a graph, find_lengths first calculates the pairwise shortest distance
between all the nodes, which is stored in a (symmetric) matrix.
Args:
graph:
An object containing nodes and edges; each edge has a travel
time.
num_nodes:
An integer representing the number of nodes in the graph.
Returns:
A 2-dimensional symmetric array containing the distances between
each pair of nodes.
"""
dict_lengths = dict(nx.all_pairs_dijkstra_path_length(
graph, cutoff=None, weight='travel_time'))
lengths =
|
np.zeros((num_nodes, num_nodes))
|
numpy.zeros
|
import sys
import os
import numpy as np
import pickle
path = os.path.split(__file__)[0]
path = os.path.abspath(path)
if path not in sys.path:
sys.path.append(path)
from mnist.get_mnist import get as get_mnist
from cifar10.get_cifar import get as get_cifar
from model import XLSTM, XGRU
import torch
import torch.nn as nn
import torch.optim as optim
import random
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def save_with_pickle(obj, fn):
with open(fn, 'wb') as f:
pickle.dump(obj, f)
def get_with_pickle(fn):
with open(fn, 'rb') as f:
result = pickle.load(f)
return result
class Writer:
def __init__(self, save_path):
self.record = {}
self.save_path = save_path
def write(self, key, value):
key_list = self.record.get(key, None)
if key_list is None:
key_list = []
key_list.append(value)
self.record[key] = key_list
def save(self):
save_with_pickle(self.record, self.save_path)
@classmethod
def load(cls, save_path):
return get_with_pickle(save_path)
class MnistDataset(torch.utils.data.Dataset):
def __init__(self, mode='train', permuted=False):
self.mode = mode
x, y = get_mnist()#(N, RGB, H, W)
x = np.transpose(x, (0, 2, 3, 1))#(N, H, W, RGB)
x = np.reshape(x, (-1, 28*28, 1))#(N, H*W, RGB)
x = x/255.0
N, _, _ = x.shape
self.nums = N#70000
self.train_nums = 50000
self.valid_nums = 10000
self.permuted = list(range(0, 784))
if permuted:
np.random.shuffle(self.permuted)
self.x = torch.from_numpy(x).float()
self.y = torch.from_numpy(y).long()
def set_mode(self, mode):
self.mode = mode
def __getitem__(self, index):
begin = None
if self.mode == 'train':
begin = 0
elif self.mode == 'valid':
begin = self.train_nums
elif self.mode == 'test':
begin = self.train_nums + self.valid_nums
x = self.x[begin+index, :, :]
y = self.y[begin+index]
# print(x.shape, len(self.permuted))
return x[self.permuted, :], y
def __len__(self):
if self.mode == 'train':
return self.train_nums
elif self.mode == 'valid':
return self.valid_nums
elif self.mode == 'test':
return self.nums - self.train_nums - self.valid_nums
class CifarDataset(torch.utils.data.Dataset):
def __init__(self, mode='train', permuted=False):
self.mode = mode
x, y = get_cifar()#(N, RGB, H, W)
x = np.transpose(x, (0, 2, 3, 1))#(N, H, W, RGB)
x = np.reshape(x, (-1, 32*32, 3))#(N, H*W, RGB)
x = x/255.0
N, _, _ = x.shape
print(x.shape, y.shape)
self.nums = N#60000
self.train_nums = 50000
self.valid_nums = 5000
self.permuted = list(range(0, 32*32))
if permuted:
np.random.shuffle(self.permuted)
self.x = torch.from_numpy(x).float()
self.y = torch.from_numpy(y).long()
def set_mode(self, mode):
self.mode = mode
def __getitem__(self, index):
begin = None
if self.mode == 'train':
begin = 0
elif self.mode == 'valid':
begin = self.train_nums
elif self.mode == 'test':
begin = self.train_nums + self.valid_nums
x = self.x[begin+index, :, :]
y = self.y[begin+index]
return x[self.permuted, :], y
def __len__(self):
if self.mode == 'train':
return self.train_nums
elif self.mode == 'valid':
return self.valid_nums
elif self.mode == 'test':
return self.nums - self.train_nums - self.valid_nums
class Model(nn.Module):
def __init__(self, seq_len, input_size,
hidden_size_rnn, hidden_size_relu,
RNN):
super(Model, self).__init__()
self.rnn = RNN(input_size=input_size, hidden_size=hidden_size_rnn, seq_len=seq_len)
#self.m = hidden_size_rnn*seq_len
#self.linear = nn.Linear(self.m, hidden_size_relu)
self.linear = nn.Linear(hidden_size_rnn, hidden_size_relu)
self.relu = nn.ReLU(True)
self.out = nn.Linear(hidden_size_relu, 10)
def forward(self, x):
rnn, _ = self.rnn(x)
#rnn1 = torch.reshape(rnn, (-1, self.m))
#rnn1 = rnn[:, -1, :]
linear = self.linear(rnn[:, -1, :])
relu = self.relu(linear)
out = self.out(relu)
return out
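# Shape sketch for the forward pass above (assuming batch-first input):
# x: (batch, seq_len, input_size) -> rnn output: (batch, seq_len, hidden_size_rnn);
# only the final time step, rnn[:, -1, :], feeds the linear/ReLU head,
# producing (batch, 10) class logits.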
epochs = 100
lr = 0.001
batch_size = 128
shuffle = True
hidden_size_rnn = 64
hidden_size_relu = hidden_size_rnn//2
seed = 0
def run(model, optimizer, criterion, batch_size, dataset, writer):
#use_cuda = torch.cuda.is_available()
use_cuda = False
if use_cuda:
model = model.cuda()
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, shuffle=shuffle,
num_workers=1, pin_memory=True)
for epoch in range(epochs):
model.train()
loss_s = 0.0
c = 0
dataset.set_mode('train')
for (x, y) in dataloader:
if use_cuda:
x = x.cuda()
y = y.cuda()
py = model(x)
loss = criterion(py, y)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.rnn.parameters(), 3.0)
optimizer.step()
k = loss.item()
writer.write('train_iteration_loss', k)
loss_s += k
if c%100 == 0:
print(k)
writer.save()
c += 1
loss_s = loss_s/c
writer.write('train_epoch_loss', loss_s)
with torch.no_grad():
model.eval()
dataset.set_mode('valid')
predY = []
trueY = []
for (x, y) in dataloader:
if use_cuda:
x = x.cuda()
pred = model(x)
predY.append(pred.detach().cpu())
trueY.append(y.detach())
predY = torch.cat(predY, 0)#(N, 10)
predY = predY.numpy()
predY = np.argmax(predY, 1)
trueY = torch.cat(trueY, 0)
trueY = trueY.numpy()
acc = trueY == predY
acc = acc.mean()
writer.write('valid_epoch_accuracy', acc)
valid_acc = acc
dataset.set_mode('test')
predY = []
trueY = []
for (x, y) in dataloader:
if use_cuda:
x = x.cuda()
pred = model(x)
predY.append(pred.detach().cpu())
trueY.append(y.detach())
predY = torch.cat(predY, 0)#(N, 10)
predY = predY.numpy()
predY =
|
np.argmax(predY, 1)
|
numpy.argmax
|
"""Algorithms for eclipse and flare detection."""
# Dependencies
import numpy as np
from chandralc import analysis, ml
def flare_detect(lc, binsize=10, sigma=3, threshold=0.3):
"""Detects potential flares in lightcurves.
Parameters
----------
lc : ChandraLightcurve
ChandraLightcurve object
binsize : int
Size of bin, by default 10
sigma : float
No. of std. deviations of slope above mean of slopes of all bins, by default 3
threshold : float
Threshold of clustering of bins which could be part of a flare from 0 to 1, by default 0.3
Returns
-------
bool
Whether flare(s) is/are detected or not
"""
# binsize arrays of times and cumulative counts
binned_time_arrays = analysis.bin_toarrays(lc.time_array, binsize)
binned_count_arrays = analysis.bin_toarrays(lc.cumulative_counts, binsize)
# Array of slopes of each bin from regression line
slopes = [
ml.regression_equation(x, y)[0]
for x, y in zip(binned_time_arrays, binned_count_arrays)
]
# Replacing nan with 0
for i in range(len(slopes)):
if
|
np.isnan(slopes[i])
|
numpy.isnan
|
from mpvr.datamodule.manager import Manager as dm
from mpvr.utils.process import *
from scipy.signal import savgol_filter
import numpy as np
import pandas as pd
dm = dm.from_config(dm.section_list()[0])
for scenario in dm.get_scenarios():
print(scenario)
dm.set_scenario(scenario)
motion_data = np.array([x for x in dm.get_motion_data_gen()])
motion_data_95 = savgol_filter(motion_data, 9, 5, axis=0)
histograms = [np.zeros(5**6), np.zeros(36)]
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(motion_data),
dm.get_classified_visual_data_gen())
make_histogram(mot_vis_gen, histograms)
for hist in histograms:
hist /= np.sum(hist)
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(motion_data),
dm.get_classified_visual_data_gen())
mapped = mapping_src_to_histogram(mot_vis_gen, histograms)
mpe = [x for x in to_mp_entropy(mapped)]
dm._load_timestamp_data()
dm.save_scenario_as_table(mpe, 'mpe')
histograms = [np.zeros(5**6), np.zeros(36)]
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(gen = motion_data_95),
dm.get_classified_visual_data_gen())
make_histogram(mot_vis_gen, histograms)
for hist in histograms:
hist /= np.sum(hist)
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(gen = motion_data_95),
dm.get_classified_visual_data_gen())
mapped = mapping_src_to_histogram(mot_vis_gen, histograms)
mpe = [x for x in to_mp_entropy(mapped)]
dm._load_timestamp_data()
dm.save_scenario_as_table(mpe, 'mpe', remark_dir='savgol95/')
for scenario in dm.get_scenarios():
print(scenario)
dm.set_scenario(scenario)
motion_data = np.array([x for x in dm.get_motion_data_gen()])
histograms = [np.zeros(5**6), np.zeros(36)]
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(motion_data),
dm.get_classified_visual_data_gen())
make_histogram(mot_vis_gen, histograms)
for hist in histograms:
hist /= np.sum(hist)
mot_vis_gen = dm.make_tuple_gen(
dm.get_classified_motion_data_gen(motion_data),
dm.get_classified_visual_data_gen())
mapped = mapping_src_to_histogram(mot_vis_gen, histograms)
mpe = [x for x in to_mp_entropy(mapped)]
dm._load_timestamp_data()
dm.save_scenario_as_table(mpe, 'mpe')
mpe =
|
np.zeros(315)
|
numpy.zeros
|
import cvxpy
import numpy as np
import scipy.sparse
import GPyOpt
import logging
from typing import *
# from .custom_tv import tv_masked, make_tv_ixmask
from .core import place_inside_mask
from .crossvalidation import split_list, bool_from_interval, rss_objective, corr_objective, poisson_log_lik_objective, nb_loglik
from .defaults import ReconstructionConfig
from random import shuffle
from scipy import optimize, sparse
import matplotlib.pyplot as plt
from scipy.special import loggamma, erf, gammaln, digamma
import pickle
from scipy.optimize import LinearConstraint, Bounds
class ReconstructorCVXPY:
"""Optimizer to solve constrained regularized least squares problem
Parameters
----------
alpha: float, default 1.0
Controls intensity of the L1 regularization
beta: float, default 0.01
Controls intensity of the TV filter
config: ReconstructionConfig
object containing the details of the geometry of the reconstruction
solver: cvxpy solver object, default cvxpy.SCS
Can be changed, but SCS seems to be the most performant for this task
"""
def __init__(self, alpha: float = 1, beta: float = 0.01,
config: ReconstructionConfig = ReconstructionConfig(),
solver: str=cvxpy.SCS, solver_kwargs: dict={}) -> None:
# Alpha and beta parameter might be moved in the formulation to make it more general
# But this would require to remove alpha,beta from the init with something like pars_dict
self.alpha = cvxpy.Parameter(sign="positive", value=alpha)
self.beta = cvxpy.Parameter(sign="positive", value=beta)
self.solver = solver
self.solver_kwargs = solver_kwargs
self.cfg = config
self.mask = self.cfg.mask_bw
self.proj_N = self.cfg.proj_N
self.b_n = cvxpy.Parameter(sign = "positive", value=self.proj_N)
self.w = self.proj_N / (self.proj_N - 1) # a weight to maintain the proportion regularized / RSS in crossvalidation
self._formulate(b=None)
self.formulated = True
self.reformulated = False
self.fit_at_least_once = False
self.norm_factor = None
def _formulate(self, b: np.ndarray=None) -> None:
""" Formulate Problem
Internal method that formulates and prepares the cvxpy model to be fit
Args
----
b: np.array, dtype float
Return
------
Nothing
"""
if (self.mask is None):
raise ValueError("mask paramenter not provided. A mask is required to fit the model")
# Define and construct variables and costants
self.x = cvxpy.Variable(self.cfg.A.shape[1], 1)
if b is None:
self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=np.zeros(self.cfg.A.shape[0]))
else:
self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=b)
self.A = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=self.cfg.A.shape[1], sign="positive", value=self.cfg.A)
self.x_img = cvxpy.reshape(self.x, self.mask.shape[0], self.mask.shape[1]) # x must be reshaped to allow calling cvxpy.tv on it; a reimplementation of the tv filter might speed this up
self.background = cvxpy.mul_elemwise(1. - self.mask.flat[:], self.x_img)
# The definition of the problem
self.objective = cvxpy.Minimize(cvxpy.sum_squares(self.A * self.x - self.b) +
self.beta * cvxpy.tv(self.x_img) )#+ self.alpha * cvxpy.norm(self.x, 1) )
self.constraints = [self.x >= 0.,
self.background == 0,
cvxpy.sum_entries(self.x) - 0.85*cvxpy.sum_entries(self.b)/2*self.b_n >= 0]
self.problem = cvxpy.Problem(self.objective, self.constraints)
self.formulated = True
def change_par(self, alpha: float = None, beta: float = None,
A: np.array = None, b: np.array = None, b_n: float = None) -> None:
""" Change a parameter without reformulating the model.
"""
if alpha is not None:
self.alpha.value = alpha
if beta is not None:
self.beta.value = beta
if A is not None:
self.A.value = A
if b is not None:
self.b.value = b
if b_n is not None:
self.b_n.value = b_n
def fit(self, b: np.ndarray=None, A: np.ndarray=None, warm_start: bool=True) -> Any:
""" Fit method
Defines the model and fit it to the data.
Args
----
b: np.array, dtype float
A: np.ndarray, dtype float
mask: np.ndarray, dtype int | bool
mask_gray > 0.1
Return
------
reconstructor: Recontructor
The object after fit. To get the data access the attribute 'x' otherwise call fit_predict
Note
----
b, A and mask are not required if the problem has been previously formulated (e.g. if self.warmstart = True)
"""
if not self.formulated:
self._formulate(b=b)
self.solver_kwargs["warm_start"] = False
else:
self.solver_kwargs["warm_start"] = warm_start and self.fit_at_least_once
self.change_par(b=b, A=A)
self.problem.solve(self.solver, verbose=False, **self.solver_kwargs)
self.fit_at_least_once = True
return self
def fit_predict(self, b: np.ndarray=None, A: np.ndarray=None, warm_start: bool=True) -> np.ndarray:
''' Run the optimization and return the reconstructed image.
The same as self.fit but also returns the reshaped result.
'''
self.fit(b, A, warm_start=warm_start)
return np.array(self.x.value.reshape(self.mask.shape))
def _score_reconstruction(self, x: np.ndarray) -> np.ndarray:
def nb_loglik_g2(xx, y, mu):
r = xx
#mu = mu*x
psi = r / mu
psi_min = 1. / psi
lggamma_fun_ratio = loggamma(y + psi_min) - loggamma(psi_min) - loggamma(y + 1)
log1mupsi = np.log(1 + mu * psi)
lgf1 = - psi_min * log1mupsi
lgf2 = y * (np.log(mu) - log1mupsi + np.log(psi))
return -np.sum(lggamma_fun_ratio + lgf1 + lgf2)
def rescale_nb_g2(b, b_pred):
x_r = optimize.minimize(nb_loglik_g2, x0 = (1, 0.2), args = (b, b_pred), method="L-BFGS-B", bounds=((0, None),(0.01, 8),) )
return x_r.x
x = np.atleast_2d(2**x) # optimization is performed in log2 space
fs = np.zeros((x.shape[0], 1)) # initialize output array
SL = list(split_list(list(range(self.proj_N)), (self.proj_N - 1, 1))) # splitted list
#shuffle(SL)
SL = SL[:3]
dics = []
for i in range(x.shape[0]):
alpha, beta = x[i, :]
logging.debug("Testing alpha= %.4f, beta=%.4f" % (alpha, beta))
fs[i] = 0 # does not do anything
self.change_par(alpha=alpha, beta=beta)
lis = []
for (train_list, test_list) in SL:
# I need A and b that are not shared with the ones in the reconstruction formulation
trainset_bool = bool_from_interval(train_list, self.cfg.boundaries)
testset_bool = bool_from_interval(test_list, self.cfg.boundaries)
A_train, b_train = np.copy(self._A), np.copy(self.b_norm)
A_test, b_test =
|
np.copy(self._A)
|
numpy.copy
|
import shutil
import tempfile
from numpy import array, vstack
from numpy.testing import assert_array_almost_equal
from scipy.stats import ttest_ind
from thunder.decoding.uniclassify import MassUnivariateClassifier
from test_utils import PySparkTestCase
from thunder.rdds.series import Series
class ClassificationTestCase(PySparkTestCase):
def setUp(self):
super(ClassificationTestCase, self).setUp()
self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(ClassificationTestCase, self).tearDown()
shutil.rmtree(self.outputdir)
class TestMassUnivariateClassification(ClassificationTestCase):
"""Test accuracy of mass univariate classification on small
test data sets with either 1 or 2 features
"""
def test_massUnivariateClassificationTTest_1d(self):
"""Simple classification problem, 1d features"""
X = array([-1, -0.1, -0.1, 1, 1, 1.1])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels)])
clf = MassUnivariateClassifier.load(params, "ttest")
# should match direct calculation using scipy
data = Series(self.sc.parallelize(zip([1], [X])))
result = clf.fit(data).values().collect()
groundTruth = ttest_ind(X[labels == 1], X[labels == 2])
assert_array_almost_equal(result[0], groundTruth[0])
def test_massUnivariateClassificationTTest_2d(self):
"""Simple classification problem, 2d features"""
X = array([-1, -2, -0.1, -2, -0.1, -2.1, 1, 1.1, 1, 1, 1.1, 2])
features = array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2])
samples = array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels), ('features', features), ('samples', samples)])
clf = MassUnivariateClassifier.load(params, "ttest")
# should match direct calculation using scipy
# test first feature only
data = Series(self.sc.parallelize(zip([1], [X])))
result = clf.fit(data, [[1]]).values().collect()
groundTruth = ttest_ind(X[features == 1][:3], X[features == 1][3:])
assert_array_almost_equal(result[0], groundTruth[0])
# test both features
result = clf.fit(data, [[1, 2]]).values().collect()
groundTruth = ttest_ind(vstack((X[features == 1][:3], X[features == 2][:3])).T,
vstack((X[features == 1][3:], X[features == 2][3:])).T)
assert_array_almost_equal(result[0][0], groundTruth[0])
def test_massUnivariateClassificationGNB_1d(self):
"""Simple classification problem, 1d features"""
X1 = array([-1, -1, -1.2, 1, 1, 1.2])
X2 = array([-1, -1, 1.2, 1, 1, 1.2])
labels = array([1, 1, 1, 2, 2, 2])
params = dict([('labels', labels)])
clf = MassUnivariateClassifier.load(params, "gaussnaivebayes", cv=0)
# should predict perfectly
data = Series(self.sc.parallelize(zip([1], [X1])))
result = clf.fit(data).values().collect()
|
assert_array_almost_equal(result[0], [1.0])
|
numpy.testing.assert_array_almost_equal
|
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where
                                rot is the rotation matrix and tn/td
                                are the numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first element).
                 phase_factor contains the phase factors that must be applied
                 to the structure factor of reflection hkl to obtain the
                 structure factor of the symmetry equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p
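# Note on the phase factors, as implemented above: for a transformation
# (rot, tn, td) the constructor stores exp(-2j*pi*tn/td) componentwise, so in
# symmetryEquivalentMillerIndices the product over components of
# phase_factors**hkl equals exp(-2j*pi * t.hkl), the usual phase relating the
# structure factors of symmetry-equivalent reflections.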
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
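# Usage sketch (illustrative, not part of the generated data): expanding one
# reflection with the triclinic group defined above. For 'P -1' the two
# operations send hkl to hkl and -hkl, and both phase factors are unity
# because the translation parts are zero. Guarded so that importing this
# module stays side-effect free.
if __name__ == "__main__":
    example_hkl = N.array([1, 2, 3])
    mates, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(example_hkl)
    print(mates)   # [[ 1  2  3] [-1 -2 -3]]
    print(phases)  # [1.+0.j 1.+0.j]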
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
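# Groups 48 through 62 above are primitive orthorhombic and
# centrosymmetric: each lists eight operations, and operations 5-8 are
# operations 1-4 composed with the inversion at the origin, so their
# rotation matrices are negated and their translations sign-flipped.
# That is why negative trans_num entries appear; they are equivalent to
# positive ones modulo a lattice translation.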
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
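# From group 63 ('C m c m') through group 68, the lattices are
# C-centred: the eight point operations are listed once with their base
# translations and then repeated with the centring vector (1/2, 1/2, 0)
# added. The generated sums are not reduced modulo 1, which is why
# component values such as 1 (written as num 1 over den 1) appear above.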
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
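# Group 69 ('F m m m') is face-centred: the eight point operations are
# repeated four times in total, once with zero shift and once for each
# centring vector (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), giving the
# 32 entries above. Group 70 that follows uses the same scheme with
# quarter translations (d glides).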
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
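# Groups 71 through 74 are body-centred: each set of eight point
# operations is duplicated with the centring vector (1/2, 1/2, 1/2),
# again without reducing the resulting sums modulo 1.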
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
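# P 4 lists the four proper rotations about the c axis: identity,
# 4+ (x,y,z -> -y,x,z), 4- (x,y,z -> y,-x,z) and the twofold
# (x,y,z -> -x,-y,z), all with zero translation.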
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
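# P 41, P 42 and P 43 (Nos. 76-78) reuse the P 4 rotation parts but attach
# screw translations along c of 1/4, 1/2 and 3/4 respectively to the 4+
# generator, as the trans_num/trans_den entries above show; P 41 and P 43
# form an enantiomorphic pair (as do Nos. 91/95 and 92/96 further down).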
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
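# Body-centred (I) groups store each operation twice: once as given and
# once with the centring translation (1/2,1/2,1/2) added, which is why
# I 4 carries eight entries for the four P 4 rotations.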
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
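# The -4 rotoinversion of P -4 appears as a 90-degree rotation combined
# with z -> -z (rotation part [0,1,0, -1,0,0, 0,0,-1], determinant -1).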
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
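# The ':2' suffix (here and for Nos. 85 and 86 above) marks origin
# choice 2 of the International Tables settings, with the origin placed
# on the inversion centre.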
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
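# The eight rotation parts of a complete list such as P 4 2 2 close under
# matrix multiplication. An illustrative check one could run on any
# transformations list built above (sketch only, assuming N provides
# alltrue/ravel as in Numeric; not executed here):
#     rots = [op[0] for op in transformations]
#     for a in rots:
#         for b in rots:
#             assert any(N.alltrue(N.ravel(N.dot(a, b) == c)) for c in rots)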
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
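# From P 4 m m onward the lists mix proper rotations with mirror and glide
# operations whose rotation parts have determinant -1 (e.g.
# [-1,0,0, 0,1,0, 0,0,1] for the mirror x -> -x above).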
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
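# Note that the I-centred settings above keep their translations
# unreduced: for 'I -4 2 d' the centring shift (1/2, 1/2, 1/2) added to
# (1/2, 0, 3/4) is stored as (1, 1/2, 5/4) rather than wrapped into the
# unit cell. A minimal sketch of the reduction, assuming `N` behaves
# like numpy (`reduced_translation` is a hypothetical helper, not part
# of the generated API):
def reduced_translation(trans_num, trans_den):
    """Return the translation as floats wrapped into [0, 1)."""
    t = 1.0 * trans_num / trans_den
    return t - N.floor(t)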
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
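# With the full holohedral group 'P 4/m m m' registered (16 operations),
# a general position has 16 images. A sketch of orbit generation over a
# list of (rot, trans_num, trans_den) tuples, deduplicated modulo
# lattice translations; `orbit` is a hypothetical helper and `N` is
# assumed to be numpy-compatible:
def orbit(transformations, point, tol=1e-6):
    """Distinct images of `point` under the operations, wrapped to [0, 1)."""
    images = []
    for rot, trans_num, trans_den in transformations:
        x = N.dot(rot, point) + 1.0 * trans_num / trans_den
        x = x - N.floor(x)  # wrap into the unit cell
        if not any(max(abs(x - y)) < tol for y in images):
            images.append(x)
    return images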
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
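# Note (illustrative): every group is registered twice, so lookups by
# International Tables number and by Hermann-Mauguin symbol return the same
# object, e.g.
#   assert space_groups[143] is space_groups['P 3']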
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 16 17:25:18 2018
@author: akankshabindal
"""
# Modified version of dart_env.py to support 2 skeletons
import os
from gym import error, spaces
from gym.utils import seeding
import numpy as np
from os import path
import gym
import six
from collections import defaultdict
import nlopt
from math import sqrt
from gym.envs.dart.static_window import *
import scipy.misc
try:
import pydart2 as pydart
from pydart2.gui.trackball import Trackball
pydart.init()
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you need to install pydart2.)".format(e))
class DartEnv2BotKima(gym.Env):
"""Superclass for dart environment with 2 bots contained in one skel file
"""
def __init__(self, model_paths, frame_skip, \
dt=0.002, obs_type="parameter", action_type="continuous", visualize=True, disableViewer=False,\
screen_width=80, screen_height=45):
assert obs_type in ('parameter', 'image')
assert action_type in ("continuous", "discrete")
print('pydart initialization OK')
self.viewer = None
if len(model_paths) < 1:
raise ValueError("At least one model file is needed.")
if isinstance(model_paths, str):
model_paths = [model_paths]
#list of all skelHolders - convenience class holding all relevant info about a skeleton
self.skelHldrs = []
#load skels, make world
self.loadAllSkels(model_paths, dt)
#build list of skeleton handlers
self._buildSkelHndlrs()
#set which skeleton is active, which also sets action and observation space
self.setActiveSkelHndlr(True)
self._obs_type = obs_type
self.frame_skip= frame_skip
self.visualize = visualize #Show the window or not
self.disableViewer = disableViewer
# random perturbation
self.add_perturbation = False
self.perturbation_parameters = [0.05, 5, 2] # probability, magnitude, bodyid (duration is tracked separately below)
self.perturbation_duration = 40
self.perturb_force = np.array([0, 0, 0])
# initialize the viewer, get the window size
# initialize here instead of in _render for image-based learning
self.screen_width = screen_width
self.screen_height = screen_height
self._get_viewer()
self._seed()
#self._seed = 5794
#print(self._seed)
#overriding env stuff
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : int(np.round(1.0 / self.dt))
}
#set which skeleton we are currently using for training
#also sets action and observation variables necessary for env to work
def setActiveSkelHndlr(self, isHumanSkel=True):
if(isHumanSkel):
self.actSKHndlrIDX = self.humanIdx
else :
self.actSKHndlrIDX = self.botIdx
self.activeSkelHndlr = self.skelHldrs[self.actSKHndlrIDX]
self.updateObsActSpaces()
#call this to update the observation and action space dimensions if they change
#either because active skeleton handler has changed, or because dimension of assist
#force has changed
def updateObsActSpaces(self):
self.obs_dim = self.activeSkelHndlr.obs_dim
self.act_dim = self.activeSkelHndlr.action_dim
self.action_space = self.activeSkelHndlr.action_space
self.observation_space = self.activeSkelHndlr.observation_space
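#illustrative usage (hypothetical): after switching the trained skeleton the
#gym-facing spaces follow automatically, e.g.
#  env.setActiveSkelHndlr(isHumanSkel=False)   # control the helper bot
#  assert env.action_space == env.skelHldrs[env.botIdx].action_space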
#set skeleton values for 2D waist-down skeleton
def _setSkel2DWaistDown(self, skelH, widx):
print('Skel {} is in _setSkel2DWaistDown'.format(skelH.skel.name))
skelH.setCntlBnds(np.array([[1.0]*6,[-1.0]*6]))
skelH.setActionScale(np.array([100, 100, 20, 100, 100, 20]))
# observation dimension == # of q (8) , qdot (9) components + # of assistive force components (3)
skelH.setObsDim(20)
return skelH
#3d version of waist down bot
def _setSkel3DWaist(self, skelH, widx):
print('Skel {} is in _setSkel3DWaist'.format(skelH.skel.name))
#'walker3d_waist.skel'
action_scale = np.array([100.0]*15)
action_scale[[-1,-2,-7,-8]] = 20
action_scale[[0, 1, 2]] = 150
skelH.setCntlBnds(np.array([[1.0]*15,[-1.0]*15]))
skelH.setActionScale(action_scale)
# observation dimension == # of q and qdot components + # of assistive force components; 41 in total for this 3D skel
#TODO in 3d this is wrong, root dofs are 3 orient, 3 loc , first idx is orient around x, not x pos like in 2d
skelH.setObsDim(41)
return skelH
#[[BodyNode(0): pelvis_aux],
# [BodyNode(1): pelvis],
# [BodyNode(2): l-upperleg],
# [BodyNode(3): l-lowerleg],
# [BodyNode(4): l-foot],
# [BodyNode(5): r-upperleg],
# [BodyNode(6): r-lowerleg],
# [BodyNode(7): r-foot],
# [BodyNode(8): abdomen],
# [BodyNode(9): thorax],
# [BodyNode(10): head],
# [BodyNode(11): l-clavicle],
# [BodyNode(12): l-upperarm],
# [BodyNode(13): l-lowerarm],
# [BodyNode(14): r-clavicle],
# [BodyNode(15): r-upperarm],
# [BodyNode(16): r-lowerarm],
# [BodyNode(17): r-attachment]]
#root orientation 0,1,2
#root location 3,4,5
#left upper leg 6,7,8
#left shin, left heel(2) 9,10,11
#right thigh 12,13,14
#right shin, right heel(2) 15,16,17
#abdomen(2), spine 18, 19, 20
#bicep left(3) 21,22,23
#forearm left 24
#bicep right (3) 25, 26, 27
#forearm right 28
#joints in kima
#[[TranslationalJoint(0): j_pelvis],
# [EulerJoint(1): j_pelvis2],
# [EulerJoint(2): j_thigh_left],
# [RevoluteJoint(3): j_shin_left],
# [UniversalJoint(4): j_heel_left],
# [EulerJoint(5): j_thigh_right],
# [RevoluteJoint(6): j_shin_right],
# [UniversalJoint(7): j_heel_right],
# [UniversalJoint(8): j_abdomen],
# [RevoluteJoint(9): j_spine],
# [WeldJoint(10): j_head],
# [WeldJoint(11): j_scapula_left],
# [EulerJoint(12): j_bicep_left],
# [RevoluteJoint(13): j_forearm_left],
# [WeldJoint(14): j_scapula_right],
# [EulerJoint(15): j_bicep_right],
# [RevoluteJoint(16): j_forearm_right],
# [WeldJoint(17): j_forearm_right_attach]]
#dofs in kima
#[[Dof(0): j_pelvis_x],
# [Dof(1): j_pelvis_y],
# [Dof(2): j_pelvis_z],
# [Dof(3): j_pelvis2_z],
# [Dof(4): j_pelvis2_y],
# [Dof(5): j_pelvis2_x],
# [Dof(6): j_thigh_left_z],
# [Dof(7): j_thigh_left_y],
# [Dof(8): j_thigh_left_x],
# [Dof(9): j_shin_left],
# [Dof(10): j_heel_left_1],
# [Dof(11): j_heel_left_2],
# [Dof(12): j_thigh_right_z],
# [Dof(13): j_thigh_right_y],
# [Dof(14): j_thigh_right_x],
# [Dof(15): j_shin_right],
# [Dof(16): j_heel_right_1],
# [Dof(17): j_heel_right_2],
# [Dof(18): j_abdomen_1],
# [Dof(19): j_abdomen_2],
# [Dof(20): j_spine],
# [Dof(21): j_bicep_left_z],
# [Dof(22): j_bicep_left_y],
# [Dof(23): j_bicep_left_x],
# [Dof(24): j_forearm_left],
# [Dof(25): j_bicep_right_z],
# [Dof(26): j_bicep_right_y],
# [Dof(27): j_bicep_right_x],
# [Dof(28): j_forearm_right]]
def _setSkel3DFullBody(self, skelH, widx):
print('Skel {} is in _setSkel3DFullBody'.format(skelH.skel.name))
#numdofs - 6 root dofs
numActDofs = 23
#clips control to be within 1 and -1
skelH.setCntlBnds(np.array([[1.0]*numActDofs,[-1.0]*numActDofs]))
#scales control torques; base scale is 100, adjusted per-dof below
action_scale = np.array([100.0]*numActDofs)
#these are dof idxs -6 (we never apply actions to first 6 dofs)
#irrelevant dofs get a reduced action scale to minimize instability caused by self-collision
#thigh twist and spread
#action_scale[[0,3,6,9, 12, 13, 14, 16, 20]]*= 0.2
action_scale[[1, 2, 7, 8]] *= .1
#head
#action_scale[[21,23, ]] *= .01
#left shoulders, 2 of bicep, and hands
action_scale[[15,17, 19, 21]]*= .05
#right shoulders, 2 of bicep, hands
#action_scale[[25,26]]*= .01
#shoulders/biceps
#action_scale[[19,20,21,22,25,26,27,28]] *= .75
#scale ankles actions less
action_scale[[4,5, 10, 11]]*= .20
#scale feet and forearms much less
action_scale[[18, 22]]*= .1
print('action scale : {}'.format(action_scale))
skelH.setActionScale(action_scale)
#input()
#2 * numDofs : can't ignore 1st element in q in 3d.
skelH.setObsDim(2*skelH.skel.num_dofs())
return skelH
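#note (assumption about the skel holder contract): a policy action a is first
#clipped to the control bounds [-1,1] and then scaled elementwise, roughly
#  tau[6:] = action_scale * np.clip(a, -1, 1)
#so the per-dof scale factors above directly bound the applied torques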
def _initLoadedSkel(self, skel, widx, isHuman, isFullRobot, skelHldrIDX):
#3/16/18 problem is with skeleton
#need to set skeleton joint stiffness and damping, and body friction
#maybe even coulomb friction for joints.
#set for every joint except world joint - joint limits, stiffness, damping
#print('Set Joints for Skel {}'.format(skel.name))
for jidx in range(skel.njoints):
j = skel.joint(jidx)
#don't mess with free joints
if ('Free' in str(j)):
continue
nDof = j.num_dofs()
for didx in range(nDof):
if j.has_position_limit(didx):
j.set_position_limit_enforced(True)
j.set_damping_coefficient(didx, 10.)
j.set_spring_stiffness(didx, 50.)
#eventually check for # dofs in skel to determine init
if(skel.ndofs==9):#2D
stIDX = 3
elif(skel.ndofs==37):
stIDX = 6
elif(skel.ndofs==29):
stIDX = 6
else :
print('DartEnv2BotKima::_initLoadedSkel : Unknown skel type based on # of dofs {} for skel {}'.format(skel.ndofs,skel.name))
return None
if(isHuman):
skelH = humanSkelHolder(self, skel, widx,stIDX, skelHldrIDX)
elif(isFullRobot):
skelH = robotSkelHolder(self, skel, widx,stIDX, skelHldrIDX)
else:
skelH = robotArmSkelHolder(self, skel, widx,stIDX, skelHldrIDX)
return self._setSkel3DFullBody(skelH, widx)
#get skeleton objects from list in dart_world
def _buildSkelHndlrs(self):
numBots = self.dart_world.num_skeletons()
idxSkelHldrs = 0
self.hasHelperBot = False
self.grabLink = None
for idx in range(numBots):
skelType = ''
skel = self.dart_world.skeletons[idx]
skelName = skel.name
# for i in range(self.robot_skeleton.njoints-1):
# #print("joint:%d"%(i),skel.joint(i).name)
# #print("position limit",skel.joint(i).position_upper_limit(0))
# self.robot_skeleton.joint(i).set_position_limit_enforced(True)
# j = self.robot_skeleton.dof(i)
# j.set_damping_coefficient(10.)
# #j.set_spring_stiffness(2.)
# #print("joint name",j.name)
# #print("stiffness",j.spring_stiffness)
# for body in self.robot_skeleton.bodynodes+self.dart_world.skeletons[0].bodynodes:
# body.set_friction_coeff(20.)
#this is humanoid to be helped up
if ('getUpHumanoid' in skelName) or ('biped' in skelName) :
self.skelHldrs.append(self._initLoadedSkel(skel, idx, True, False, idxSkelHldrs))
self.humanIdx = idxSkelHldrs
idxSkelHldrs += 1
skelType = 'Human'
#track getup humanoid in rendering
self.track_skeleton_id = idx
elif 'helperBot' in skelName :
self.skelHldrs.append(self._initLoadedSkel(skel, idx, False, True, idxSkelHldrs))
self.botIdx = idxSkelHldrs
idxSkelHldrs += 1
skelType = 'Robot'
self.hasHelperBot = True
elif 'helperBotArm' in skelName :
self.skelHldrs.append(self._initLoadedSkel(skel, idx, False, False, idxSkelHldrs))
self.botIdx = idxSkelHldrs
idxSkelHldrs += 1
skelType = 'Robot'
self.hasHelperBot = True
elif 'sphere_skel' in skelName :
self.grabLink = skel
skelType = 'Sphere'
self.grabLink.bodynodes[0].set_collidable(False)
# print('{} index : {} #dofs {} #nodes {} root loc : {}'.format(skelType,idx, skel.ndofs, skel.nbodies, skel.states()[:6]))
#give robot a ref to human skel handler
if (self.hasHelperBot):
self.skelHldrs[self.botIdx].setHelpedSkelH(self.skelHldrs[self.humanIdx])
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
#return jacobians of either robot or human contact point
#for debugging purposes, to examine the jacobian
def getOptVars(self, useRobot=True):
if(useRobot):
skelhldr = self.skelHldrs[self.botIdx]
else:
skelhldr = self.skelHldrs[self.humanIdx]
return skelhldr.getOptVars()
# methods to override:
# ----------------------------
def reset_model(self):
"""
Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
def viewer_setup(self):
"""
This method is called when the viewer is initialized and after every reset
Optionally implement this method, if you need to tinker with camera position
and so forth.
"""
pass
# -----------------------------
def set_state(self, qpos, qvel):
self.activeSkelHndlr.set_state(qpos, qvel)
def set_state_vector(self, state):
self.activeSkelHndlr.set_state_vector(state)
def state_vector(self):
return self.activeSkelHndlr.state_vector()
@property
def dt(self):
return self.dart_world.dt * self.frame_skip
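#e.g. with the default world dt of 0.002s and frame_skip=5 (illustrative),
#one environment step advances the simulation by 0.01s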
def do_simPerturb(self, n_frames):
self.checkPerturb()
#apply their respective torques to all skeletons
for fr in range(n_frames):
for i in range(len(self.skelHldrs)) :
self.skelHldrs[i].add_perturbation( self.perturbation_parameters[2], self.perturb_force)
#tau is set in calling step routine
self.skelHldrs[i].applyTau()
self.dart_world.step()
#check to see if sim is broken each frame; if so, return with broken flag, frame # when broken, and skel causing break
chk,resDict = self.checkWorldStep(fr)
if(chk):
return resDict
return {'broken':False, 'frame':n_frames, 'skelhndlr':None}
#need to perform on all skeletons that are being simulated
def do_simulation(self, n_frames):
if self.add_perturbation:
return self.do_simPerturb(n_frames)
for fr in range(n_frames):
for i in range(len(self.skelHldrs)):
#tau is set in calling step routine
self.skelHldrs[i].applyTau()
# self.dart_world.skeletons[1].bodynodes[0].add_ext_force([0, self.dart_world.skeletons[1].mass()*9.8, 0])
# print(self._get_obs())
self.dart_world.step()
#check to see if sim is broken each frame, if so, return with broken comment
for i in range(len(self.skelHldrs)) :
brk, chkSt = self.skelHldrs[i].checkSimIsBroken()
if(brk): #means sim is broken, end fwd sim
return {'broken':True, 'frame':fr}
return {'broken':False, 'frame':n_frames}
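#both simulation paths return a small status dict, e.g.
#  {'broken':False, 'frame':n_frames} on success, or
#  {'broken':True, 'frame':fr} when a skeleton's state becomes invalid mid-rollout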
def checkPerturb(self):
if self.perturbation_duration == 0:
self.perturb_force *= 0
if np.random.random() < self.perturbation_parameters[0]:
axis_rand = np.random.randint(0, 2, 1)[0]
direction_rand = np.random.randint(0, 2, 1)[0] * 2 - 1
self.perturb_force[axis_rand] = direction_rand * self.perturbation_parameters[1]
else:
self.perturbation_duration -= 1
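#with the defaults above ([0.05, 5, 2]): each time the duration counter reaches
#zero there is a 5% chance of drawing a magnitude-5 force of random sign along
#the x or y axis, applied to body node 2 during do_simPerturb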
#Take total moments, total force, return a suitable point of application of that force to provide that moment
#COP_tau = COPval cross COP_ttlFrc ==> need to constrain possible COPvals
#so set COPval.y == 0 since we want the COP at the ground, and then solve eqs :
def calcCOPFromTauFrc(self,COP_tau, COP_ttlFrc):
COPval = np.zeros(3)
COPval[0] = COP_tau[2]/COP_ttlFrc[1]
COPval[2] = COP_tau[1]/COP_ttlFrc[0] + (COP_ttlFrc[2] * COPval[0]/COP_ttlFrc[0])
return COPval
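#derivation sketch (with COPval.y pinned to 0):
#  tau = COP x F  =>  tau_z = COP_x*F_y             =>  COP_x = tau_z/F_y
#                     tau_y = COP_z*F_x - COP_x*F_z  =>  COP_z = tau_y/F_x + F_z*COP_x/F_x
#which is what is computed above; note it assumes F_x and F_y are nonzero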
#return a dictionary arranged by skeleton, of 1 dictionary per body of contact info
#the same contact might be referenced multiple times
def getContactInfo(self):
contacts = self.dart_world.collision_result.contacts
#dictionary of skeleton-keyed body node collision info
cntInfoDict = {}
for i in range(len(self.dart_world.skeletons)):
cntInfoDict[self.dart_world.skeletons[i].name] = defaultdict(contactInfo)
for contact in contacts:
cntInfoDict[contact.bodynode1.skeleton.name][contact.bodynode1.name].addContact(contact,contact.bodynode1,contact.bodynode2)
cntInfoDict[contact.bodynode2.skeleton.name][contact.bodynode2.name].addContact(contact,contact.bodynode2,contact.bodynode1)
return cntInfoDict
######################
#rendering stuff
def _render(self, mode='human', close=False):
if not self.disableViewer:
self._get_viewer().scene.tb.trans[0] = -self.dart_world.skeletons[self.track_skeleton_id].com()[0]*1
if close:
if self.viewer is not None:
self._get_viewer().close()
self.viewer = None
return
if mode == 'rgb_array':
data = self._get_viewer().getFrame()
return data
elif mode == 'human':
self._get_viewer().runSingleStep()
def getViewer(self, sim, title=None):
# glutInit(sys.argv)
win = StaticGLUTWindow(sim, title)
#Currently scene is defined for first person view. Check in pydart/gui/opengl/kima_firstpersonview
#rot 4th component is along the ground
#rot 2nd component moves the ground
#rot 3rd component moves ground clockwise
# win.scene.add_camera(Trackball(theta=0.0, phi = 0.0, rot=[0, 0.0, 0, 0.8], trans=[-0.0, 0.5, 0], zoom=0.1), 'gym_camera')
# win.scene.add_camera(Trackball( theta = 0.0, phi = 0.0, rot=[0.02, 0.71, -0.02, 0.71],
# zoom=1.5), 'gym_camera')
# win.scene.add_camera(Trackball(theta=0.0, phi = 0.0, rot=[0.02, 0.71, -0.02, 0.71], trans=[0.02, 0.09, 0.39],
# zoom=0.1), 'gym_camera')
# win.scene.set_camera(win.scene.num_cameras()-1)
# win.scene =
# to add speed,
if self._obs_type == 'image':
win.run(self.screen_width, self.screen_height, _show_window=self.visualize)
else:
win.run(_show_window=self.visualize)
# img = win.getGrayscale(128, 128)
# scipy.misc.imsave('image/shot1.png', img)
return win
def _get_viewer(self):
if self.viewer is None and not self.disableViewer:
self.viewer = self.getViewer(self.dart_world)
self.viewer_setup()
return self.viewer
#disable/enable viewer
def setViewerDisabled(self, vwrDisabled):
self.disableViewer = vwrDisabled
####################
# "private" methods
def _reset(self):
self.perturbation_duration = 0
ob = self.reset_model()
return ob
#load all skeletons and sim world
def loadAllSkels(self, model_paths, dt):
# convert everything to fullpath
full_paths = []
for model_path in model_paths:
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist"%fullpath)
full_paths.append(fullpath)
if full_paths[0][-5:] == '.skel':
self.dart_world = pydart.World(dt, full_paths[0])
else:
self.dart_world = pydart.World(dt)
for fullpath in full_paths:
self.dart_world.add_skeleton(fullpath)
#class to hold reference for all contact info for a particular skeleton's body node
#needs to be held in a collection indexed by body node, so that it doesn't get overwritten
#this class is a bit weird - doesn't have its body node set in the ctor because it is used in a default dict
class contactInfo():
def __init__(self) :
self.ttlfrc = np.zeros(3)
self.COPloc = np.zeros(3)
#np 3 dim arrays for point location and force value
self.cntctPt = list()
self.cntctFrc = list()
self.colBodies= list()
self.body = None
#if there's a contact with this body
#thisBody is this node's body node - should not be able to change
def addContact(self, contact, thisBody, otrBody):
if(None == self.body) :
self.body = thisBody
self.skel = thisBody.skeleton
elif (self.skel.name != thisBody.skeleton.name) or (self.body.name != thisBody.name):
print('Error in contactInfo:addContact : attempting to reassign from skel {} body {} to new skel {} body {}'.format(self.skel.name, self.body.name,thisBody.skeleton.name, thisBody.name))
self.colBodies.append(otrBody)
self.cntctPt.append(np.copy(contact.point))
self.cntctFrc.append(np.copy(contact.force))
self.setCopLoc()
#recalculate average location of contacts - called internally
def setCopLoc(self):
self.ttlfrc = np.zeros(3)
ttlTau = np.zeros(3)
self.COPloc = np.zeros(3)
numPts = len(self.cntctPt)
for i in range(numPts):
self.ttlfrc += self.cntctFrc[i]
ttlTau += np.cross(self.cntctPt[i],self.cntctFrc[i])
#coploc is location of cop in world coords
self.COPloc[0] = ttlTau[2]/self.ttlfrc[1]
self.COPloc[2] = ttlTau[1]/self.ttlfrc[0] + (self.ttlfrc[2] * self.COPloc[0]/self.ttlfrc[0])
#for optimization just want jacobian for each body
def getCntctJacob(self):
#want jacobian expressed in world frame, since forces are expressed in world frame
Jtrans = np.transpose(self.body.world_jacobian(offset=self.body.to_local(self.COPloc)))
return Jtrans
#This will get the contact force in generalized coordinates
#using J(self.COPloc)_transpose * self.ttlfrc
def getCntctTauGen(self):
#want jacobian expressed in world frame, since forces are expressed in world frame
Jtrans = self.getCntctJacob()
#Jtrans is dofs x 6 : first 3 of 6 are angular components, 2nd 3 of 6 are linear
frcVec = np.zeros(6)
#no angular component
frcVec[3:]=self.ttlfrc
#numpy matrix * vector -> vector
return Jtrans.dot(frcVec)
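#note: world_jacobian is 6 x ndofs ordered [angular; linear] (see the comment
#above), so with frcVec = [0,0,0, fx,fy,fz] the product computes
#  tau_gen = J(COPloc)^T . [0; f]
#mapping the purely linear contact force at the COP into generalized joint torques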
from abc import ABC, abstractmethod
#base convenience class holding relevant info and functions for a skeleton
class skelHolder(ABC):
#env is owning environment
#skel is ref to skeleton
#widx is index in world skel array
#stIdx is starting index in force array for tau calc
def __init__(self, env, skel, widx, stIdx, skelHldrIDX):
# print("making skel : {}".format(skel.name))
#ref to owning environment
self.env = env
#ref to skel object
self.skel = skel
#index in owning env's skeleton holder list
self.skelHldrIDX = skelHldrIDX
#index in world
self.worldIdx = widx
#start index for action application in tau array
self.stIdx = stIdx
self.initQPos = None
self.initQVel = None
#number of dofs - get rid of length calc every time needed
self.ndofs = self.skel.ndofs
#timestep
self.timestep = env.dart_world.dt
#state flags
#use the mean state only if set to false, otherwise randomize initial state
self.randomizeInitState = True
#use these to show that we are holding preset initial states - only used if randomizeInitState is false
self.loadedInitStateSet = False
#desired force/torque has been set
self.desForceTorqueSet = False
#initial torques
self.tau = np.zeros(self.skel.ndofs)
#TODO force and torque dimensions get from env
#+/- perturb amount of initial pose and vel for random start state
self.poseDel = .005
#set initial value of force/torque - to be overridden by calling env - temporary placeholder
#val=(.3 * 9.8 * self.skel.mass())
#self.setDesiredExtForce(np.array([val,val,0]))
#hand reaching to other agent
self.reach_hand = None
self.cnstrntLoc = np.zeros(3)
#list of body node names for feet
self.feetBodyNames = ['r-foot', 'l-foot']
# for i in range(len(self.skel.joints)-1):
# #print("joint:%d"%(i),skel.joint(i).name)
# #print("position limit",skel.joint(i).position_upper_limit(0))
# self.skel.joint(i).set_position_limit_enforced(True)
# j = self.skel.dof(i)
# j.set_damping_coefficient(2.)
# j.set_spring_stiffness(2.)
#print("joint name",j.name)
#print("stiffness",j.spring_stiffness)
for body in self.skel.bodynodes+self.env.unwrapped.dart_world.skeletons[0].bodynodes:
body.set_friction_coeff(100.)
self.desFrcTrqVal = [0,0,0]
def setSkelMobile(self, val):
self.skel.set_mobile(val)
def getReachCOM(self):
return self.skel.body(self.reach_hand).com()
#add a ball constraint between this skeleton's reaching hand and the passed body
#the order component is the order to add the object
def addBallConstraint(self,body1, bodyPos):
body2 = self.skel.body(self.reach_hand)
pos = .5*(bodyPos + body2.com())
self.lclCnstLoc = body2.to_local(pos)
print('Name : {} skelHandler ball world loc {} ball lcl loc : {} | cnst world loc : {} and lcl loc : {} on body {}'.format(self.skel.name,bodyPos,body2.to_local(bodyPos), pos,self.lclCnstLoc,body2.name))
constraint = pydart.constraints.BallJointConstraint(body1, body2, pos)
constraint.add_to_world(self.env.dart_world)
#set the name of the body node reaching to other agent
def setReachHand(self, _rchHand):
self.reach_hand = _rchHand
#set the desired external force and torque for this skel, to be applied at
#COM (TODO)
#(either the force applied to the human, or the force being applied by the robot)
def setDesiredExtForce(self, desFrcTrqVal):
self.lenFrcVec = len(desFrcTrqVal)
self.desFrcTrqVal = desFrcTrqVal
self.desForceTorqueSet = True
#set control bounds for this skeleton
def setCntlBnds(self, control_bounds):
self.control_bounds = control_bounds
self.action_dim = len(control_bounds[0])
self.action_space = spaces.Box(control_bounds[1], control_bounds[0])
#set action scaling for this skeleton
def setActionScale(self, action_scale):
self.action_scale = action_scale
#set observation dimension
def setObsDim(self, obsDim):
self.obs_dim = obsDim
high = np.inf*np.ones(self.obs_dim)
low = -high
self.observation_space = spaces.Box(low, high)
#set what this skeleton's init pose should be
def setInitPose(self, _ip):
self.initPose = _ip
self.setToInitPose()
# self.postPoseInit()
#reset this skeleton to its initial pose
def setToInitPose(self):
self.skel.set_positions(self.initPose)
#set initial state externally - call before reset_model is called by training process
#Does not override initPose
def setNewInitState(self, _qpos, _qvel):
#set to false to use specified states
self.randomizeInitState = False
self.initQPos = np.asarray(_qpos, dtype=np.float64)
self.initQVel = np.asarray(_qvel, dtype=np.float64)
self.loadedInitStateSet = True
#set this skeleton's state
def state_vector(self):
return np.concatenate([
self.skel.q,
self.skel.dq
])
#sets skeleton state to be passed position and velocity
def set_state(self, qpos, qvel):
# print(qpos.shape)
# print(qpos)
#assert shouldn't be there
# assert qpos.shape == (self.skel.ndofs,) and qvel.shape == (self.skel.ndofs,)
self.skel.set_positions(qpos)
self.skel.set_velocities(qvel)
#sets skeleton state to be passed state vector, split in half (for external use)
def set_state_vector(self, state):
numVals = int(len(state)/2.0)
self.skel.set_positions(state[0:numVals])
self.skel.set_velocities(state[numVals:])
#called in do_simulate if perturbation is true
def add_perturbation(self, nodes, frc):
self.skel.bodynodes[nodes].add_ext_force(frc)
#build tau from a, using control clamping, for skel, starting at stIdx
#and action scale for skeleton
def setTau(self, a):
self.a = a
clamped_control = np.array(a)
for i in range(len(clamped_control)):
if clamped_control[i] > self.control_bounds[0][i]:
clamped_control[i] = self.control_bounds[0][i]
if clamped_control[i] < self.control_bounds[1][i]:
clamped_control[i] = self.control_bounds[1][i]
self.tau = np.zeros(self.skel.ndofs)
self.tau[self.stIdx:] = clamped_control * self.action_scale
# print(self.tau)
return self.tau
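#note: the clamp loop above is equivalent to the one-liner (sketch, unused):
# clamped_control = np.clip(a, self.control_bounds[1], self.control_bounds[0])
#since control_bounds[0] holds the upper bounds and control_bounds[1] the lower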
#get dictionary of optimization variables at constraint location (where applicable)
#debug function
def getOptVars(self):
res = {}
body=self.skel.body(self.reach_hand)
res['M']=self.skel.M
res['CfrcG']=self.skel.coriolis_and_gravity_forces()
res['jacobian']=body.jacobian(offset=self.cnstrntLoc)
res['world_jacobian']=body.world_jacobian(offset=self.cnstrntLoc)
res['linear_jacobian']=body.linear_jacobian(offset=self.cnstrntLoc)
res['angular_jacobian']=body.angular_jacobian()
return res
#for some reason this is called after perturbation is set
#would be better if called from setTau
def applyTau(self):
pass
#return a random initial state for this skeleton
def getRandomInitState(self, poseDel=None):
if(poseDel is None):
poseDel=self.poseDel
#set walker to be lying on ground
self.setToInitPose()
#perturb init state and statedot
qpos = self.skel.q + self.env.np_random.uniform(low=-poseDel, high=poseDel, size=self.skel.ndofs)
qvel = self.skel.dq + self.env.np_random.uniform(low=-poseDel, high=poseDel, size=self.skel.ndofs)
return qpos, qvel
#calculate orientation along certain orientation axes
def procOrient(self, orientAxes):
oVec = np.array(orientAxes)
oVec_W = self.skel.bodynodes[0].to_world(oVec) - self.skel.bodynodes[0].to_world(np.array([0, 0, 0]))
norm = np.linalg.norm(oVec_W)
if(norm == 0):#should never happen, since this is used as a marker of failing, a large value will signal done
return 10
oVec_W /= norm
ang_cos = np.arccos(np.dot(oVec, oVec_W))
return ang_cos
def dispResetDebug(self, notice=''):
print('{} Notice : Setting specified init q/qdot/frc'.format(notice))
print('initQPos : {}'.format(self.initQPos))
print('initQVel : {}'.format(self.initQVel))
# print('initFrc : {}'.format(self.assistForce))
#for external use only - return observation list given passed state and state dots
#obs is slightly different than pure q/qdot (includes height in world frame), requiring skel to be modified
#restores skel pose when finished - make sure q is correctly configured
def getObsFromState(self, q, qdot):
#save current state so can be restored
oldState = self.state_vector()
oldQ = self.skel.q
oldQdot = self.skel.dq
#set passed state
self.set_state(np.asarray(q, dtype=np.float64), np.asarray(qdot, dtype=np.float64))
#get obs - INCLUDES FORCE VALUE - if using to build new force value, need to replace last 3 elements
obs = self.getObs()
#return to original state
self.set_state(oldQ, oldQdot)
return obs
#called at beginning of each rollout - resets this model, resetting its state
def reset_model(self, dispDebug=False):
# print('Randomize state', self.randomizeInitState)
# print('Reset')
self.randomizeInitState=True
if(self.randomizeInitState):#if random, set random perturbation from initial pose
qpos, qvel = self.getRandomInitState()
self.set_state(qpos, qvel)
else:
#reset to be in initial pose
self.setToInitPose()
# print('Loaded state',self.loadedInitStateSet)
# input()
#resetting to pre-set initial pose
if ( self.loadedInitStateSet):
if(dispDebug):
self.dispResetDebug('skelHolder::reset_model')
self.set_state(self.initQPos, self.initQVel)
self.loadedInitStateSet = False
else:
print('skelHolder::reset_model Warning : init skel state not randomized nor set to precalced random state')
return self.getObs()
#init to be called after skeleton pose is set
@abstractmethod
def postPoseInit(self):
pass
#functionality necessary before simulation step is executed
# @abstractmethod
# def preStep(self, a, cnstLoc=np.zeros(3)):
# pass
@abstractmethod
def resetIndiv(self, dispDebug):
pass
#functionality after sim step is taken -
#calculate reward, determine if done (termination conditions) and return observation
#and return informational dictionary
def postStep(self, resDict):
# vx, vy, vz, rwd, done, d = self.calcRewardAndCheckDone(resDict)
rwd, done, d = self.calcRewardAndCheckDone(resDict)
#want this to be cleared for every non-broken iteration or if it is broken but also done
if ((d['broke_sim'] == False) or done):
self.numBrokenIters = 0
obs = self.getObs()
#ob, reward, done, infoDict
# return vx, vy, vz, obs, rwd, done, d
return obs, rwd, done, d
#return skeleton qdot - maybe clipped, maybe not
def getSkelqDot(self):
return np.clip(self.skel.dq, -10, 10)
#base check goal functionality - this should be same for all agents,
#access by super()
def checkSimIsBroken(self):
s = self.state_vector()
_broken = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() )
return _broken, s
#check body node name to see if part of foot
def checkBNFoot(self, name):
return ('foot' in name)
#check if passed body nodes are on two different, non-ground, skeletons - return true
#don't want this - skeletons should not contact each other except through the ball joint constraint
def checkBNKickOtr(self, bn1, bn2):
if ('ground' in bn1.name) or ('ground' in bn2.name):
return False
#if != then they are different skeletons
return (bn1.skel.name != bn2.skel.name)
#returns true only if one body node is a foot on this skeleton and the other is the ground in a contact
def checkMyFootWithGround(self, bn1, bn2):
return (('ground' in bn1.name) and self.checkBNFoot(bn2.name) and (self.skel.name == bn2.skel.name)) or \
(('ground' in bn2.name) and self.checkBNFoot(bn1.name) and (self.skel.name == bn1.skel.name))
#calculate foot contact count and other terms if we want to use them for reward calc
def calcFootContactRew(self):
contacts = self.env.dart_world.collision_result.contacts
contactDict = defaultdict(float)
#sum of foot contact forces in 3 dirs
contactDict['cFrcX'] = 0
contactDict['cFrcY'] = 0
contactDict['cFrcZ'] = 0
COPval = np.zeros(3)
COP_tau = np.zeros(3)
COP_ttlFrc = np.zeros(3)
#tau = loc x frc
#COP calculation is the location that, when crossed with total force, will give total moment
#we can calculate total moment by crossing all contact forces with all contact locations
#we can get total force, and from this we can find the location that would produce the total
#torque given the total force (by first constraining one of the dimensions of the point in question)
#we choose to constrain the y coordinate to be 0 since we want the cop on the ground
for contact in contacts:
if (self.skel.name != contact.bodynode1.skeleton.name ) and (self.skel.name != contact.bodynode2.skeleton.name ) :
#contact not from this skeleton
continue
#penalize contact between the two skeletons - getup-human should not touch assistant bot
#only true if one skel contacts other
if self.checkBNKickOtr(contact.bodynode1, contact.bodynode2):
contactDict['kickBotContacts'] +=1
#only true if both contact bodies belong to the same skeleton - self-contact (e.g. kicking self)
if (contact.bodynode1.skel.name == contact.bodynode2.skel.name):
contactDict['tripFeetContact'] +=1
#this is a foot contact
#if (self.checkBNFoot(contact.bodynode2.name) or self.checkBNFoot(contact.bodynode1.name)):#ground is usually body 1
if (self.checkMyFootWithGround(contact.bodynode1,contact.bodynode2)):
#print('With Ground : contact body 1 : {} skel : {} | contact body 2 : {} skel : {}'.format(contact.bodynode1,contact.bodynode1.skeleton.name, contact.bodynode2,contact.bodynode2.skeleton.name))
contactDict['footGroundContacts'] +=1
#find total moment of all contacts
COP_tau += np.cross(contact.point, contact.force)
COP_ttlFrc += contact.force
if ('left' in contact.bodynode2.name) or ('left' in contact.bodynode1.name):
contactDict['leftContacts']+=1
else:
contactDict['rightContacts']+=1
contactDict['cFrcX'] += contact.force[0]
contactDict['cFrcY'] += contact.force[1]
contactDict['cFrcZ'] += contact.force[2]
else:
#print('Not With Ground : contact body 1 : {} skel : {} | contact body 2 : {} skel : {}'.format(contact.bodynode1,contact.bodynode1.skeleton.name, contact.bodynode2,contact.bodynode2.skeleton.name))
contactDict['nonFootContacts']+=1
#determines COP based on foot contacts with ground
if(0 < contactDict['footGroundContacts']):
#COP_tau = COPval cross COP_ttlFrc ==> need to constrain possible COPvals -> set COPval.y == 0 since we want the COP at the ground, and then solve eqs :
COPval = self.env.calcCOPFromTauFrc(COP_tau, COP_ttlFrc)
else : #estimate COP as center of both feet body node com locations
COPval = np.zeros(3)
COPval += self.skel.body('r-foot').com()
COPval += self.skel.body('l-foot').com()
COPval /= 2.0
return contactDict, COPval
#this will calculate a bounded velocity reward
#passed are : the velocity to score, the velocity offset that earns max reward
#(vMaxRwd), the minimum velocity needed to get a positive reward (minVel),
#and the max reward given (maxRwd)
#shaped like an inverted parabola
def calcVelRwd(self, vel, vMaxRwd, minVel, maxRwd):
#return (-abs(vel-(vMaxrwd + minVel)) + maxRwd)
a = maxRwd/(vMaxRwd * vMaxRwd)
cval = (vel-(vMaxRwd + minVel))
return (-a *(cval * cval) + maxRwd)
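#worked example: with vMaxRwd=1.5, minVel=.001, maxRwd=5.0 (the values passed
#in calcCOMCOPRew below) this evaluates to
#  r(vel) = -(5/2.25)*(vel - 1.501)^2 + 5
#peaking at r=5 when vel == vMaxRwd + minVel and crossing zero at vel == minVel
#and vel == 2*vMaxRwd + minVel, so only velocities in that open interval score
#a positive reward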
#get the state observation from this skeleton - concatenate to
#whatever extra info we are sending as observation
@abstractmethod
def getObs(self):
pass
#calculate reward for this agent, see if it is done, and return informational dictionary (holding components of reward for example)
@abstractmethod
def calcRewardAndCheckDone(self,resDict):
pass
#class for skeleton holder specifically for the getup human
class humanSkelHolder(skelHolder):
def __init__(self, env, skel, widx, stIdx, skelHldrIDX):
skelHolder.__init__(self,env, skel,widx,stIdx, skelHldrIDX)
#set this so that bot doesn't get stuck in limbo
self.minUpVel = .001
#must set force
#called after pose is set
def postPoseInit(self):
pass
def getObs(self):
state = np.concatenate([
self.skel.q,
self.skel.dq,#self.getSkelqDot(),
#need force as part of observation!
self.desFrcTrqVal
])
#assign COM to state
state[3:6] = self.skel.com()
return state
def applyTau(self):
self.skel.body(self.reach_hand).set_ext_force(self.desFrcTrqVal)
self.skel.set_forces(self.tau)
#get-up skel's individual per-reset settings
def resetIndiv(self, dispDebug):
#initial copcom distance - want this to always get smaller
#set initial COMCOPDist for velocity calcs
self.COMCOPDist = self.calcCOMCOPDist()
#functionality necessary before simulation step is executed for the human needing assistance
def preStep(self, a):
#self.lenFrcVec = len(desFrcTrqVal)
#self.desFrcTrqVal = desFrcTrqVal
#just add force at reach hand's COM
self.skel.body(self.reach_hand).add_ext_force(self.desFrcTrqVal)
#self.skel.body('l-lowerarm').add_ext_force(self.desFrcTrqVal)
#get x position and height before forward sim
com = self.skel.body('head').com()
self.posBefore = com[0]
self.heightBefore = com[1]
self.sideBefore = com[2]
#state before sim
self.oldState = self.state_vector()
#set torques
self.tau=self.setTau(a)
def calcCOMCOPDist(self, COPVal=np.zeros(3)):
if(np.count_nonzero(COPVal) == 0):
#COPVal will either be center of foot contacts with ground, or if no foot contacts then center of feet node COMs
_,COPVal = self.calcFootContactRew()
COMatFeet = self.skel.com()
COMatFeet[1] = COPVal[1]
#distance between COM projected on ground and COP value is bad
return np.square(COMatFeet - COPVal).sum()**(.5)
#calculate distance of COM projection at feet to center of feet
#and velocity of convergence
def calcCOMCOPRew(self,COPVal):
COMCOPDist = self.calcCOMCOPDist(COPVal)
#new dist needs to be smaller than old dist - this needs to be positive
COMCOP_vel = (self.COMCOPDist - COMCOPDist) / self.env.dt
#bounding convergence speed so we don't get the baseline diverging
#vel, vMaxRwd, minVel, maxRwd
COMCOP_VelScore = self.calcVelRwd(COMCOP_vel, 1.5, self.minUpVel, 5.0)
#keep around for next timestep
self.COMCOPDist = COMCOPDist
return COMCOP_VelScore
#calculate reward for this agent, see if it is done, and return informational dictionary (holding components of reward for example)
#called after fwd sim step
def calcRewardAndCheckDone(self,resDict):
#resDict holds whether the sim was broken or not - if sim breaks, we need to end the rollout
#holds these values : {'broken':False, 'frame':n_frames, 'stableStates':stblState}
#check first if sim is broken - illegal actions or otherwise exploding
broke_sim = resDict['broken']
#s = self.state_vector()
#forceLocAfter = self.skel.body(self.reach_hand).to_world(self.frcOffset)
#get x position and height before forward sim
com = self.skel.body('head').com()
#print(com)
posAfter = com[0]
heightAfter = com[1]
sideVel = (com[2] - self.sideBefore)/self.env.dt
#tight
# sideVelScore = -1/0.2*(sideVel)**2+0.2
#medium bound
sideVelScore = -1/1*(sideVel)**2+1
# sideVelScore = -abs(sideVel)+1
# sideVelScore = float(1e-2*(sideVel))
#angular terms : rotating around z, y and x axis
# ang_cos_swd = self.procOrient([0, 0, 1])
# ang_cos_uwd = self.procOrient([0, 1, 0])
# ang_cos_fwd = self.procOrient([1, 0, 0])
#
# reward function calculation
alive_bonus = 1.0
vel = (posAfter - self.posBefore) / self.env.dt
# velScore = -(vel)**2 + 1.0
#medium
valX = 1.5
velScore = -1/valX*(vel)**2+valX
# velScore = -abs(vel-2) + 2
raiseVel = (heightAfter - self.heightBefore) / self.env.dt
#Keeping raiseVel as a squared term breaks this - it goes negative very soon
# raiseVelScore = -abs(raiseVel - (2+ self.minUpVel)) + 2
#peak centered at velocity == 2 + self.minUpVel, peak is 2 high, so spans self.minUpVel to 4+self.minUpVel
# valY = 1.2/2
valY = 1.75/2
raiseVelScore = -1/valY*(raiseVel-(valY+ self.minUpVel))**2 + valY + self.minUpVel
#ground is at -0.5
height_rew = heightAfter + 0.5
contacts = self.env.dart_world.collision_result.contacts
#total_force_mag = 0
footContacts = 0
leftContacts = 0
rightContacts = 0
nonFootContacts = 0
for contact in contacts:
#print('body 1 : {} | body 2 : {} '.format(contact.bodynode1.name, contact.bodynode2.name))
if ('foot' in contact.bodynode2.name) or ('foot' in contact.bodynode1.name):#ground is usually body 1
footContacts +=1
if ('left' in contact.bodynode2.name) or ('left' in contact.bodynode1.name):
leftContacts +=1
else:
rightContacts +=1
else:
nonFootContacts+=1
#minimize requested torques- should this be requested or actually used? TODO
actionPenalty = float(1e-1* np.square(self.a).sum())
#reward = alive_bonus + raiseVelScore + velScore + contactRew + height_rew - actionPenalty #- angVelPen #- contactPen
reward = raiseVelScore + velScore + float(1e1* (height_rew)) + float(footContacts) - actionPenalty - 1e-1*sideVelScore#- angVelPen #- contactPen
# print('Upward Velocity: {}, Forward velocity:{} sideL{} reward{} '.format(raiseVel, vel, sideVel, reward))
s = self.state_vector()
#if broke_sim:
# reward = reward - 10
# print(vel, velScore)
done = broke_sim or not(
np.isfinite(s).all() and
#(np.abs(s[2:]) < 100).all() and
raiseVelScore>self.minUpVel and #NOT 0
#maybe not good, need to check
# (abs(ang_cos_uwd) < np.pi) and
# (abs(ang_cos_fwd) < np.pi) and
# (abs(ang_cos_swd) < np.pi) and
(heightAfter>-0.4) and
(heightAfter < 0.61) and
velScore > 0 and # allowing for slight negative velocities
sideVelScore > 0 # Not allowing more deviation
)
# print(self.desFrcTrqVal)
# input()
#If Force values are extreme, check the returns
# if(self.desFrcTrqVal[0]>365 and self.desFrcTrqVal[1]>360):
# print("Both forces are high and reward is", self.desFrcTrqVal, reward)
# input()
# if done:
#
# if(broke_sim):
# print('Broken Sim')
# elif(not np.isfinite(s).all()):
# print('Nans')
# elif(not(heightAfter>-0.4)):
# print('Height too less')
# elif(not(heightAfter < 0.61)):
# print('Height too much')
# elif(not raiseVelScore>self.minUpVel):
# print('Raise Vel negative')
# elif(not sideVelScore>0):
# print('Side velocity unbounded')
# elif (not velScore > 0 ):
# print(vel)
# print('Forward velocity unbounded')
# else:
# print(raiseVel )
# print('Something else')
# input()
# print('humanSkelHolder : heightAfter : {}'.format(heightAfter))
# if heightAfter>0.57:
# print('humanSkelHolder : heightAfter : {}'.format(heightAfter))
# input()
#info
dct = {'broke_sim': broke_sim,'vy': raiseVel, 'raiseVelScore': raiseVelScore, 'vx': vel, 'velScore': velScore, 'vz': sideVel, 'height_rew':height_rew,
'actionPenalty':actionPenalty, 'is_done': done}
return reward, done, dct
#class to hold assisting robot
class robotSkelHolder(skelHolder):
def __init__(self, env, skel, widx, stIdx, skelHldrIDX):
skelHolder.__init__(self,env, skel,widx,stIdx, skelHldrIDX)
#the holder of the human to be helped -
self.helpedSkelH = None
#minUpVel is referenced in calcRewardAndCheckDone below - mirror humanSkelHolder's bound
self.minUpVel = .001
#this skeleton is not mobile TODO remove this once we have a policy for him
self.skel.set_mobile(False)
#called after initial pose is set
def postPoseInit(self):
self.initOpt()
#set bounds for optimizer, if necessary/useful
#TODO see if this will help, probably algorithm dependent
#idxAra : idxs of y component in fcntct part of bounds array(needs to be offset by ndofs)
def initOptBoundCnstrnts(self, idxAra, n):
#qdotPrime = first ndofs
#fcntc = next 12
#tau = last ndofs
#end idx of contact forces
idxFend = (self.ndofs+12)
lbndVec = np.zeros(n)
ubndVec = np.zeros(n)
#set accel min and max
lbndVec[0:self.ndofs] = -30.0
ubndVec[0:self.ndofs] = 30.0
#set contact force lower and upper bounds
lbndVec[self.ndofs:idxFend] = -100.0
#set lowerbound for y component to be 0
offsetYIdxs =(idxAra + self.ndofs)
lbndVec[offsetYIdxs] = 0
ubndVec[self.ndofs:idxFend] = 100.0
#set min/max torque limits as in program
lbndVec[idxFend:] = self.torqueLims * -1
ubndVec[idxFend:] = self.torqueLims
#set min bounds
self.optimizer.set_lower_bounds(lbndVec)
#set max bounds
self.optimizer.set_upper_bounds(ubndVec)
#initialize nlopt optimizer variables
def initOpt(self):
# n is dim of optimization parameters/decision vars-> 2 * ndofs + 12
n=2*self.ndofs + 12
#index array of locations in fcntct array of y component of contact force - should always be positive
y_idxAra = np.array([1,4,7,10])
#idx aras to isolate variables in constraint and optimization functions
self.qdotIDXs = np.arange(self.ndofs)
self.fcntctIDXs = np.arange(self.ndofs,(self.ndofs+12))
self.tauIDXs = np.arange((self.ndofs+12), n)
#helper values - always calculate 1 time
#always constant - calc 1 time
#dof idxs to be used for pose matching optimization calculations
#ignore waist, and reach hand dofs and root location
#root orientation 0,1,2; root location 3,4,5
#left thigh 6,7,8 : 6 is bend toward chest, 7 is twist along thigh axis, 8 is spread
#left shin, left heel(2), left toe 9,10,11,12
#right thigh 13,14,15: 13 is bend toward chest, 14 is twist along thigh axis, 15 is spread
#right shin, right heel(2), right toe 16,17,18,19
#abdomen(2), spine 20,21,22; head 23,24
#scap left, bicep left(3) 25,26,27,28 ; forearm left, hand left 29,30
#scap right, bicep right(3) 31,32,33,34 ; forearm right, hand right 35,36
if('h_hand_right' in self.reach_hand) :
self.optPoseUseIDXs = np.array([0,1,2,6,7,8,9,10,11,12,13,14,15,16,17,18,19,23,24,25,26,27,28])
else :#left hand reaching, match pose of right hand
self.optPoseUseIDXs = np.array([0,1,2,6,7,8,9,10,11,12,13,14,15,16,17,18,19,23,24,31,32,33,34])
#this is pose q of dofs we wish to match
self.matchPose = self.initPose[(self.optPoseUseIDXs)]
#TODO magic number, no idea if this is correct choice
self.kPose = 100
self.tSqrtKpose = 2 * sqrt(self.kPose)
#tau filter- force first 6 dofs to 0
rootZeros = np.zeros(6)
tauOnes = np.ones(self.ndofs-6)
self.tauFltr = np.concatenate([rootZeros,tauOnes])
#torqueLimits
print('{}'.format(self.action_scale))
self.torqueLims = np.concatenate([rootZeros,self.action_scale])
#up vector for all 4 contact forces
self.fcntctUp = np.zeros(12)
self.fcntctUp[y_idxAra] = 1
#for derivs
self.zeroA = np.zeros(self.ndofs)
self.oneA = np.ones(self.ndofs)
self.oneAOvTS = self.oneA/self.timestep
self.zeroTau = np.zeros(self.ndofs)
#negative because used in MA equality constraint
self.tauDot = np.ones(self.ndofs) * -1
self.zeroFrict = np.zeros(12)
#deriv of contact force eq - setting to negative since difference from cone val
self.fcntctDot = np.ones(12)
self.fcntctDot[y_idxAra] *= -1
#create optimizer
#only LD_MMA and LD_SLSQP support nonlinear inequality constraints
#only LD_SLSQP supports nonlinear equality constraints
self.optimizer = nlopt.opt(nlopt.LN_COBYLA, n) #NLOPT_LN_COBYLA
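#note: LN_COBYLA is nlopt's derivative-free constrained optimizer; per the
#nlopt docs it accepts nonlinear inequality and equality constraints
#(equalities handled internally as paired inequalities), which is why it can
#stand in for the gradient-based LD_SLSQP mentioned above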
#set bounds - not sure if needed/wise
self.initOptBoundCnstrnts(y_idxAra,n)
#set initial guess to be all ones TODO better choice?
self.nextGuess= np.ones(n)
#set root dof initial guess to be 0
stRoot=(self.ndofs+12)
self.nextGuess[stRoot:(stRoot+6)] = np.zeros(6)
#tolerances
self.cnstTol = np.ones(n) * 1e-8
def applyTau(self):
self.skel.set_forces(self.tau)
#this is the skelHolder for the assisting robot, so set the human to be nonNull
def setHelpedSkelH(self, skelH):
self.helpedSkelH = skelH
#reset observation dim to match this skel and human skel's combined obs
self.setObsDim(self.obs_dim + skelH.obs_dim)
#robot uses combined human and robot state, along with force observation
def getObs(self):
stateHuman = self.helpedSkelH.getObs()
#stateHuman has current force as part of observation, need to replace with local desired force TODO
state = np.concatenate([
self.skel.q,
self.getSkelqDot(),
stateHuman,
])
#assign COM to observation
state[3:6] = self.skel.com()
return state
#assistant robot skel's individual per-reset settings
def resetIndiv(self, dispDebug):
# print('bot head height : {} '.format(self.skel.body('h_head').com()[1] ))
# print('bot foot height : {} '.format(self.skel.body('h_heel_left').com()[1] ))
# print('bot com : {} '.format(self.skel.com() ))
pass
#functionality before sim step is executed on this skel
def preStep(self, a):
# self.lenFrcVec = len(desFrcTrqVal)
#self.desFrcTrqVal = [-1] * self.helpedSkelH.desFrcTrqVal
#self.skel.body(self.reach_hand).add_ext_force(self.desFrcTrqVal)
#get x position and height before forward sim
com = self.skel.com()
self.posBefore = com[0]
self.heightBefore = com[1]
self.sideBefore = com[2]
#state before sim
self.oldState = self.state_vector()
self.tau=self.setTau(a)
#calculate reward for this agent, see if it is done, and return informational dictionary (holding components of reward for example)
def calcRewardAndCheckDone(self,resDict):
#resDict holds whether the sim was broken or not - if sim breaks, we need to end the rollout
#holds these values : {'broken':False, 'frame':n_frames, 'stableStates':stblState}
#check first if sim is broken - illegal actions or otherwise exploding
broke_sim = resDict['broken']
com = self.skel.com()
posAfter = com[0]
heightAfter = com[1]
sideVel = (com[2] - self.sideBefore)/self.env.dt
#angular terms : rotating around z, y and x axis
# ang_cos_swd = self.procOrient([0, 0, 1])
# ang_cos_uwd = self.procOrient([0, 1, 0])
# ang_cos_fwd = self.procOrient([1, 0, 0])
#
# reward function calculation
alive_bonus = 1.0
vel = (posAfter - self.posBefore) / self.env.dt
velScore = -abs(vel-2)+2
raiseVel = (heightAfter - self.heightBefore) / self.env.dt
#peak centered at velocity == 2 + self.minUpVel, peak is 2 high, so spans self.minUpVel to 4+sel.minUpVel
raiseVelScore = -abs(raiseVel-(2+ self.minUpVel)) + 2
height_rew = float(10 * heightAfter)
#minimize requested torques- should this be requested or actually used? TODO
actionPenalty = float(1e-3 * np.square(self.a).sum())
reward = alive_bonus - actionPenalty
done = broke_sim or not(
(raiseVelScore >= 0) and
#maybe not good, need to check
# (abs(ang_cos_uwd) < np.pi) and
# (abs(ang_cos_fwd) < np.pi) and
# (abs(ang_cos_swd) < np.pi) and
(heightAfter < 1.7)
)
#print('RobotSkelHolder : heightAfter : {}'.format(heightAfter))
dct = {'broke_sim': broke_sim, 'raiseVelScore': raiseVelScore, 'height_rew':height_rew,
'actionPenalty':actionPenalty, 'is_done': done}
#return (reward, done, info) to match the signature skelHolder.postStep expects
return reward, done, dct
##class to hold assisting robot arm (not full body biped robot)
class robotArmSkelHolder(skelHolder):
def __init__(self, env, skel, widx, stIdx, skelHldrIDX):
skelHolder.__init__(self,env, skel,widx,stIdx, skelHldrIDX)
#the holder of the human to be helped -
self.helpedSkelH = None
#called after initial pose is set
def postPoseInit(self):
self.initOpt()
#set bounds for optimizer, if necessary/useful
#TODO see if this will help, probably algorithm dependent
#no contacts forces
def initOptBoundCnstrnts(self, n):
lbndVec = np.zeros(n)
ubndVec = np.zeros(n)
#TODO
#set min bounds
self.optimizer.set_lower_bounds(lbndVec)
#set max bounds
self.optimizer.set_upper_bounds(ubndVec)
#initialize nlopt optimizer variables
def initOpt(self):
# n is dim of optimization parameters/decision vars-> 2 * ndofs (no contact forces here)
n=2*self.ndofs
#idx aras to isolate variables in constraint and optimization functions
self.accelIDXs = np.arange(self.ndofs)
self.tauIDXs = np.arange(self.ndofs, n)
#helper values - always calculate 1 time
#always constant - calc 1 time
#TODO magic number, no idea if this is correct choice
self.kPose = 100
self.tSqrtKpose = 2 * sqrt(self.kPose)
#tau filter- force first 6 dofs to 0
rootZeros = np.zeros(6)
tauOnes = np.ones(self.ndofs-6)
self.tauFltr = np.concatenate([rootZeros,tauOnes])
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 09:42:24 2018
@author: William
"""
import re #import regex
import os
path_to_cpp = ''
#OS walk to find the cpp compilation
for root, dirs, files in os.walk(".", topdown=False):
for branch in dirs:
if 'ssa_cpp' in branch:
path_to_cpp = os.path.join(root, branch)
if path_to_cpp != '':
try:
cwd = os.getcwd()
os.chdir(path_to_cpp)
import ssa_translation
os.chdir(cwd)
except:
os.chdir(cwd)
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
import time
import json, codecs
from scipy import sparse
from scipy.stats import pearsonr
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.animation as animation
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.patches import Ellipse
#import scipy.stats.trim_mean as tmean
from scipy.stats import kde
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not installed, polling genbank will not be possible')
pass
import translation_models as models
class rSNAPsim():
"""
The Single Molecule Simulator (SMS) provides a python class for running
single molecule mRNA translation simulations
When presented with a valid protein sequence the SMS can find open reading frames
and simulate intensity trajectories from translation of the protein with given fluorescent tags.
*model description*
link to paper here / image
*main functions*
-open_seq_file(filepath), opens a txt or .gb file and gets the sequence
-get_orfs(nt_sequence, min_codons), returns open reading frames of a given
sequence and a minimum codon length per protein
-get_temporal_proteins(), gets the proteins after get_orfs
-analyze_poi(aa_seq,nt_seq), analyzes the proteins of interest for
codon sensitivity and elongation rates
-__.poi(), class to contain proteins of interest after analysis
-run_default(), runs get_orfs, get_temporal_proteins, and analyze_poi
with the first protein found in the sequence
*attributes*
**gene_sequence_str** = string of the nucleotide sequence
**tag_dict** = dictionary with various types of fluorescent tag epitopes
**tag_full** = dictionary of full tag sequences
**aa_keys** = amino acid single letter keys
**codon_types** = flag dictionary of which amino acids are set to Wild-type, fast, or slow
**aa_table** = dictionary of amino acids
**aa_table_r** = reverse dictionary (amino acid letters are the keys)
**strGeneCopy** = dictionary of wild-type tRNA copy numbers
**strGeneCopy_fast** = dictionary of fast tRNA copy numbers
**strGeneCopy_slow** = dictionary of slow tRNA copy numbers
**slow_codons_value** = list of slowest codon tRNA copy numbers
**fast_codons_value** = list of fastest codon tRNA copy numbers
**sensitivity_fast_slow** = list of sensitivity for amino acids
**poi** = Class container for proteins of interest
**orfs** = dictionary of open reading frames with keys 1,2,3
**seq_str** = sequence string
**proteins** = dictionary of proteins detected in the sequence by ORF
**tagged_proteins** = dictionary of proteins that were detected and tagged
*POI*
Protein of interest has the following attributes:
**aa_seq** = amino acid sequence
**nt_seq** = nucleotide sequence
**gene_length** = length of the gene
**tag_length** = length of the tags
**total_length** = total length of the full amino acid sequence
**name** = name of the gene
**tag_types** = what types of tags does the protein have
**tag_epitopes** = type of tags and epitope lists per tag
**codon_sensitivity** = how sensitive is the protein per amino acid sequence?
**CAI** = codon adaptation index
**CAI_codons** = means of the codon adaptation values
*ssa*
The ssa container class has the following attributes:
**no_ribosomes** = number of ribosomes
**n_traj** = number of trajectories
**k** = all k_elongation rates (calculated from codon sequence)
**no_rib_per_mrna** = number of ribosomes per mRNA strand on average
**rib_density** = ribosome density
**rib_means** = ribosome means
**rib_vec** = raw ribosome location matrix for each trajectory
**intensity_vec** = fluorescence intensities
**time_vec_fixed** = the time vector
**start_time** = the time the simulation was started
**evaluating_inhibitor** = was there an inhibitor present?
**evaluating_frap** = was the simulation subjected to a FRAP test
**time_inhibit** = the time of the perturbation
**autocorr_vec** = autocorrelation vector of intensities
**mean_autocorr** = the average autocorrelations, averaged over trajectories
**error_autocorr** = the standard deviation of the autocorrelation
**dwell_time** = how long do the ribosomes stay on the mRNA strand calculated by the simulation
**ke_sim** = the calculated average elongation rate from the simulations
"""
def __init__(self):
self.gene_sequence_str = ''
self.tag_dict = {'T_SunTag':'EELLSKNYHLENEVARLKK',
'T_Flag':'DYKDDDDK',
'T_Hemagglutinin':'YPYDVPDYA'}
self.tag_colors = {'T_SunTag':'green',
'T_Flag':'blue',
'T_Hemagglutinin':'blue'}
self.tag_full = {'T_Flag':('ATGGACTACAAGGACGACGACGACAAAGGTGAC'
'TACAAAGATGATGACGATAAAGGCGACTATA'
'AGGACGATGACGACAAGGGCGGAAACTCACTGA'
'TCAAGGAAAACATGCGGATGAAGGTGGTGAT'
'GGAGGGCTCCGTGAATGGTCACCAGTTCAAGTG'
'CACCGGAGAGGGAGAGGGAAACCCGTACATG'
'GGAACTCAGACCATGCGCATTAAGGTCATCGAA'
'GGAGGTCCGCTGCCGTTCGCTTTCGATATCC'
'TGGCCACTTCGTTCGGAGGAGGGTCGCGCACGTTC'
'ATCAAGTACCCGAAGGGAATCCCGGACTT'
'CTTTAAGCAGTCATTCCCGGAAGGATTCACTTGGG'
'AACGGGTGACCCGGTATGAAGATGGAGGT'
'GTGGTGACTGTCATGCAAGATACTTCGCTGGAGGATGGG'
'TGCCTCGTGTACCACGTCCAAGTCC'
'GCGGAGTGAATTTCCCGTCCAACGGACCAGTGATGCAG'
'AAAAAGACGAAGGGTTGGGAACCTAA'
'TACTGAAATGATGTACCCCGCAGACGGAGGGCTGAGGG'
'GCTACACCCACATGGCGCTGAAGGTC'
'GACGGAGGAGATTACAAGGATGACGACGATAAGCAACAA'
'GATTACAAAGACGATGATGACAAGG'
'GCCAGCAGGGCGACTACAAGGACGACGACGACAAGCAG'
'CAGGACTACAAAGATGACGATGATAA'
'AGGAGGAGGACATCTGTCCTGTTCGTTCGTGACCACCT'
'ACAGATCAAAGAAAACCGTGGGAAAC'
'ATCAAGATGCCGGGCATTCATGCCGTCGACCACCGCCT'
'GGAGCGGCTCGAAGAATCAGACAATG'
'AGATGTTCGTCGTGCAAAGAGAACATGCCGTGGCCAAGTT'
'CGCGGGACTGGGAGGCGGTGGAGG'
'CGATTACAAAGACGATGATGACAAGGGTGACTATAAAGA'
'CGACGATGACAAAGGGGATTACAAG'
'GATGATGATGATAAGGGAGGCGGTGGATCAGGTGGAG'
'GAGGTTCACTGCAG')}
self.aa_keys = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
'P', 'S', 'T', 'W', 'Y', 'V', '*']
self.codon_types = dict(zip(self.aa_keys, np.ones((1, 21)).flatten().astype(int).tolist()))
self.aa_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T',
'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R',
'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P',
'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R',
'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A',
'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G',
'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S',
'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L',
'UAC':'Y', 'UAU':'Y', 'UAA':'*', 'UAG':'*',
'UGC':'C', 'UGU':'C', 'UGA':'*', 'UGG':'W',}
self.aa_table_r = {'A':['GCA', 'GCC', 'GCG', 'GCT','GCU'],
'R':['CGA', 'CGC', 'CGG', 'CGT','AGG','AGA','CGU'],
'N':['AAC', 'AAT','AAU'],
'D':['GAC', 'GAT','GAU'],
'C':['TGC', 'TGT','UGC','UGU'],
'Q':['CAA', 'CAG'],
'E':['GAA', 'GAG'],
'G':['GGT', 'GGC', 'GGA', 'GGG','GGU'],
'H':['CAC', 'CAT','CAU'],
'I':['ATT', 'ATC', 'ATA','AUU','AUC','AUA'],
'L':['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG','CUA', 'CUC', 'CUG', 'CUU', 'UUA', 'UUG'],
'K':['AAA', 'AAG'],
'M':['ATG','AUG'],
'F':['TTC', 'TTT','UUC','UUU'],
'P':['CCT', 'CCC', 'CCG', 'CCA','CCU'],
'S':['TCA', 'TCC', 'TCG', 'TCT','AGC','AGT','UCA','UCC','UCG'],
'T':['ACA', 'ACC', 'ACG', 'ACT','ACU'],
'W':['TGG','UGG'],
'Y':['TAT', 'TAC','UAC','UAU'],
'V':['GTA', 'GTC', 'GTT','GTG','GUG','GUU','GUC','GUA'],
'*':['TGA', 'TAG', 'TAA','UGA','UAG','UAA']
}
self.strGeneCopy = {'TTT': 17.6, 'TCT': 15.2, 'TAT': 12.2, 'TGT': 10.6, 'TTC': 20.3,
'TCC': 17.7, 'TAC': 15.3, 'TGC': 12.6, 'TTA': 7.7, 'TCA': 12.2,
'TAA': 1.0, 'TGA': 1.6, 'TTG': 12.9, 'TCG': 4.4, 'TAG': 0.8,
'TGG': 13.2, 'CTT': 13.2, 'CCT': 17.5, 'CAT': 10.9, 'CGT': 4.5,
'CTC': 19.6, 'CCC': 19.8, 'CAC': 15.1, 'CGC': 10.4, 'CTA': 7.2,
'CCA': 16.9, 'CAA': 12.3, 'CGA': 6.2, 'CTG': 39.6, 'CCG': 6.9,
'CAG': 34.2, 'CGG': 11.4, 'ATT': 16.0, 'ACT': 13.1, 'AAT': 17.0,
'AGT': 12.1, 'ATC': 20.8, 'ACC': 18.9, 'AAC': 19.1, 'AGC': 19.5,
'ATA': 7.5, 'ACA': 15.1, 'AAA': 24.4, 'AGA': 12.2, 'ATG': 22.0,
'ACG': 6.1, 'AAG': 31.9, 'AGG': 12.0, 'GTT': 11.0, 'GCT': 18.4,
'GAT': 21.8, 'GGT': 10.8, 'GTC': 14.5, 'GCC': 27.7, 'GAC': 25.1,
'GGC': 22.2, 'GTA': 7.1, 'GCA': 15.8, 'GAA': 29.0, 'GGA': 16.5,
'GTG': 28.1, 'GCG': 7.4, 'GAG': 39.6, 'GGG': 16.5}
# add the U codons
for key in list(self.strGeneCopy.keys()):
if 'T' in key:
val = self.strGeneCopy[key]
newkey = key.replace('T','U')
self.strGeneCopy[newkey] = val
self.strGeneCopy_fast = {'GCT': 27.7, 'GCC': 27.7, 'GCA': 27.7, 'GCG': 27.7, #A
'CGT': 12.2, 'CGC': 12.2, 'CGA': 12.2, 'CGG': 12.2,
'AGA': 12.2, 'AGG': 12.2, # R
'AAT': 19.1, 'AAC': 19.1, #N
'GAT': 25.1, 'GAC': 25.1, # D
'TGT': 12.6, 'TGC': 12.6, # C
'CAA': 34.2, 'CAG': 34.2, # Q
'GAA': 39.6, 'GAG': 39.6, #E
'GGT': 22.2, 'GGC': 22.2, 'GGA': 22.2, 'GGG': 22.2, # G
'CAT': 15.1, 'CAC': 15.1, # H
'ATT': 20.8, 'ATC': 20.8, 'ATA': 20.8, # I
'TTA': 39.6, 'TTG': 39.6, 'CTT': 39.6, 'CTC': 39.6,
'CTA': 39.6, 'CTG': 39.6, # L
'AAA': 31.9, 'AAG': 31.9, # K
'ATG': 22.0, #M
'TTT': 20.3, 'TTC': 20.3, # F
'CCT': 19.8, 'CCC': 19.8, 'CCA': 19.8, 'CCG': 19.8, # P
'TCT': 19.5, 'TCC': 19.5, 'TCA': 19.5, 'TCG': 19.5,
'AGT': 19.5, 'AGC': 19.5, # S
'ACT': 18.9, 'ACC': 18.9, 'ACA': 18.9, 'ACG': 18.9, # T
'TGG': 13.2, #W
'TAT': 15.3, 'TAC': 15.3, # Y
'GTT': 28.1, 'GTC': 28.1, 'GTA':28.1, 'GTG': 28.1, # V
'TAA': 1.6, 'TAG': 1.6, 'TGA':1.6 #STOP
}
for key in list(self.strGeneCopy_fast.keys()):
if 'T' in key:
val = self.strGeneCopy_fast[key]
newkey = key.replace('T','U')
self.strGeneCopy_fast[newkey] = val
self.strGeneCopy_slow = {'GCT': 7.4, 'GCC': 7.4, 'GCA': 7.4, 'GCG': 7.4, #A
'CGT': 4.5, 'CGC': 4.5, 'CGA': 4.5, 'CGG': 4.5,
'AGA':4.5, 'AGG':4.5, #R
'AAT': 17.0, 'AAC':17.0, #%N
'GAT': 21.8, 'GAC': 21.8, #D
'TGT': 10.6, 'TGC':10.6, #C
'CAA': 12.3, 'CAG': 12.3, #Q
'GAA': 29.0, 'GAG': 29.0, #E
'GGT': 10.8, 'GGC': 10.8, 'GGA': 10.8, 'GGG': 10.8, #G
'CAT': 10.9, 'CAC':10.9, #H
'ATT': 7.5, 'ATC': 7.5, 'ATA': 7.5, #I
'TTA': 7.2, 'TTG':7.2, 'CTT': 7.2, 'CTC': 7.2,
'CTA': 7.2, 'CTG': 7.2, #L
'AAA': 24.4, 'AAG': 24.4, #K
'ATG': 22.0, #M
'TTT': 17.6, 'TTC': 17.6, #F
'CCT': 6.9, 'CCC': 6.9, 'CCA': 6.9, 'CCG': 6.9, #P
'TCT': 4.4, 'TCC': 4.4, 'TCA': 4.4, 'TCG': 4.4,
'AGT': 4.4, 'AGC': 4.4, #S
'ACT': 6.1, 'ACC': 6.1, 'ACA': 6.1, 'ACG': 6.1,#T
'TGG': 13.2, #W
'TAT': 12.2, 'TAC': 12.2, #Y
'GTT': 7.1, 'GTC':7.1, 'GTA': 7.1, 'GTG': 7.1, # V
'TAA': 0.8, 'TAG': 0.8, 'TGA': 0.8 #STOP
}
for key in list(self.strGeneCopy_slow.keys()):
if 'T' in key:
val = self.strGeneCopy_slow[key]
newkey = key.replace('T','U')
self.strGeneCopy_slow[newkey] = val
self.fast_codons_value = [27.7, 12.2, 19.1, 25.1, 12.6, 34.2, 39.6, 22.2, 15.1,
20.8, 39.6, 31.9, 22, 20.3, 19.8, 19.5,
18.9, 13.2, 15.3, 28.1, 1.6]
self.slow_codons_value = [7.4, 4.5, 17, 21.8, 10.6, 12.3, 29, 10.8, 10.9, 7.5, 7.2,
24.4, 22, 17.6, 6.9, 4.4, 6.1, 13.2, 12.2, 7.1, .8]
fullcodonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA',
'GCU', 'CGU', 'AAU', 'GAU', 'UGU', 'CAA', 'GAA', 'GGU', 'CAU',
'AUU', 'UUA', 'AAA', 'AUG', 'UUU', 'CCU', 'UCU',
'ACU', 'UGG', 'UAU', 'GUU', 'UAA', ]
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def __update_sensitivity(self):
"""
updates sensitivities for the GUI implementation call
"""
self.fast_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy[codon])
self.fast_codons_value.append(max(values))
for codon in codons:
self.strGeneCopy_fast[codon] = max(values)
self.slow_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy_slow[codon])
self.slow_codons_value.append(min(values))
for codon in codons:
self.strGeneCopy_slow[codon] = min(values)
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT', 'ATT',
'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT', 'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def load_tags(self):
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.append(line)
for line in previous_tags:
custom_tag = line.strip('\n').split('---')
if custom_tag[0] not in self.tag_dict.keys():
self.tag_dict[custom_tag[0]] = custom_tag[2]
self.tag_full[custom_tag[0]] = custom_tag[1]
f.close()
def add_tag(self,nt_seq,name):
'''
add a custom tag sequence
'''
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.append(line)
if not set(nt_seq.lower()).issubset( set(['a','t','c','g','u'])):
print('invalid NT sequence')
f.close()
return
aa_seq = self.nt2aa(nt_seq)
newtag =name+'---'+ nt_seq.lower() + '---'+ aa_seq.upper()+'\n'
if newtag not in previous_tags:
previous_tags.append(newtag)
f.close()
f= open("custom_tags.txt","w+")
for item in previous_tags:
f.write('%s' % item)
f.close()
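#file format note: each custom_tags.txt line stores
# name---nucleotide_sequence---amino_acid_sequence
#e.g. "myTag---atggactacaag---MDYK" (hypothetical entry); load_tags() above
#splits each line on '---' to rebuild tag_dict (index 2) and tag_full (index 1)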
def nt2aa(self, nt_seq):
'''
Translates nucleotide sequences to amino acid sequences
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**aa_seq**, amino acid sequence as string
'''
aa = ''
for i in range(0, len(nt_seq), 3):
aa += self.aa_table[nt_seq[i:i+3]]
return aa
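#usage sketch: nt2aa maps each codon triplet through aa_table, e.g.
# self.nt2aa('ATGGACTACAAG') -> 'MDYK'
#note a trailing partial codon would raise a KeyError, so callers are
#expected to pass in-frame sequences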
def get_orfs(self, nt_seq='', min_codons=80):
'''
Returns open reading frames of the nucleotide sequence given
orfs = {'1':[proteins],
'2':[proteins],
'3':[proteins]}
*keyword args*
**nt_seq**, nucleotide sequence as a string. If left blank uses
self.sequence_str
**min_codons**, minimum number of codons to be considered
a protein in the open reading frame
'''
if nt_seq == '':
nt_seq = self.sequence_str
allstarts = np.array([m.start() for m in re.finditer('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#allsegments = re.findall('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
allstops = np.array([m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
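#regex note: the lookahead '(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))' anchors
#every ATG/AUG that is followed, in the same reading frame, by a stop codon
#(TAG/TAA/TGA or their U forms); '(?:.{3})+?' keeps start and stop in frame,
#and the lookahead wrapper lets finditer report overlapping starts instead of
#consuming them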
start_frames = allstarts%3
stop_frames = allstops%3
min_len = min_codons*3
orf1_starts = allstarts[np.where(start_frames == 0)]
orf2_starts = allstarts[np.where(start_frames == 1)]
orf3_starts = allstarts[np.where(start_frames == 2)]
orf1_stops = allstops[np.where(stop_frames == 0)]
orf2_stops = allstops[np.where(stop_frames == 1)]
orf3_stops = allstops[np.where(stop_frames == 2)]
self.starts = [orf1_starts, orf2_starts, orf3_starts]
self.stops = [orf1_stops, orf2_stops, orf3_stops]
self.orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[np.where(orf1_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['1'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[np.where(orf2_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['2'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[np.where(orf3_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['3'].append((start, nextstop))
laststop = nextstop
def get_k_construct(self, nt_seq, k_init, k_elong_mean, codon_types=None):
'''
Returns the k_elongation rates of a given nucleotide sequence under constructed conditions
given some sort of key describing which amino acids are slow, fast or natural
*args*
**nt_seq**, nucleotide sequence to get the propensities of
**k_init**, initiation rate of starting translation
**k_elong_mean**, average rate of elongation for the protein translation
*keyword args*
**codon_types**, a dictionary or identifier determining which amino acids are slow, fast or natural
self.codon_types is an example dictionary for the user to change / utilize; if codon_types is left blank
get_k_construct uses this internal dictionary
ex: codon_types = 'slow' or 'rare' all amino acids set to slow
codon_types = 'fast' or 'common' all amino acids set to fast
codon_types = 'natural' all amino acids set to natural
codon_types = {'A':[0], 'T':[2]} A set to slow, T set to fast
codon_types = {'rare':['A','R'],'common':['L']} A and R set to slow, L set to fast
'''
if codon_types is None:
codon_types = self.codon_types
else:
all_natural = dict(zip(self.aa_keys, np.ones((1, 20)).flatten().astype(int).tolist()))
if isinstance(codon_types, str):
if codon_types == 'rare' or codon_types == 'slow':
all_natural = dict(zip(self.aa_keys, np.zeros((1, 20)).flatten().astype(int).tolist()))
if codon_types == 'common' or codon_types == 'fast':
all_natural = dict(zip(self.aa_keys, (2*np.ones((1, 20))).flatten().astype(int).tolist()))
if isinstance(codon_types, dict):
for key in codon_types.keys():
if isinstance(key, str):
if key.lower() not in ['rare', 'common', 'natural']:
if key.upper() in self.aa_keys:
if codon_types[key] in [0, 1, 2]:
all_natural[key.upper()] = codon_types[key]
if codon_types[key] in ['rare', 'common', 'natural']:
if codon_types[key] == 'rare':
all_natural[key] = 0
if codon_types[key] == 'common':
all_natural[key] = 2
if codon_types[key] == 'natural':
all_natural[key] = 1
else:
newkeys = codon_types[key]
for newkey in newkeys:
if newkey.upper() in self.aa_keys:
if key.lower() == 'rare':
all_natural[newkey.upper()] = 0
if key.lower() == 'common':
all_natural[newkey.upper()] = 2
if key.lower() == 'natural':
all_natural[newkey.upper()] = 1
if isinstance(key, int):
newkeys = codon_types[key]
for newkey in newkeys:
all_natural[newkey] = key
codon_types = all_natural
aa_seq = self.nt2aa(nt_seq)
tRNA_design = np.zeros((1, len(aa_seq)))
tRNA_norm = np.zeros((1, len(aa_seq)))
seperated_codons = [nt_seq[i:i+3] for i in range(0, len(nt_seq), 3)] #split codons by 3
for i in range(len(seperated_codons)):
tRNA_norm[0, i] = self.strGeneCopy[seperated_codons[i]]
for i in range(len(self.aa_keys)-1):
fs = codon_types[self.aa_keys[i]]
indexes = [m.start() for m in re.finditer(self.aa_keys[i], aa_seq)]
for index in indexes:
if fs == 0:
tRNA_design[0, index] = self.slow_codons_value[i]
if fs == 2:
tRNA_design[0, index] = self.fast_codons_value[i]
if fs == 1:
tRNA_design[0, index] = tRNA_norm[0, index]
tRNA_design[0, -1] = tRNA_norm[0, -1]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation_design = (tRNA_design / mean_tRNA_copynumber) * k_elong_mean
all_k_design = [k_init] + k_elongation_design.flatten().tolist() + [k_elong_mean]
return all_k_design
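#usage sketch (example rates assumed):
# ks = sms.get_k_construct(nt_seq, k_init=0.03, k_elong_mean=10, codon_types='fast')
#returns [k_init, k_codon_1, ..., k_codon_N, k_elong_mean]: one initiation
#rate, one design-scaled elongation rate per codon, and a final entry set to
#the mean elongation rate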
def get_ui(self, nt_seq):
'''
return the ratio of average gene copy number / sequence codon copy number
'''
mean_u = np.mean(list(self.strGeneCopy.values()))
ui = []
for i in range(0, len(nt_seq), 3):
ui.append(mean_u/ self.strGeneCopy[nt_seq[i:i+3]])
return ui
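#note: ui[i] = mean copy number / copy number of codon i, so rare (slow)
#codons score above 1 and common (fast) codons below 1; e.g. with the
#wild-type table above, CTG (39.6) gives ui well under 1 while TCG (4.4)
#gives ui well over 1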
def get_k_3_frame(self,nt_seq,k_elong_mean):
kelongs = []
for n in range(3):
if n !=0:
codons = nt_seq[n:-(3-n)]
else:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
kelongs = kelongs + k_elongation.flatten().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_mean):
'''
returns all propensities for a given nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
**k_init**, initiation rate of ribosome binding
**k_elong_mean**, average rate of elongation, experimentally found
'''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_init] + k_elongation.flatten().tolist()[:-1] + [10]
return all_k
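    # Layout of the vector returned above (a sketch following the code, with a
    # hypothetical call): all_k = [k_init, k_codon_1, ..., k_codon_(L-1), 10],
    # i.e. one initiation rate, per-codon elongation rates scaled by tRNA copy
    # number relative to the mean, and a fixed termination rate of 10.
    #   all_k = model.get_k('ATGGCT...TAA', k_init=0.03, k_elong_mean=10)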
def get_temporal_proteins(self):
        '''
        gets all the temporal proteins after getting the ORFs
        __.tagged_proteins = dictionary with keys of tag types and a list of proteins
        __.pois = list of proteins of interest
        __.pois_seq = list of nucleotide sequences of the proteins of interest
        __.proteins = dictionary with keys '1', '2', '3' for each reading frame
        '''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].append(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].append(nt_seq)
self.tagged_proteins[tag].append(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.append(self.tagged_proteins[tag][i])
self.pois_seq.append(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.nt2aa(self.tag_full['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(self.tag_full['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
                except:
                    # feature lacks a 'translation' qualifier (or a related field); skip it
                    pass
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
        '''
        Analyzes the protein of interest and stores it in __.POI
        *args*
            **protein**, amino acid sequence as a string
            **sequence**, nucleotide sequence that codes for the protein
            **epitope_loc**, which position of the epitope to report: front, middle or back,
              e.g. for the epitope DDYDDK: front: 0, middle: 3, back: 6
        '''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.append(key)
'''
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.append(tag)
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
            except:  # tag sequence not in tag_full; recover the epitope from the aa sequence instead
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.append(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
        '''
        Reads a sequence file: .txt (plain sequence), .gb/.gbk (GenBank), or .dna (SnapGene)
        *args*
            **seqfile**, path to the sequence file
        '''
seq = seqfile
self.sequence_name = ''
        if '.dna' in seq:
            self.sequence_name = seq[:-4]
            try:
                seq_record = snapgene_file_to_seqrecord(seq)
                self.sequence_str = seq_record.seq.tostring()
            except:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
        if self.sequence_name == '':
            self.sequence_name = seqfile.replace('.txt', '').replace('.gb', '')
def codon_usage(self, nt_seq):
        '''
        Analyzes codon usage from the nucleotide sequence
        *args*
            **nt_seq**, nucleotide sequence as a string
        *returns*
            **codon_sensitivity**, a list of codon sensitivities for the nucleotide sequence
            **cai**, codon adaptation index (CAI)
            **cai_codons**, per-codon CAI values
        '''
codon_usage = np.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findall(self.aa_keys[i], aa_seq))
        codon_usage[0, 20] = len(re.findall(r'\*', aa_seq))
codon_norm = codon_usage/gene_len
codon_sensitivity = np.round(codon_norm*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.append(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geomean(cai_codons)
return codon_sensitivity, cai, cai_codons
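    # Worked note on the CAI returned above: each codon contributes
    # w_i = strGeneCopy[codon_i] / strGeneCopy_fast[codon_i], and
    # cai = (w_1 * w_2 * ... * w_n) ** (1/n) via self.geomean. For instance,
    # two codons with w = 1.0 and w = 0.25 give cai = sqrt(0.25) = 0.5.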
def get_probvec(self):
        '''
        returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
        *returns*
            **probe_vec**, cumulative probe intensity vector by codon position, e.g. [0,0,0,0,1,1,1,1,2,2,2,3,3,3, ...]
            **probe_loc**, epitope positions as a binary vector: 1 at an epitope position, 0 everywhere else
        '''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = np.unique(probePosition).tolist()
probePositions.append(probePosition)
genelength = self.POI.total_length
pvfull = np.zeros((1, genelength+1)).astype(int).flatten()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = np.zeros((numtags, self.POI.total_length+1)).astype(int)
numind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[numind][self.POI.tag_epitopes[key]] = 1
numind += 1
return pvfull, ploc
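    # Illustrative sketch (hypothetical epitopes, not executed): one tag with
    # epitopes at codon positions 3 and 6 on a 9-codon gene yields
    #   probe_vec = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2]   # cumulative intensity
    #   probe_loc = [0, 0, 0, 1, 0, 0, 1, 0, 0, 0]   # binary epitope positions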
def simple_model(self, poi, tag, ki,ke):
'''
Simplified model
returns the analytical tau, intensity mean, and intensity variance
calculated from the simplified model
'''
L = poi.total_length #get the total length of the gene
Lm = np.mean(poi.tag_epitopes[tag]) #the mean location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / np.sum(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
mean_analytical = ki*tau_analytical* (1.-Lm/float(L)) # mean intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,mean_analytical,var_analytical
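    # Quick numeric sketch of the formulas above (hypothetical values):
    # with L = 100, Lm = 20, L_tag = 10, ke_analytical = 5 and ki = 0.1:
    #   tau  = 10 / 5 = 2 s
    #   mean = 0.1 * 2 * (1 - 20/100)    = 0.16
    #   var  = 0.1 * 2 * (1 - 20/100)**2 = 0.128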
def get_binned_k_emphasize_probes(self,k,bins,pl):
'''
evenly bins elongation rates as best it can.
'''
        probe_region_start = np.where(pl > 0)[0][0]
        probe_region_end = np.where(pl > 0)[0][-1]
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
        to_redistribute = len(k) % bins
        if to_redistribute > 0:  # guard: k_lens[-0:] would wrongly enlarge every bin
            k_lens[-to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = np.mean(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
        to_redistribute = len(k) % bins
        if to_redistribute > 0:  # guard: k_lens[-0:] would wrongly enlarge every bin
            k_lens[-to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/np.mean(1/np.array(binned_ks[i]))
return k_binned,k_lens
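    # Note on the binned rates above: each bin stores the harmonic mean
    # 1/mean(1/k) of its rates, so taking k_lens[i] steps at the binned rate
    # preserves the expected time to traverse the bin. E.g. rates [10, 2] bin
    # to 1/mean([0.1, 0.5]) = 3.33..., not the arithmetic mean 6.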
def get_binned_probe_vec(self,probe_loc,bins):
'''
        bin the probe vector as evenly as possible
'''
probe_loc = np.atleast_2d(probe_loc)
binsize = int(np.floor(probe_loc.shape[1]/bins))
probeloc_binned = np.zeros((np.atleast_2d(probe_loc).shape[0],bins))
probe_lens = np.ones((np.atleast_2d(probe_loc).shape[0],bins))*binsize
        to_redistribute = probe_loc.shape[1] % bins  # count codon positions, not probe rows
        if to_redistribute > 0:
            probe_lens[:, -to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(probe_lens,axis=1)[0,:])).astype(int)
for i in range(0,bins):
probeloc_binned[:,i] = np.sum(probe_loc[:,inds[i]:inds[i+1]],axis=1)
probevec_binned = np.cumsum(probeloc_binned,axis=1)
return probevec_binned.astype(int), probeloc_binned.astype(int)
def ssa_binned(self,nt_seq=None, bins = 50,all_k=None, k_elong_mean=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False):
        if nt_seq is None: # get sequence if none was passed
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
        if all_k is None: # build the k vector if one was not provided
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_initiation] + k_elongation.flatten().tolist()[:-1] + [10]
kbin,klen = self.get_binned_k(k_elongation.flatten()[:-1],bins)
            all_k = [k_initiation] + kbin.flatten().tolist()
pv,probePosition = self.get_binned_probe_vec(probePosition,bins)
footprint = 0
if isinstance(probePosition,list):
probePosition = np.array([probePosition]).astype(int)
ssa_obj = self.__solve_ssa(genelength, all_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint)
return ssa_obj
def ssa_solver(self, nt_seq=None, all_k=None, k_elong_mean=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False,N_rib=200):
'''
        Runs the stochastic simulation algorithm (SSA) for the translation simulation.
*keyword args*
**nt_seq**, nucleotide sequence to simulate
**all_k**, the propensity rates for each codon location (obtained via get_k)
**k_elong_mean**, average elongation rate to normalize by
**k_initiation**, rate of mRNA translation initiation
**probePosition**, binary vector of probe positions, i.e. where the tag epitopes start by codon position
**n_traj**, number of trajectories
**tf**, final time point
**tstep**, number of time steps to record from 0 to tf
            **time_inhibit**, time at which translation is inhibited (harringtonine assay or FRAP)
**evaluating_frap**, true or false for evaluating frap assay at time_inhibit
**evaluating_inhibitor**, true or false for evaluating harringtonine at time_inhibit
*returns*
            **ssa_obj**, a ssa() class containing the raw simulated ribosome positions and statistics such as intensity vectors from the SSA trajectory group
'''
if len(probePosition) == 0:
'''
try:
probePosition = []
for key in self.POI.tag_epitopes.keys():
probePosition = probePosition + self.POI.tag_epitopes[key]
probePosition = np.unique(probePosition).tolist()
except:
print('No POI found')
#nt_seq = self.tag_full['T_flag'] + nt_seq
'''
pv,probePosition = self.get_probvec()
        if nt_seq is None:
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
        if all_k is None:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_initiation] + k_elongation.flatten().tolist()[:-1] + [10]
if isinstance(probePosition,list):
probePosition = np.array([probePosition]).astype(int)
footprint = 9
ssa_obj = self.__solve_ssa(genelength, all_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python, footprint, N_rib)
return ssa_obj
def build_ODE(self,k,t,kbind, pl):
m = models.TranslateCorrs()
m.N = len(k)
m.tf = t[-1]
m.ptimes = len(t)
m.ke = k
#m.ke = 13.567*np.ones(kelong[1:].shape[0])
# m.ke[0] = 0.0
#m.kb = kelong[0]
m.kb = kbind
m.fi = 1
m.ti = t[0]
print(m.__dict__)
# Solve correlations
print("*****SOLVING MOMENT EQUATIONS*****")
m.binary = pl
start = time.time()
m.csolve()
solve_time = time.time()-start
print("Time to solve: %f" %solve_time)
print("Done.")
mean_I = m.map_to_fluorescence3(m.mu_ss)
var_I = m.map_to_fluorescence(m.var_ss)
print(mean_I)
print(var_I)
return m.tvec,np.ravel((m.intensity)/var_I), m.soln,m
    def __solve_ssa(self, genelength, all_k, pv, probePosition, n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor, force_python, footprint, N_rib=200):
non_consider_time = start_time
'''
if probePosition.shape[0] <= 1:
pv = np.zeros((1, genelength+1)).astype(int).flatten()
for i in range(len(probePosition[0])):
pv[probePosition[0][i]:] = i+1
else:
pv = np.zeros((probePosition.shape[0], genelength+1)).astype(int)
for j in range(probePosition.shape[0]):
for i in range(len(probePosition)):
pv[j][probePosition[j][i]:] = i+1
'''
npoints = tstep #non_consider_time + tstep
time_vec_fixed = np.linspace(0, npoints-1, npoints, dtype=np.float64)
truetime = np.linspace(0, tf, tstep, dtype=np.float64)
rib_vec = []
solutions = []
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
except:
intime = 0
# if evaluating_frap == True or evaluating_inhibitor == True:
# for i in range(nRepetitions):
#
# soln = self.SSA(all_k,time_vec_fixed,inhibit_time=time_inhibit+non_consider_time,FRAP=evaluating_frap,Inhibitor=evaluating_inhibitor)
# solutions.append(soln)
# else:
solutionssave = []
st = time.time()
#try:
        if force_python == True:
            st[0]  # intentional TypeError (st is a float): forces a fall-through to the (commented-out) Python SSA fallback
rib_vec = []
solutions = []
solutionssave = []
#N_rib = 200
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_ribtimes = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.float64)
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
k = np.array(all_k)
seeds = np.random.randint(0, 0x7FFFFFF, n_traj)
all_frapresults = np.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=np.int32)
all_collisions = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.int32)
all_nribs = np.zeros((n_traj,1))
all_col_points = []
x0 = np.zeros((N_rib),dtype=np.int32)
for i in range(n_traj):
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
ribtimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.float64)
frapresult = np.zeros((len(time_vec_fixed)*N_rib),dtype=np.int32)
coltimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointsx = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointst = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.float64)
nribs = np.array([0],dtype=np.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs,x0,footprint, N_rib)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
all_results[i, :] = result
all_frapresults[i,:] = frapresult
all_ribtimes[i,:] = ribtimes
all_collisions[i,:] = coltimes
all_nribs[i,:] = nribs
endcolrec = np.where(colpointsx == 0)[0][0]
colpoints = np.vstack((colpointsx[:endcolrec],colpointst[:endcolrec]))
all_col_points.append(colpoints.T)
for i in range(n_traj):
soln = all_results[i, :].reshape((N_rib, len(time_vec_fixed)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
collisions = np.array([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = all_nribs[i]
if totalrib > all_collisions.shape[1]:
collisions = np.append(collisions, all_collisions[i][:])
watched_ribs.append(int(all_collisions.shape[1]))
else:
collisions = np.append(collisions, all_collisions[i][:int(totalrib[0])])
watched_ribs.append(int(totalrib[0]))
sttime = time.time() - st
# except:
#
# print('C++ library failed, Using Python Implementation')
# rib_vec = []
#
# solutions = []
# solutionssave = []
# N_rib = 200
# collisions = np.array([[]])
# all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
# all_col_points = []
# watched_ribs = []
# for i in range(n_traj):
#
# soln,all_ribtimes,Ncol,col_points = self.SSA(all_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
# #soln = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
#
# collisions = np.append(collisions,Ncol)
# watched_ribs.append(int(len(collisions)))
# validind = np.where(np.sum(soln,axis=1)!=0)[0]
# all_col_points.append(np.array(col_points))
# if np.max(validind) != N_rib-1:
# validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
#
# so = soln[(validind,)]
#
# solutionssave.append(so)
#
# solutions.append(soln)
#
# result = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
# all_results[i, :] = result
#
# sttime = time.time() - st
#
#
# #rb = sparse.lil_matrix((len(time_vec_fixed),genelength),dtype=int)
# #for j in range(soln.shape[1]):
#
# #if len(np.where(soln[:,j]!=0)[0]) !=0:
# #print(np.where(soln[:,j]!=0)[0])
#
#
# #rb[j,np.where(soln[:,j]!=0)[0]] = 1
#
#
# #for value in soln[:,j][np.where(soln[:,j]!=0)[0]].astype(int):
#
# #rb[j, value-1] = 1
#
# #rib_vec.append(rb)
#
#
no_ribosomes = np.zeros((n_traj, (genelength+1)))
startindex = np.where(truetime >= non_consider_time)[0][0]
#all_results = all_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][np.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.astype(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_means = np.mean(no_ribosomes, axis=0)
ribosome_density = ribosome_means/npoints
no_ribosomes_per_mrna = np.mean(no_ribosomes)
if probePosition.shape[0] <=1:
I = np.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = np.zeros((int(probePosition.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = np.zeros((1,tstep+1))
if evaluating_frap == False:
if probePosition.shape[0] <=1:
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[i, :] = np.sum(np.multiply(pv.flatten()[traj], traj>0), axis=1)[startindex:].T
else:
for j in range(probePosition.shape[0]):
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[j,i, :] = np.sum(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = np.where(truetime > fraptime)
inds2 = np.where(truetime < fraptime+20)
inds = np.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
nribs = np.sum(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+start_time,fraptime+start_time+frap_app)
I[i, :] = np.sum(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = np.sum(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
ssa_obj = ssa()
ssa_obj.no_ribosomes = no_ribosomes
ssa_obj.n_traj = n_traj
ssa_obj.k = all_k
ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
ssa_obj.rib_density = ribosome_density
ssa_obj.rib_means = ribosome_means
ssa_obj.rib_vec = rib_vec
ssa_obj.intensity_vec = intensity_vec
ssa_obj.time_vec_fixed = time_vec_fixed
ssa_obj.time = truetime
ssa_obj.time_rec = truetime[startindex:]
ssa_obj.start_time = non_consider_time
ssa_obj.watched_ribs = watched_ribs
try:
ssa_obj.col_points = all_col_points
except:
pass
ssa_obj.evaluating_inhibitor = evaluating_inhibitor
ssa_obj.evaluating_frap = evaluating_frap
ssa_obj.time_inhibit = time_inhibit
ssa_obj.solutions = solutionssave
ssa_obj.solvetime = sttime
ssa_obj.collisions = collisions
try:
ssa_obj.ribtimes = all_ribtimes[np.where(all_ribtimes > 0)]
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = np.array([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([genelength],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(truetime)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solutions[k][:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solutions[k][startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
ssa_obj.fragments = fragarray
ssa_obj.fragtimes = fragtimes
ssa_obj.frag_per_traj = fragmentspertraj
ssa_obj.full_frags = truefrags
ssa_obj.all_results = all_results
if probePosition.shape[0] > 1:
for i in range(probePosition.shape[0]):
if i > 0:
autocorr_vec2, mean_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = np.vstack((autocorr_vec,autocorr_vec2))
mean_autocorr = np.vstack((mean_autocorr,mean_autocorr2))
error_autocorr = np.vstack((error_autocorr,error_autocorr2))
dwelltime.append(dwelltime2)
ke_sim.append(ke_sim2)
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_all_autocovariances(intensity_vec,truetime,genelength )
ssa_obj.autocorr_vec = autocorr_vec
ssa_obj.mean_autocorr = mean_autocorr
ssa_obj.error_autocorr = error_autocorr
ssa_obj.autocorr_vec_norm = autocorr_vec_norm
ssa_obj.mean_autocorr_norm = mean_autocorr_norm
ssa_obj.error_autocorr_norm = error_autocorr_norm
ssa_obj.dwelltime = dwelltime
ssa_obj.ke_sim = ke_sim
ssa_obj.ke_true = float(genelength)/np.mean(ssa_obj.ribtimes)
ssa_obj.probe = probePosition
try:
ssa_obj.autocovariance_dict = acov
ssa_obj.autocovariance_norm_dict = nacov
except:
pass
return ssa_obj
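    # Typical usage sketch for the public entry point (hypothetical parameters,
    # not executed here):
    #   ssa_obj = model.ssa_solver(k_elong_mean=10, k_initiation=0.03,
    #                              n_traj=50, tf=1000, tstep=1000)
    #   ssa_obj.intensity_vec   # simulated fluorescence intensities per trajectory
    #   ssa_obj.ke_sim          # elongation rate estimated from the autocorrelation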
def get_negative_intensity(self,solution,gene_length,pv,tvec,ti,stop_frap):
startindex = np.where(tvec >= ti)[0][0]
stop_frap = np.where(tvec >= stop_frap)[0][0]
solution = solution.T
fragmented_trajectories = []
fragtimes = []
endfragtimes = []
maxlen = 0
fragmentspertraj= []
ind = np.array([next(j for j in range(0,solution.shape[0]) if int(solution[j, i]) == 0 or int(solution[j, i]) == -1) for i in range(0, solution.shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solution[:,1:] - solution[:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solution[:,index]
post = solution[:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([gene_length],post)
                pre = np.append(pre, 0)
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import warnings
warnings.filterwarnings('ignore', '.*From scipy 0.13.0, the output shape of zoom()*')
import numpy as np
import scipy.misc
import scipy.ndimage
import scipy.interpolate
from scipy.ndimage.measurements import label as lb
import torch
import tqdm
from custom_extensions.nms import nms
from custom_extensions.roi_align import roi_align
############################################################
# Segmentation Processing
############################################################
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(int(ax))
return input
def get_one_hot_encoding(y, n_classes):
"""
    transform a numpy label array into a one-hot array (the label channel is expanded to n_classes channels).
:param y: array of shape (b, 1, y, x, (z)).
:param n_classes: int, number of classes to unfold in one-hot encoding.
:return y_ohe: array of shape (b, n_classes, y, x, (z))
"""
dim = len(y.shape) - 2
if dim == 2:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3])).astype('int32')
elif dim == 3:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3], y.shape[4])).astype('int32')
else:
raise Exception("invalid dimensions {} encountered".format(y.shape))
for cl in np.arange(n_classes):
y_ohe[:, cl][y[:, 0] == cl] = 1
return y_ohe
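# Illustrative example of the encoding above: with n_classes = 3 and a 2D label
# array y of shape (1, 1, 2, 2) holding [[0, 1], [2, 1]], the result has shape
# (1, 3, 2, 2) and channel c is 1 exactly where y == c.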
def dice_per_batch_inst_and_class(pred, y, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch instance and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
    :param y: ground truth array of shape (b, 1, y, x, (z)) (contains ints in [0, ..., n_classes])
:param n_classes: int
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
y = get_one_hot_encoding(y, n_classes)
axes = tuple(range(2, len(pred.shape)))
intersect = np.sum(pred*y, axis=axes)
denominator = np.sum(pred, axis=axes)+np.sum(y, axis=axes)
dice = (2.0*intersect + smooth) / (denominator + smooth)
return dice
def dice_per_batch_and_class(pred, targ, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param targ: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes])
:param n_classes: int
:param smooth: Laplacian smooth, https://en.wikipedia.org/wiki/Additive_smoothing
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
targ = get_one_hot_encoding(targ, n_classes)
axes = (0, *list(range(2, len(pred.shape)))) #(0,2,3(,4))
intersect = np.sum(pred * targ, axis=axes)
denominator = np.sum(pred, axis=axes) + np.sum(targ, axis=axes)
dice = (2.0 * intersect + smooth) / (denominator + smooth)
assert dice.shape==(n_classes,), "dice shp {}".format(dice.shape)
return dice
def batch_dice(pred, y, false_positive_weight=1.0, smooth=1e-6):
'''
compute soft dice over batch. this is a differentiable score and can be used as a loss function.
only dice scores of foreground classes are returned, since training typically
does not benefit from explicit background optimization. Pixels of the entire batch are considered a pseudo-volume to compute dice scores of.
This way, single patches with missing foreground classes can not produce faulty gradients.
:param pred: (b, c, y, x, (z)), softmax probabilities (network output).
    :param y: (b, c, y, x, (z)), one-hot encoded segmentation mask.
:param false_positive_weight: float [0,1]. For weighting of imbalanced classes,
reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.
    :return: soft dice score (float). This function discards the background score and returns the mean of the foreground scores.
'''
if len(pred.size()) == 4:
axes = (0, 2, 3)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
elif len(pred.size()) == 5:
axes = (0, 2, 3, 4)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
else:
raise ValueError('wrong input dimension in dice loss')
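# Sanity-check sketch for the soft dice above: for a perfect prediction
# (pred == y, one-hot), intersect equals half the denominator per class, so
# (2 * intersect + smooth) / (denom + smooth) is ~1.0 for every foreground class.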
############################################################
# Bounding Boxes
############################################################
def compute_iou_2D(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2] THIS IS THE GT BOX
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
    Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
def compute_iou_3D(box, boxes, box_volume, boxes_volume):
"""Calculates IoU of the given box with the array of the given boxes.
    box: 1D vector [y1, x1, y2, x2, z1, z2] (typically the gt box)
    boxes: [boxes_count, (y1, x1, y2, x2, z1, z2)]
    box_volume: float. the volume of 'box'
    boxes_volume: array of length boxes_count.
    Note: the volumes are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
z1 = np.maximum(box[4], boxes[:, 4])
z2 = np.minimum(box[5], boxes[:, 5])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) * np.maximum(z2 - z1, 0)
union = box_volume + boxes_volume[:] - intersection[:]
iou = intersection / union
return iou
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)]. / 3D: (z1, z2))
For better performance, pass the largest set first and the smaller second.
    :return: (#boxes1, #boxes2), ious of each box of set 1 matched with each box of set 2
"""
# Areas of anchors and GT boxes
if boxes1.shape[1] == 4:
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i] #this is the gt box
overlaps[:, i] = compute_iou_2D(box2, boxes1, area2[i], area1)
return overlaps
else:
# Areas of anchors and GT boxes
volume1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) * (boxes1[:, 5] - boxes1[:, 4])
volume2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) * (boxes2[:, 5] - boxes2[:, 4])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(boxes2.shape[0]):
box2 = boxes2[i] # this is the gt box
overlaps[:, i] = compute_iou_3D(box2, boxes1, volume2[i], volume1)
return overlaps
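# Illustrative example (hypothetical boxes): identical boxes give IoU 1.0,
#   compute_overlaps(np.array([[0, 0, 2, 2]]), np.array([[0, 0, 2, 2]]))  # -> [[1.0]]
# while disjoint boxes give 0.0.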
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)] / 3D: (z1, z2))
"""
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = torch.log(gt_height / height)
dw = torch.log(gt_width / width)
result = torch.stack([dy, dx, dh, dw], dim=1)
if box.shape[1] > 4:
depth = box[:, 5] - box[:, 4]
center_z = box[:, 4] + 0.5 * depth
gt_depth = gt_box[:, 5] - gt_box[:, 4]
gt_center_z = gt_box[:, 4] + 0.5 * gt_depth
dz = (gt_center_z - center_z) / depth
dd = torch.log(gt_depth / depth)
result = torch.stack([dy, dx, dz, dh, dw, dd], dim=1)
return result
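# Note on the encoding above: the translation terms are (gt_center - center) / size
# and the scale terms are log(gt_size / size), the standard R-CNN box
# parameterization; applying these deltas back to 'box' (as in the
# apply_box_deltas_* functions used elsewhere in this file) recovers 'gt_box'.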
def unmold_mask_2D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [y2 - y1, x2 - x1]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2]) #only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
def unmold_mask_2D_torch(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [(y2 - y1).float(), (x2 - x1).float()]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = mask.unsqueeze(0).unsqueeze(0)
mask = torch.nn.functional.interpolate(mask, scale_factor=zoom_factor)
mask = mask[0][0]
#mask = scipy.ndimage.zoom(mask.cpu().numpy(), zoom_factor, order=1).astype(np.float32)
#mask = torch.from_numpy(mask).cuda()
# Put the mask in the right location.
full_mask = torch.zeros(image_shape[:2]) # only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
def unmold_mask_3D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2, z1, z2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2, z1, z2 = bbox
out_zoom = [y2 - y1, x2 - x1, z2 - z1]
zoom_factor = [i/j for i,j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:3])
full_mask[y1:y2, x1:x2, z1:z2] = mask
return full_mask
def nms_numpy(box_coords, scores, thresh):
""" non-maximum suppression on 2D or 3D boxes in numpy.
:param box_coords: [y1,x1,y2,x2 (,z1,z2)] with y1<=y2, x1<=x2, z1<=z2.
:param scores: ranking scores (higher score == higher rank) of boxes.
:param thresh: IoU threshold for clustering.
:return:
"""
y1 = box_coords[:, 0]
x1 = box_coords[:, 1]
y2 = box_coords[:, 2]
x2 = box_coords[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
            coordinates of which maxima are taken need to be the lower coordinates"""
areas = (x2 - x1) * (y2 - y1)
is_3d = box_coords.shape[1] == 6
if is_3d: # 3-dim case
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
        assert np.all(z1 <= z2), """the definition of the coordinates is crucially important here:
            coordinates of which maxima are taken need to be the lower coordinates"""
areas *= (z2 - z1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself, that is okay.
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1)
w = np.maximum(0.0, xx2 - xx1)
inter = h * w
if is_3d:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0.0, zz2 - zz1)
inter *= d
iou = inter / (areas[i] + areas[order] - inter)
non_matches = np.nonzero(iou <= thresh)[0] # get all elements that were not matched and discard all others.
order = order[non_matches]
keep.append(i)
return keep
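# Illustrative usage sketch (hypothetical boxes): given two heavily overlapping
# boxes and one disjoint box,
#   nms_numpy(np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]]),
#             np.array([0.9, 0.8, 0.7]), thresh=0.5)
# returns [0, 2]: the top-scoring overlapping box plus the disjoint one.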
############################################################
# M-RCNN
############################################################
def refine_proposals(rpn_pred_probs, rpn_pred_deltas, proposal_count, batch_anchors, cf):
"""
Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement details to anchors.
:param rpn_pred_probs: (b, n_anchors, 2)
:param rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
:return: batch_normalized_props: Proposals in normalized coordinates (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
:return: batch_out_proposals: Box coords + RPN foreground scores
for monitoring/plotting (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
"""
std_dev = torch.from_numpy(cf.rpn_bbox_std_dev[None]).float().cuda()
norm = torch.from_numpy(cf.scale).float().cuda()
anchors = batch_anchors.clone()
batch_scores = rpn_pred_probs[:, :, 1]
# norm deltas
batch_deltas = rpn_pred_deltas * std_dev
batch_normalized_props = []
batch_out_proposals = []
# loop over batch dimension.
for ix in range(batch_scores.shape[0]):
scores = batch_scores[ix]
deltas = batch_deltas[ix]
non_nans = deltas == deltas
assert torch.all(non_nans), "deltas have nans: {}".format(deltas[~non_nans])
non_nans = anchors == anchors
assert torch.all(non_nans), "anchors have nans: {}".format(anchors[~non_nans])
# improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(cf.pre_nms_limit, anchors.size()[0])
scores, order = scores.sort(descending=True)
order = order[:pre_nms_limit]
scores = scores[:pre_nms_limit]
deltas = deltas[order, :]
# apply deltas to anchors to get refined anchors and filter with non-maximum suppression.
if batch_deltas.shape[-1] == 4:
boxes = apply_box_deltas_2D(anchors[order, :], deltas)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before clip/after delta apply have nans: {}".format(boxes[~non_nans])
boxes = clip_boxes_2D(boxes, cf.window)
else:
boxes = apply_box_deltas_3D(anchors[order, :], deltas)
boxes = clip_boxes_3D(boxes, cf.window)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before nms/after clip have nans: {}".format(boxes[~non_nans])
# boxes are y1,x1,y2,x2, torchvision-nms requires x1,y1,x2,y2, but consistent swap x<->y is irrelevant.
keep = nms.nms(boxes, scores, cf.rpn_nms_threshold)
keep = keep[:proposal_count]
boxes = boxes[keep, :]
rpn_scores = scores[keep][:, None]
# pad missing boxes with 0.
if boxes.shape[0] < proposal_count:
n_pad_boxes = proposal_count - boxes.shape[0]
zeros = torch.zeros([n_pad_boxes, boxes.shape[1]]).cuda()
boxes = torch.cat([boxes, zeros], dim=0)
zeros = torch.zeros([n_pad_boxes, rpn_scores.shape[1]]).cuda()
rpn_scores = torch.cat([rpn_scores, zeros], dim=0)
# concat box and score info for monitoring/plotting.
batch_out_proposals.append(torch.cat((boxes, rpn_scores), 1).cpu().data.numpy())
# normalize dimensions to range of 0 to 1.
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes after nms have nans: {}".format(boxes[~non_nans])
normalized_boxes = boxes / norm
where = normalized_boxes <=1
assert torch.all(where), "normalized box coords >1 found:\n {}\n".format(normalized_boxes[~where])
# add again batch dimension
batch_normalized_props.append(torch.cat((normalized_boxes, rpn_scores), 1).unsqueeze(0))
batch_normalized_props = torch.cat(batch_normalized_props)
batch_out_proposals = np.array(batch_out_proposals)
return batch_normalized_props, batch_out_proposals
def pyramid_roi_align(feature_maps, rois, pool_size, pyramid_levels, dim):
"""
Implements ROI Pooling on multiple levels of the feature pyramid.
:param feature_maps: list of feature maps, each of shape (b, c, y, x , (z))
:param rois: proposals (normalized coords.) as returned by RPN. contain info about original batch element allocation.
(n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs)
:param pool_size: list of poolsizes in dims: [x, y, (z)]
:param pyramid_levels: list. [0, 1, 2, ...]
:return: pooled: pooled feature map rois (n_proposals, c, poolsize_y, poolsize_x, (poolsize_z))
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
"""
boxes = rois[:, :dim*2]
batch_ixs = rois[:, dim*2]
# Assign each ROI to a level in the pyramid based on the ROI area.
if dim == 2:
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
else:
y1, x1, y2, x2, z1, z2 = boxes.chunk(6, dim=1)
h = y2 - y1
w = x2 - x1
# Equation 1 in https://arxiv.org/abs/1612.03144. Account for
# the fact that our coordinates are normalized here.
# divide sqrt(h*w) by 1 instead image_area.
roi_level = (4 + torch.log2(torch.sqrt(h*w))).round().int().clamp(pyramid_levels[0], pyramid_levels[-1])
# if Pyramid contains additional level P6, adapt the roi_level assignment accordingly.
if len(pyramid_levels) == 5:
roi_level[h*w > 0.65] = 5
# Loop through levels and apply ROI pooling to each.
pooled = []
box_to_level = []
fmap_shapes = [f.shape for f in feature_maps]
for level_ix, level in enumerate(pyramid_levels):
ix = roi_level == level
if not ix.any():
continue
ix = torch.nonzero(ix)[:, 0]
level_boxes = boxes[ix, :]
# re-assign rois to feature map of original batch element.
ind = batch_ixs[ix].int()
# Keep track of which box is mapped to which level
box_to_level.append(ix)
        # Stop gradient propagation to ROI proposals
level_boxes = level_boxes.detach()
if len(pool_size) == 2:
# remap to feature map coordinate system
y_exp, x_exp = fmap_shapes[level_ix][2:] # exp = expansion
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_2d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
else:
y_exp, x_exp, z_exp = fmap_shapes[level_ix][2:]
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_3d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
pooled.append(pooled_features)
# Pack pooled features into one tensor
pooled = torch.cat(pooled, dim=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = torch.cat(box_to_level, dim=0)
# Rearrange pooled features to match the order of the original boxes
_, box_to_level = torch.sort(box_to_level)
pooled = pooled[box_to_level, :, :]
return pooled
def roi_align_3d_numpy(input: np.ndarray, rois, output_size: tuple,
spatial_scale: float = 1., sampling_ratio: int = -1) -> np.ndarray:
""" This fct mainly serves as a verification method for 3D CUDA implementation of RoIAlign, it's highly
inefficient due to the nested loops.
:param input: (ndarray[N, C, H, W, D]): input feature map
:param rois: list (N,K(n), 6), K(n) = nr of rois in batch-element n, single roi of format (y1,x1,y2,x2,z1,z2)
:param output_size:
:param spatial_scale:
:param sampling_ratio:
:return: (List[N, K(n), C, output_size[0], output_size[1], output_size[2]])
"""
out_height, out_width, out_depth = output_size
coord_grid = tuple([np.linspace(0, input.shape[dim] - 1, num=input.shape[dim]) for dim in range(2, 5)])
    pooled_rois = [[] for _ in range(len(rois))]  # independent lists; [[]] * n would alias one shared list
assert len(rois) == input.shape[0], "batch dim mismatch, rois: {}, input: {}".format(len(rois), input.shape[0])
print("Numpy 3D RoIAlign progress:", end="\n")
for b in range(input.shape[0]):
for roi in tqdm.tqdm(rois[b]):
y1, x1, y2, x2, z1, z2 = np.array(roi) * spatial_scale
roi_height = max(float(y2 - y1), 1.)
roi_width = max(float(x2 - x1), 1.)
roi_depth = max(float(z2 - z1), 1.)
if sampling_ratio <= 0:
sampling_ratio_h = int(np.ceil(roi_height / out_height))
sampling_ratio_w = int(np.ceil(roi_width / out_width))
sampling_ratio_d = int(np.ceil(roi_depth / out_depth))
else:
sampling_ratio_h = sampling_ratio_w = sampling_ratio_d = sampling_ratio # == n points per bin
bin_height = roi_height / out_height
bin_width = roi_width / out_width
bin_depth = roi_depth / out_depth
n_points = sampling_ratio_h * sampling_ratio_w * sampling_ratio_d
pooled_roi = np.empty((input.shape[1], out_height, out_width, out_depth), dtype="float32")
for chan in range(input.shape[1]):
lin_interpolator = scipy.interpolate.RegularGridInterpolator(coord_grid, input[b, chan],
method="linear")
for bin_iy in range(out_height):
for bin_ix in range(out_width):
for bin_iz in range(out_depth):
bin_val = 0.
for i in range(sampling_ratio_h):
for j in range(sampling_ratio_w):
for k in range(sampling_ratio_d):
loc_ijk = [
y1 + bin_iy * bin_height + (i + 0.5) * (bin_height / sampling_ratio_h),
x1 + bin_ix * bin_width + (j + 0.5) * (bin_width / sampling_ratio_w),
z1 + bin_iz * bin_depth + (k + 0.5) * (bin_depth / sampling_ratio_d)]
# print("loc_ijk", loc_ijk)
if not (np.any([c < -1.0 for c in loc_ijk]) or loc_ijk[0] > input.shape[2] or
loc_ijk[1] > input.shape[3] or loc_ijk[2] > input.shape[4]):
for catch_case in range(3):
# catch on-border cases
if int(loc_ijk[catch_case]) == input.shape[catch_case + 2] - 1:
loc_ijk[catch_case] = input.shape[catch_case + 2] - 1
bin_val += lin_interpolator(loc_ijk)
pooled_roi[chan, bin_iy, bin_ix, bin_iz] = bin_val / n_points
pooled_rois[b].append(pooled_roi)
return np.array(pooled_rois)
def refine_detections(cf, batch_ixs, rois, deltas, scores, regressions):
"""
Refine classified proposals (apply deltas to rpn rois), filter overlaps (nms) and return final detections.
:param rois: (n_proposals, 2 * dim) normalized boxes as proposed by RPN. n_proposals = batch_size * POST_NMS_ROIS
:param deltas: (n_proposals, n_classes, 2 * dim) box refinement deltas as predicted by mrcnn bbox regressor.
:param batch_ixs: (n_proposals) batch element assignment info for re-allocation.
:param scores: (n_proposals, n_classes) probabilities for all classes per roi as predicted by mrcnn classifier.
    :param regressions: (n_proposals, n_classes, n_regression_features (+1 for uncertainty, if predicted)) regression vectors.
:return: result: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score, *regression vector features))
"""
# class IDs per ROI. Since scores of all classes are of interest (not just max class), all are kept at this point.
class_ids = []
fg_classes = cf.head_classes - 1
# repeat vectors to fill in predictions for all foreground classes.
for ii in range(1, fg_classes + 1):
class_ids += [ii] * rois.shape[0]
class_ids = torch.from_numpy(np.array(class_ids)).cuda()
batch_ixs = batch_ixs.repeat(fg_classes)
rois = rois.repeat(fg_classes, 1)
deltas = deltas.repeat(fg_classes, 1, 1)
scores = scores.repeat(fg_classes, 1)
regressions = regressions.repeat(fg_classes, 1, 1)
# get class-specific scores and bounding box deltas
idx = torch.arange(class_ids.size()[0]).long().cuda()
# using idx instead of slice [:,] squashes first dimension.
#len(class_ids)>scores.shape[1] --> probs is broadcasted by expansion from fg_classes-->len(class_ids)
batch_ixs = batch_ixs[idx]
deltas_specific = deltas[idx, class_ids]
class_scores = scores[idx, class_ids]
regressions = regressions[idx, class_ids]
# apply bounding box deltas. re-scale to image coordinates.
std_dev = torch.from_numpy(np.reshape(cf.rpn_bbox_std_dev, [1, cf.dim * 2])).float().cuda()
scale = torch.from_numpy(cf.scale).float().cuda()
refined_rois = apply_box_deltas_2D(rois, deltas_specific * std_dev) * scale if cf.dim == 2 else \
apply_box_deltas_3D(rois, deltas_specific * std_dev) * scale
# round and cast to int since we're dealing with pixels now
refined_rois = clip_to_window(cf.window, refined_rois)
refined_rois = torch.round(refined_rois)
# filter out low confidence boxes
keep = idx
keep_bool = (class_scores >= cf.model_min_confidence)
    if 0 not in torch.nonzero(keep_bool).size():
score_keep = torch.nonzero(keep_bool)[:, 0]
pre_nms_class_ids = class_ids[score_keep]
pre_nms_rois = refined_rois[score_keep]
pre_nms_scores = class_scores[score_keep]
pre_nms_batch_ixs = batch_ixs[score_keep]
for j, b in enumerate(unique1d(pre_nms_batch_ixs)):
bixs = torch.nonzero(pre_nms_batch_ixs == b)[:, 0]
bix_class_ids = pre_nms_class_ids[bixs]
bix_rois = pre_nms_rois[bixs]
bix_scores = pre_nms_scores[bixs]
for i, class_id in enumerate(unique1d(bix_class_ids)):
ixs = torch.nonzero(bix_class_ids == class_id)[:, 0]
# nms expects boxes sorted by score.
ix_rois = bix_rois[ixs]
ix_scores = bix_scores[ixs]
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order, :]
class_keep = nms.nms(ix_rois, ix_scores, cf.detection_nms_threshold)
# map indices back.
class_keep = keep[score_keep[bixs[ixs[order[class_keep]]]]]
# merge indices over classes for current batch element
b_keep = class_keep if i == 0 else unique1d(torch.cat((b_keep, class_keep)))
# only keep top-k boxes of current batch-element
top_ids = class_scores[b_keep].sort(descending=True)[1][:cf.model_max_instances_per_batch_element]
b_keep = b_keep[top_ids]
# merge indices over batch elements.
batch_keep = b_keep if j == 0 else unique1d(torch.cat((batch_keep, b_keep)))
keep = batch_keep
else:
keep = torch.tensor([0]).long().cuda()
# arrange output
output = [refined_rois[keep], batch_ixs[keep].unsqueeze(1)]
output += [class_ids[keep].unsqueeze(1).float(), class_scores[keep].unsqueeze(1)]
output += [regressions[keep]]
result = torch.cat(output, dim=1)
# shape: (n_keeps, catted feats), catted feats: [0:dim*2] are box_coords, [dim*2] are batch_ics,
# [dim*2+1] are class_ids, [dim*2+2] are scores, [dim*2+3:] are regression vector features (incl uncertainty)
return result
def loss_example_mining(cf, batch_proposals, batch_gt_boxes, batch_gt_masks, batch_roi_scores,
batch_gt_class_ids, batch_gt_regressions):
"""
Subsamples proposals for mrcnn losses and generates targets. Sampling is done per batch element, seems to have positive
effects on training, as opposed to sampling over entire batch. Negatives are sampled via stochastic hard-example mining
(SHEM), where a number of negative proposals is drawn from larger pool of highest scoring proposals for stochasticity.
Scoring is obtained here as the max over all foreground probabilities as returned by mrcnn_classifier (worked better than
loss-based class-balancing methods like "online hard-example mining" or "focal loss".)
Classification-regression duality: regressions can be given along with classes (at least fg/bg, only class scores
are used for ranking).
:param batch_proposals: (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs).
boxes as proposed by RPN. n_proposals here is determined by batch_size * POST_NMS_ROIS.
:param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
:param batch_gt_masks: list over batch elements. Each element is binary mask of shape (n_gt_rois, c, y, x, (z))
:param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
if no classes predicted (only fg/bg from RPN): expected as pseudo classes [0, 1] for bg, fg.
:param batch_gt_regressions: list over b elements. Each element is a regression target vector. if None--> pseudo
:return: sample_indices: (n_sampled_rois) indices of sampled proposals to be used for loss functions.
    :return: target_class_ids: (n_sampled_rois) containing target class labels of sampled proposals.
:return: target_deltas: (n_sampled_rois, 2 * dim) containing target deltas of sampled proposals for box refinement.
:return: target_masks: (n_sampled_rois, y, x, (z)) containing target masks of sampled proposals.
"""
# normalization of target coordinates
#global sample_regressions
if cf.dim == 2:
h, w = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w])).float().cuda()
else:
h, w, z = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w, z, z])).float().cuda()
positive_count = 0
negative_count = 0
sample_positive_indices = []
sample_negative_indices = []
sample_deltas = []
sample_masks = []
sample_class_ids = []
if batch_gt_regressions is not None:
sample_regressions = []
else:
target_regressions = torch.FloatTensor().cuda()
std_dev = torch.from_numpy(cf.bbox_std_dev).float().cuda()
# loop over batch and get positive and negative sample rois.
for b in range(len(batch_gt_boxes)):
gt_masks = torch.from_numpy(batch_gt_masks[b]).float().cuda()
gt_class_ids = torch.from_numpy(batch_gt_class_ids[b]).int().cuda()
if batch_gt_regressions is not None:
gt_regressions = torch.from_numpy(batch_gt_regressions[b]).float().cuda()
#if np.any(batch_gt_class_ids[b] > 0): # skip roi selection for no gt images.
if np.any([len(coords)>0 for coords in batch_gt_boxes[b]]):
gt_boxes = torch.from_numpy(batch_gt_boxes[b]).float().cuda() / scale
else:
gt_boxes = torch.FloatTensor().cuda()
# get proposals and indices of current batch element.
proposals = batch_proposals[batch_proposals[:, -1] == b][:, :-1]
batch_element_indices = torch.nonzero(batch_proposals[:, -1] == b).squeeze(1)
# Compute overlaps matrix [proposals, gt_boxes]
if not 0 in gt_boxes.size():
if gt_boxes.shape[1] == 4:
                assert cf.dim == 2, "gt_boxes shape {} doesn't match cf.dim {}".format(gt_boxes.shape, cf.dim)
overlaps = bbox_overlaps_2D(proposals, gt_boxes)
else:
                assert cf.dim == 3, "gt_boxes shape {} doesn't match cf.dim {}".format(gt_boxes.shape, cf.dim)
overlaps = bbox_overlaps_3D(proposals, gt_boxes)
# Determine positive and negative ROIs
roi_iou_max = torch.max(overlaps, dim=1)[0]
            # 1. Positive ROIs are those with >= 0.5 (2D) / 0.3 (3D) IoU with a GT box.
            positive_roi_bool = roi_iou_max >= (0.5 if cf.dim == 2 else 0.3)
            # 2. Negative ROIs are those with < 0.1 (2D) / 0.01 (3D) IoU with every GT box.
            negative_roi_bool = roi_iou_max < (0.1 if cf.dim == 2 else 0.01)
else:
positive_roi_bool = torch.FloatTensor().cuda()
negative_roi_bool = torch.from_numpy(np.array([1]*proposals.shape[0])).cuda()
# Sample Positive ROIs
if not 0 in torch.nonzero(positive_roi_bool).size():
positive_indices = torch.nonzero(positive_roi_bool).squeeze(1)
positive_samples = int(cf.train_rois_per_image * cf.roi_positive_ratio)
rand_idx = torch.randperm(positive_indices.size()[0])
rand_idx = rand_idx[:positive_samples].cuda()
positive_indices = positive_indices[rand_idx]
positive_samples = positive_indices.size()[0]
positive_rois = proposals[positive_indices, :]
# Assign positive ROIs to GT boxes.
positive_overlaps = overlaps[positive_indices, :]
roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
roi_gt_boxes = gt_boxes[roi_gt_box_assignment, :]
roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment]
if batch_gt_regressions is not None:
roi_gt_regressions = gt_regressions[roi_gt_box_assignment]
# Compute bbox refinement targets for positive ROIs
deltas = box_refinement(positive_rois, roi_gt_boxes)
deltas /= std_dev
roi_masks = gt_masks[roi_gt_box_assignment]
assert roi_masks.shape[1] == 1, "gt masks have more than one channel --> is this desired?"
# Compute mask targets
boxes = positive_rois
box_ids = torch.arange(roi_masks.shape[0]).cuda().unsqueeze(1).float()
if len(cf.mask_shape) == 2:
y_exp, x_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_2d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
else:
y_exp, x_exp, z_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_3d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
masks = masks.squeeze(1)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = torch.round(masks)
sample_positive_indices.append(batch_element_indices[positive_indices])
sample_deltas.append(deltas)
sample_masks.append(masks)
sample_class_ids.append(roi_gt_class_ids)
if batch_gt_regressions is not None:
sample_regressions.append(roi_gt_regressions)
positive_count += positive_samples
else:
positive_samples = 0
# Sample negative ROIs. Add enough to maintain positive:negative ratio, but at least 1. Sample via SHEM.
if not 0 in torch.nonzero(negative_roi_bool).size():
negative_indices = torch.nonzero(negative_roi_bool).squeeze(1)
r = 1.0 / cf.roi_positive_ratio
b_neg_count = np.max((int(r * positive_samples - positive_samples), 1))
roi_scores_neg = batch_roi_scores[batch_element_indices[negative_indices]]
raw_sampled_indices = shem(roi_scores_neg, b_neg_count, cf.shem_poolsize)
sample_negative_indices.append(batch_element_indices[negative_indices[raw_sampled_indices]])
negative_count += raw_sampled_indices.size()[0]
if len(sample_positive_indices) > 0:
target_deltas = torch.cat(sample_deltas)
target_masks = torch.cat(sample_masks)
target_class_ids = torch.cat(sample_class_ids)
if batch_gt_regressions is not None:
target_regressions = torch.cat(sample_regressions)
# Pad target information with zeros for negative ROIs.
if positive_count > 0 and negative_count > 0:
sample_indices = torch.cat((torch.cat(sample_positive_indices), torch.cat(sample_negative_indices)), dim=0)
zeros = torch.zeros(negative_count, cf.dim * 2).cuda()
target_deltas = torch.cat([target_deltas, zeros], dim=0)
zeros = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_masks = torch.cat([target_masks, zeros], dim=0)
zeros = torch.zeros(negative_count).int().cuda()
target_class_ids = torch.cat([target_class_ids, zeros], dim=0)
if batch_gt_regressions is not None:
                # regression targets for background/negative ROIs are set to 0 below
if 'regression_bin' in cf.prediction_tasks:
zeros = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
zeros = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
target_regressions = torch.cat([target_regressions, zeros], dim=0)
elif positive_count > 0:
sample_indices = torch.cat(sample_positive_indices)
elif negative_count > 0:
sample_indices = torch.cat(sample_negative_indices)
target_deltas = torch.zeros(negative_count, cf.dim * 2).cuda()
target_masks = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_class_ids = torch.zeros(negative_count).int().cuda()
if batch_gt_regressions is not None:
if 'regression_bin' in cf.prediction_tasks:
target_regressions = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
target_regressions = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
else:
sample_indices = torch.LongTensor().cuda()
target_class_ids = torch.IntTensor().cuda()
target_deltas = torch.FloatTensor().cuda()
target_masks = torch.FloatTensor().cuda()
target_regressions = torch.FloatTensor().cuda()
return sample_indices, target_deltas, target_masks, target_class_ids, target_regressions
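# For reference, a minimal sketch of the SHEM sampling used above. The actual
# `shem` helper lives elsewhere in this codebase; its exact pool semantics are
# an assumption here, based on the docstring: keep the n_samples * poolsize
# highest-scoring negatives as a pool, then draw n_samples of them at random.
def _shem_sketch(roi_scores, n_samples, poolsize):
    pool_size = min(n_samples * poolsize, roi_scores.shape[0])
    _, pool_ixs = torch.topk(roi_scores, pool_size)  # hardest (highest-scoring) negatives
    rand_ixs = torch.randperm(pool_size, device=pool_ixs.device)[:n_samples]  # stochastic draw
    return pool_ixs[rand_ixs]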
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack([box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes, box_centers + 0.5 * box_sizes], axis=1)
return boxes
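# Illustrative usage (a sketch, not part of the module API): 3 scales x 3
# ratios on an 8x8 feature map with stride 16 yield 8 * 8 * 9 = 576 anchors.
def _demo_generate_anchors():
    anchors = generate_anchors(scales=[32, 64, 128], ratios=[0.5, 1, 2],
                               shape=(8, 8), feature_stride=16, anchor_stride=1)
    assert anchors.shape == (576, 4)  # (y1, x1, y2, x2) per anchor
    return anchors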
def generate_anchors_3D(scales_xy, scales_z, ratios, shape, feature_stride_xy, feature_stride_z, anchor_stride):
"""
    scales_xy: 1D array of anchor sizes (in pixels) in the xy-plane. Example: [32, 64, 128]
    scales_z: 1D array of anchor sizes (in pixels) along z.
    ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
    shape: [height, width, depth] spatial shape of the feature map over which
        to generate anchors.
    feature_stride_xy: Stride of the feature map relative to the image in pixels, in the xy-plane.
    feature_stride_z: Stride of the feature map relative to the image in pixels, along z.
    anchor_stride: Stride of anchors on the feature map. For example, if the
        value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales_xy, ratios_meshed = np.meshgrid(np.array(scales_xy), np.array(ratios))
scales_xy = scales_xy.flatten()
ratios_meshed = ratios_meshed.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales_xy / np.sqrt(ratios_meshed)
widths = scales_xy * np.sqrt(ratios_meshed)
depths = np.tile(np.array(scales_z), len(ratios_meshed)//np.array(scales_z)[..., None].shape[0])
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride_xy #translate from fm positions to input coords.
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride_xy
shifts_z = np.arange(0, shape[2], anchor_stride) * (feature_stride_z)
shifts_x, shifts_y, shifts_z = np.meshgrid(shifts_x, shifts_y, shifts_z)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
box_depths, box_centers_z = np.meshgrid(depths, shifts_z)
# Reshape to get a list of (y, x, z) and a list of (h, w, d)
box_centers = np.stack(
[box_centers_y, box_centers_x, box_centers_z], axis=2).reshape([-1, 3])
box_sizes = np.stack([box_heights, box_widths, box_depths], axis=2).reshape([-1, 3])
# Convert to corner coordinates (y1, x1, y2, x2, z1, z2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
boxes = np.transpose(np.array([boxes[:, 0], boxes[:, 1], boxes[:, 3], boxes[:, 4], boxes[:, 2], boxes[:, 5]]), axes=(1, 0))
return boxes
def generate_pyramid_anchors(logger, cf):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
from configs:
:param scales: cf.RPN_ANCHOR_SCALES , for conformity with retina nets: scale entries need to be list, e.g. [[4], [8], [16], [32]]
:param ratios: cf.RPN_ANCHOR_RATIOS , e.g. [0.5, 1, 2]
:param feature_shapes: cf.BACKBONE_SHAPES , e.g. [array of shapes per feature map] [80, 40, 20, 10, 5]
:param feature_strides: cf.BACKBONE_STRIDES , e.g. [2, 4, 8, 16, 32, 64]
:param anchors_stride: cf.RPN_ANCHOR_STRIDE , e.g. 1
:return anchors: (N, (y1, x1, y2, x2, (z1), (z2)). All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come first, then anchors of scale[1], and so on.
"""
scales = cf.rpn_anchor_scales
ratios = cf.rpn_anchor_ratios
feature_shapes = cf.backbone_shapes
anchor_stride = cf.rpn_anchor_stride
pyramid_levels = cf.pyramid_levels
feature_strides = cf.backbone_strides
logger.info("anchor scales {} and feature map shapes {}".format(scales, feature_shapes))
expected_anchors = [np.prod(feature_shapes[level]) * len(ratios) * len(scales['xy'][level]) for level in pyramid_levels]
anchors = []
for lix, level in enumerate(pyramid_levels):
if len(feature_shapes[level]) == 2:
anchors.append(generate_anchors(scales['xy'][level], ratios, feature_shapes[level],
feature_strides['xy'][level], anchor_stride))
elif len(feature_shapes[level]) == 3:
anchors.append(generate_anchors_3D(scales['xy'][level], scales['z'][level], ratios, feature_shapes[level],
feature_strides['xy'][level], feature_strides['z'][level], anchor_stride))
else:
raise Exception("invalid feature_shapes[{}] size {}".format(level, feature_shapes[level]))
logger.info("level {}: expected anchors {}, built anchors {}.".format(level, expected_anchors[lix], anchors[-1].shape))
out_anchors = np.concatenate(anchors, axis=0)
logger.info("Total: expected anchors {}, built anchors {}.".format(np.sum(expected_anchors), out_anchors.shape))
return out_anchors
def apply_box_deltas_2D(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
non_nans = boxes == boxes
assert torch.all(non_nans), "boxes at beginning of delta apply have nans: {}".format(
boxes[~non_nans])
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
# clip delta preds in order to avoid infs and later nans after exponentiation.
height *= torch.exp(torch.clamp(deltas[:, 2], max=6.))
width *= torch.exp(torch.clamp(deltas[:, 3], max=6.))
non_nans = width == width
assert torch.all(non_nans), "inside delta apply, width has nans: {}".format(
width[~non_nans])
# 0.*inf results in nan. fix nans to zeros?
# height[height!=height] = 0.
# width[width!=width] = 0.
non_nans = height == height
assert torch.all(non_nans), "inside delta apply, height has nans directly after setting to zero: {}".format(
height[~non_nans])
non_nans = width == width
assert torch.all(non_nans), "inside delta apply, width has nans directly after setting to zero: {}".format(
width[~non_nans])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = torch.stack([y1, x1, y2, x2], dim=1)
non_nans = result == result
assert torch.all(non_nans), "inside delta apply, result has nans: {}".format(result[~non_nans])
return result
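# Sanity sketch: assuming box_refinement (used earlier in this module) follows
# the standard Mask R-CNN [dy, dx, log(dh), log(dw)] parameterization,
# apply_box_deltas_2D inverts it and recovers the target box.
def _demo_delta_roundtrip():
    boxes = torch.tensor([[10., 10., 50., 50.]])
    gt = torch.tensor([[12., 8., 60., 44.]])
    deltas = box_refinement(boxes, gt)  # raw (unnormalized) deltas
    assert torch.allclose(apply_box_deltas_2D(boxes, deltas), gt, atol=1e-4)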
def apply_box_deltas_3D(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 6] where each row is y1, x1, y2, x2, z1, z2
deltas: [N, 6] where each row is [dy, dx, dz, log(dh), log(dw), log(dd)]
"""
    # Convert to y, x, z, h, w, d
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
depth = boxes[:, 5] - boxes[:, 4]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
center_z = boxes[:, 4] + 0.5 * depth
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
center_z += deltas[:, 2] * depth
height *= torch.exp(deltas[:, 3])
width *= torch.exp(deltas[:, 4])
depth *= torch.exp(deltas[:, 5])
    # Convert back to y1, x1, y2, x2, z1, z2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
z1 = center_z - 0.5 * depth
y2 = y1 + height
x2 = x1 + width
z2 = z1 + depth
result = torch.stack([y1, x1, y2, x2, z1, z2], dim=1)
return result
def clip_boxes_2D(boxes, window):
"""
boxes: [N, 4] each col is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
boxes = torch.stack( \
[boxes[:, 0].clamp(float(window[0]), float(window[2])),
boxes[:, 1].clamp(float(window[1]), float(window[3])),
boxes[:, 2].clamp(float(window[0]), float(window[2])),
boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)
return boxes
def clip_boxes_3D(boxes, window):
"""
boxes: [N, 6] each col is y1, x1, y2, x2, z1, z2
window: [6] in the form y1, x1, y2, x2, z1, z2
"""
boxes = torch.stack( \
[boxes[:, 0].clamp(float(window[0]), float(window[2])),
boxes[:, 1].clamp(float(window[1]), float(window[3])),
boxes[:, 2].clamp(float(window[0]), float(window[2])),
boxes[:, 3].clamp(float(window[1]), float(window[3])),
boxes[:, 4].clamp(float(window[4]), float(window[5])),
boxes[:, 5].clamp(float(window[4]), float(window[5]))], 1)
return boxes
from matplotlib import pyplot as plt
def clip_boxes_numpy(boxes, window):
"""
boxes: [N, 4] each col is y1, x1, y2, x2 / [N, 6] in 3D.
    window: image shape (y, x, (z))
"""
    if boxes.shape[1] == 4:
        boxes = np.concatenate(
            (np.clip(boxes[:, 0], 0, window[0])[:, None],  # y1 clipped to image height
             np.clip(boxes[:, 1], 0, window[1])[:, None],  # x1 clipped to image width
             np.clip(boxes[:, 2], 0, window[0])[:, None],  # y2
             np.clip(boxes[:, 3], 0, window[1])[:, None]), 1  # x2
        )
    else:
        boxes = np.concatenate(
            (np.clip(boxes[:, 0], 0, window[0])[:, None],
             np.clip(boxes[:, 1], 0, window[1])[:, None],
             np.clip(boxes[:, 2], 0, window[0])[:, None],
             np.clip(boxes[:, 3], 0, window[1])[:, None],
             np.clip(boxes[:, 4], 0, window[2])[:, None],
             np.clip(boxes[:, 5], 0, window[2])[:, None]), 1
        )
    return boxes
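# Minimal usage sketch: a box hanging over the edges of a 100 x 120 image is
# clamped into the window.
def _demo_clip_boxes_numpy():
    boxes = np.array([[-5., 10., 110., 130.]])
    clipped = clip_boxes_numpy(boxes, window=(100, 120))
    assert (clipped == np.array([[0., 10., 100., 120.]])).all()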
def bbox_overlaps_2D(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every box in boxes1 against every box in boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)
boxes2 = boxes2.repeat(boxes2_repeat,1)
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
#--> expects x1<x2 & y1<y2
zeros = torch.zeros(y1.size()[0], requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area[:,0] + b2_area[:,0] - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
assert torch.all(iou<=1), "iou score>1 produced in bbox_overlaps_2D"
overlaps = iou.view(boxes2_repeat, boxes1_repeat) #--> per gt box: ious of all proposal boxes with that gt box
return overlaps
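# Worked example: identical boxes have IoU 1; two 10x10 boxes offset by 5 in
# both axes intersect in 25 of 175 union pixels, i.e. IoU = 1/7.
def _demo_bbox_overlaps_2D():
    a = torch.tensor([[0., 0., 10., 10.]])
    b = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    overlaps = bbox_overlaps_2D(a, b)  # shape (len(a), len(b)) = (1, 2)
    assert torch.allclose(overlaps, torch.tensor([[1., 25. / 175.]]))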
def bbox_overlaps_3D(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2, z1, z2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every box in boxes1 against every box in boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,6)
boxes2 = boxes2.repeat(boxes2_repeat,1)
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2, b1_z1, b1_z2 = boxes1.chunk(6, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2, b2_z1, b2_z2 = boxes2.chunk(6, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
z1 = torch.max(b1_z1, b2_z1)[:, 0]
z2 = torch.min(b1_z2, b2_z2)[:, 0]
zeros = torch.zeros(y1.size()[0], requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros) * torch.max(z2 - z1, zeros)
# 3. Compute unions
b1_volume = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) * (b1_z2 - b1_z1)
b2_volume = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) * (b2_z2 - b2_z1)
union = b1_volume[:,0] + b2_volume[:,0] - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = iou.view(boxes2_repeat, boxes1_repeat)
return overlaps
def gt_anchor_matching(cf, anchors, gt_boxes, gt_class_ids=None):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2, (z1), (z2))]
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2, (z1), (z2))]
gt_class_ids (optional): [num_gt_boxes] Integer class IDs for one stage detectors. in RPN case of Mask R-CNN,
set all positive matches to 1 (foreground)
Returns:
anchor_class_matches: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
anchor_delta_targets: [N, (dy, dx, (dz), log(dh), log(dw), (log(dd)))] Anchor bbox deltas.
"""
anchor_class_matches = np.zeros([anchors.shape[0]], dtype=np.int32)
anchor_delta_targets = np.zeros((cf.rpn_train_anchors_per_image, 2*cf.dim))
anchor_matching_iou = cf.anchor_matching_iou
if gt_boxes is None:
anchor_class_matches = np.full(anchor_class_matches.shape, fill_value=-1)
return anchor_class_matches, anchor_delta_targets
# for mrcnn: anchor matching is done for RPN loss, so positive labels are all 1 (foreground)
if gt_class_ids is None:
gt_class_ids = np.array([1] * len(gt_boxes))
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= anchor_matching_iou then it's positive.
# If an anchor overlaps a GT box with IoU < 0.1 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.1).
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
if anchors.shape[1] == 4:
anchor_class_matches[(anchor_iou_max < 0.1)] = -1
elif anchors.shape[1] == 6:
anchor_class_matches[(anchor_iou_max < 0.01)] = -1
else:
raise ValueError('anchor shape wrong {}'.format(anchors.shape))
# 2. Set an anchor for each GT box (regardless of IoU value).
gt_iou_argmax = np.argmax(overlaps, axis=0)
for ix, ii in enumerate(gt_iou_argmax):
anchor_class_matches[ii] = gt_class_ids[ix]
# 3. Set anchors with high overlap as positive.
above_thresh_ixs =
|
np.argwhere(anchor_iou_max >= anchor_matching_iou)
|
numpy.argwhere
|
import os
import copy
import gym
import numpy as np
from numba import njit
import nle
import cv2
from PIL import Image, ImageFont, ImageDraw
SMALL_FONT_PATH = os.path.abspath("Hack-Regular.ttf")
# Mapping of 0-15 colors used.
# Taken from bottom image here. It seems about right
# https://i.stack.imgur.com/UQVe5.png
COLORS = [
"#000000",
"#800000",
"#008000",
"#808000",
"#000080",
"#800080",
"#008080",
"#808080", # - flipped these ones around
"#C0C0C0", # | the gray-out dull stuff
"#FF0000",
"#00FF00",
"#FFFF00",
"#0000FF",
"#FF00FF",
"#00FFFF",
"#FFFFFF",
]
class VectorFeaturesWrapper(gym.Wrapper):
"""Create network-friendly vector features from the stuff nethack has"""
    # Hand-chosen scaling values for each blstat entry. Aims to keep them in the [0, 1] range.
BLSTAT_NORMALIZATION_STATS = np.array([
1.0 / 79.0, # hero col
1.0 / 21, # hero row
0.0, # strength pct
1.0 / 10, # strength
1.0 / 10, # dexterity
1.0 / 10, # constitution
1.0 / 10, # intelligence
1.0 / 10, # wisdom
1.0 / 10, # charisma
0.0, # score
1.0 / 10, # hitpoints
1.0 / 10, # max hitpoints
0.0, # depth
1.0 / 1000, # gold
1.0 / 10, # energy
1.0 / 10, # max energy
1.0 / 10, # armor class
0.0, # monster level
1.0 / 10, # experience level
1.0 / 100, # experience points
1.0 / 1000, # time
1.0, # hunger_state
1.0 / 10, # carrying capacity
        0.0, # dungeon number
0.0, # level number
0.0, # condition bits
])
CROP_CENTER_NORMALIZATION_STATS = np.array([
1.0 / 20,
1.0 / 80
])
# Make sure we do not spook the network
BLSTAT_CLIP_RANGE = (-5, 5)
def __init__(self, env):
super().__init__(env)
num_items = VectorFeaturesWrapper.BLSTAT_NORMALIZATION_STATS.shape[0]
obs_spaces = {
'vector_obs': gym.spaces.Box(
low=VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[0],
high=VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[1],
shape=(num_items,),
dtype=np.float32
)
}
# Add other obs spaces other than blstats
obs_spaces.update([
(k, self.env.observation_space[k]) for k in self.env.observation_space if k != "blstats"
])
self.observation_space = gym.spaces.Dict(obs_spaces)
def _create_vector_obs(self, obs):
obs_vector = obs["blstats"] * VectorFeaturesWrapper.BLSTAT_NORMALIZATION_STATS
np.clip(
obs_vector,
VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[0],
VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[1],
out=obs_vector
)
obs["vector_obs"] = obs_vector
_ = obs.pop("blstats")
return obs
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs = self._create_vector_obs(obs)
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
obs = self._create_vector_obs(obs)
return obs
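# Minimal usage sketch (assumptions: NLE is installed and registers the
# "NetHackScore-v0" env id, and its blstats length matches the table above):
def _demo_vector_features():
    env = VectorFeaturesWrapper(gym.make("NetHackScore-v0"))
    obs = env.reset()
    assert "blstats" not in obs and "vector_obs" in obs
    assert obs["vector_obs"].shape == VectorFeaturesWrapper.BLSTAT_NORMALIZATION_STATS.shape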
@njit
def _tile_characters_to_image(
out_image,
chars,
colors,
output_height_chars,
output_width_chars,
char_array,
offset_h,
offset_w
):
"""
    Render characters into out_image, using the cached per-character images in char_array.
"""
char_height = char_array.shape[3]
char_width = char_array.shape[4]
for h in range(output_height_chars):
h_char = h + offset_h
# Stuff outside boundaries is not visible, so
# just leave it black
if h_char < 0 or h_char >= chars.shape[0]:
continue
for w in range(output_width_chars):
w_char = w + offset_w
if w_char < 0 or w_char >= chars.shape[1]:
continue
char = chars[h_char, w_char]
color = colors[h_char, w_char]
h_pixel = h * char_height
w_pixel = w * char_width
out_image[:, h_pixel:h_pixel + char_height, w_pixel:w_pixel + char_width] = char_array[char, color]
class RenderCharImagesWithNumpyWrapper(gym.Wrapper):
"""
    Render characters as images, using PIL to draw the characters as humans see them on screen,
    then caching the glyphs and tiling them with numpy to speed things up.
    To speed things up further, optionally crop the image around the player.
"""
def __init__(self, env, font_size=9, crop_size=None, rescale_font_size=None):
super().__init__(env)
self.char_array = self._initialize_char_array(font_size, rescale_font_size)
self.char_height = self.char_array.shape[2]
self.char_width = self.char_array.shape[3]
# Transpose for CHW
self.char_array = self.char_array.transpose(0, 1, 4, 2, 3)
self.crop_size = crop_size
if crop_size is None:
# Render full "obs"
old_obs_space = self.env.observation_space["obs"]
self.output_height_chars = old_obs_space.shape[0]
self.output_width_chars = old_obs_space.shape[1]
else:
# Render only crop region
self.half_crop_size = crop_size // 2
self.output_height_chars = crop_size
self.output_width_chars = crop_size
self.chw_image_shape = (
3,
self.output_height_chars * self.char_height,
self.output_width_chars * self.char_width
)
# sample-factory expects at least one observation named "obs"
obs_spaces = {
'obs': gym.spaces.Box(
low=0,
high=255,
shape=self.chw_image_shape,
dtype=np.uint8
)
}
obs_spaces.update([(k, self.env.observation_space[k]) for k in self.env.observation_space if k not in ["tty_chars", "tty_colors"]])
self.observation_space = gym.spaces.Dict(obs_spaces)
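    # Example geometry (illustrative numbers only): with crop_size=12 and, say,
    # 9x6-pixel glyphs in the cache, the "obs" image defined above is a CHW
    # uint8 array of shape (3, 12 * 9, 12 * 6) = (3, 108, 72).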
def _initialize_char_array(self, font_size, rescale_font_size):
"""Draw all characters in PIL and cache them in numpy arrays
if rescale_font_size is given, assume it is (width, height)
Returns a np array of (num_chars, num_colors, char_height, char_width, 3)
"""
font = ImageFont.truetype(SMALL_FONT_PATH, font_size)
dummy_text = "".join([(chr(i) if chr(i).isprintable() else " ") for i in range(256)])
_, _, image_width, image_height = font.getbbox(dummy_text)
        # The values above (and those from sibling methods) cannot be trusted....
image_width = int(
|
np.ceil(image_width / 256)
|
numpy.ceil
|
import sys
import copy
import mediapipe as mp
import numpy as np
import cv2 as cv
from utils import CvFpsCalc
from handtracker import HandTracker
from gestureclassifier import GestureClassifier
from collections import deque
import webcam
class ScarletWitch:
def __init__(self, arguments, stdout=sys.stdout):
self.args = arguments
self.stdout = stdout
self.cap_device = arguments.device
self.cap_width = arguments.width
self.cap_height = arguments.height
self.show_info = True
self.mode_view = True
self.last_key = ""
self.brect = [0, 0, 0, 0]
self.use_brect = True
self.freq = 57 # Num of time frames in a dynamic gesture
self.mp_hands = mp.solutions.hands
self.mp_drawing = mp.solutions.drawing_utils
self.running = False
def run(self):
self.running = True
# Set stdout
stdout = sys.stdout
sys.stdout = self.stdout
# Video stream from webcam
vs = webcam.MacVideoStream(self.cap_width, self.cap_height, self.cap_device).start()
# Framerate calculator
cvFpsCalc = CvFpsCalc(buffer_len=10)
# Webcam display
winname = "<NAME>"
dims = [[640, 360], [720, 480], [1280, 720], [1920, 1080]]
win_dims = dims[2]
cv.namedWindow(winname, cv.WINDOW_NORMAL)
cv.resizeWindow(winname, win_dims[0], win_dims[1])
cv.moveWindow(winname, 0, 0)
# Empty frame of hand landmarks
empty_d = self.create_empty_data()
# Coordinate history
history_length = self.freq
landmark_history = deque([empty_d for x in range(history_length)], maxlen=history_length)
# Handtracking and gesture estimation threads
tracker = HandTracker(self.args, vs).start()
classifier = GestureClassifier(history_length)
# Variable initialization
dynamic_gesture_id = -1
dynamic_gesture_label = ""
dynamic_gesture = [dynamic_gesture_id, dynamic_gesture_label]
# Main Loop
while self.running:
            # Process keyboard input
key = cv.waitKey(10)
self.key_action(key)
# Camera capture
image = vs.read()
tracking_results = tracker.read()
image = cv.flip(image, 1) # Mirror display
debug_image = copy.deepcopy(image)
# Get fps
fps = cvFpsCalc.get()
# Landmark processing
if tracking_results.multi_hand_landmarks is not None:
for hand_landmarks in tracking_results.multi_hand_landmarks:
landmark_history, static_gesture, dynamic_gesture = self.process_landmarks(
debug_image, hand_landmarks, landmark_history, classifier)
else:
landmark_history.append(empty_d)
if self.show_info:
debug_image = self.draw_window_info(debug_image, win_dims, fps, dynamic_gesture[1])
cv.imshow(winname, debug_image)
# Shutdown
cv.destroyAllWindows()
vs.stop()
tracker.stop()
print("ScarletWitch Session Ended")
# Reset stdout
sys.stdout = stdout
def create_empty_data(self):
joint = np.zeros((21, 4))
# Compute angles between joints
v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19], :3] # Parent joint
v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], :3] # Child joint
v = v2 - v1 # [20, 3]
# Normalize v
# v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]
        # Get angle using arccos of dot product
angle = np.arccos(np.einsum('nt,nt->n',
v[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:],
v[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:])) # [15,]
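        # 'nt,nt->n' computes a row-wise dot product between consecutive bone
        # vectors; with the all-zero joints above every dot product is 0, so
        # arccos yields pi/2 (90 degrees) for each of the 15 angles.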
angle =
|
np.degrees(angle)
|
numpy.degrees
|
from typing import Tuple, Optional
import numpy as np
import pandas as pd
import logging
import random
class solvetsp:
def __init__(self,
dist_frame: pd.DataFrame):
"""
This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must
be given in a pandas dataframe.
:param dist_frame: dataframe
Dataframe containing the distance matrix for all locations
"""
self.dist_frame = dist_frame
self.num = len(dist_frame) + 1 # Ismaning is at start and end of the list
self.init = None
self.set_init()
# Optimize parameter:
self.sequence = [] # Most recent sequence of locations (in numbers from 0-20)
self.sequence_dists = [] # Distances between the locations
self.dist = 0 # Total distance of the most recent sequence
self.iterated_dists = [] # List of all calculated total distances over the iterations
self.iterated_sequences = [] # List of all calculated sequences over the iterations
self.best_iterated_sequences = [] # List of all sequences from the iteration with the best final result
self.best_iterated_dist = [] # List of all total distances from the iteration with the best final result
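    # For reference, the segment reversal performed via np.concatenate in
    # solve_opt2 below, as a list-based sketch (not part of the original class):
    # reverse the tour between `start` and `stop` inclusive, keep the rest.
    @staticmethod
    def _two_opt_move(sequence, start, stop):
        return list(sequence[:start]) + list(sequence[start:stop + 1])[::-1] + list(sequence[stop + 1:])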
    def solve_opt2(self,
                   scorethresh: float = 0.001,
                   iterations: int = 20):
        """
        This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.
        The number of iterations, each starting with a new random route, can be set. The scorethresh defines the
        threshold at which the algorithm stops the optimizing process within each iteration. A common default value
        here is 0.0001. A score of 0 means no optimization between two steps of the algorithm.
        :param scorethresh: float
            Lower threshold for the score of each iteration
        :param iterations: int
            Number of iterations, each with a random initial route
        :return:
        """
# Get Initial sequence and distance
self.sequence = self.init
self.dist, sequence_dist = self._get_fulldist(self.sequence)
logging.debug("Initial distance set: {d}".format(d=self.dist))
logging.debug("Initial sequence set: {s}".format(s=self.sequence))
all_sequences = []
all_dists = []
# Iterate over the number of iterations:
for it in range(iterations):
score = 1
iteration_sequences = []
iteration_dists = []
while score > scorethresh:
dist_prev = self.dist
# Iterate over all parts of the sequence:
for start in range(1, self.num - 2):
for stop in range(start + 1, self.num - 1):
# Reorder parts of the sequence:
sequence_new = np.concatenate((self.sequence[0:start],
self.sequence[stop:-len(self.sequence) + start - 1:-1],
self.sequence[stop + 1:len(self.sequence)])).tolist()
# Calculate new total distance of the resulting sequence:
dist_new, sequence_new_dist = self._get_fulldist(sequence_new)
self.sequence_dists.append(dist_new)
iteration_sequences.append(sequence_new)
iteration_dists.append(dist_new)
# Check if new total distance is smaller than recent total distance and save new best sequence
# and total distance (if not do nothing):
if dist_new < self.dist:
self.sequence = sequence_new
self.dist = dist_new
logging.debug("New best distance set: {d}".format(d=dist_new))
score = 1 - self.dist / dist_prev
# Save best distance and sequence from this iteration:
all_sequences.append(iteration_sequences)
all_dists.append(iteration_dists)
self.iterated_dists.append(self.dist)
self.iterated_sequences.append(self.sequence)
logging.info("Score of Iteration {i}: {s}, Distance: {d}".format(i=it, s=score, d=self.dist))
# Start over with new initial sequence:
self.set_init(rand=True)
self.sequence = self.init
self.dist, sequence_dist = self._get_fulldist(self.sequence)
# Get best total distance and sequence:
self.dist = np.min(self.iterated_dists) # Storing total distance of best iteration
try:
ind =
|
np.where(self.iterated_dists == self.dist)
|
numpy.where
|
import os
import pathlib
import pickle
import shutil
import time
from functools import partial
import json
import fire
import numpy as np
import torch
from google.protobuf import text_format
from tensorboardX import SummaryWriter
import torchplus
import second.data.kitti_common as kitti
from second.builder import target_assigner_builder, voxel_builder
from second.data.preprocess import merge_second_batch
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.eval import get_coco_eval_result, get_official_eval_result,bev_box_overlap,d3_box_overlap
from second.utils.progress_bar import ProgressBar
from second.pytorch.core import box_torch_ops
from second.pytorch.core.losses import SigmoidFocalClassificationLoss
from second.pytorch.models import fusion
def example_convert_to_torch(example, dtype=torch.float32,
device=None) -> dict:
device = device or torch.device("cuda:0")
example_torch = {}
float_names = [
"voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "rect",
"Trv2c", "P2", "d3_gt_boxes","gt_2d_boxes"
]
for k, v in example.items():
if k in float_names:
example_torch[k] = torch.tensor(v, dtype=torch.float32, device=device).to(dtype)
elif k in ["coordinates", "labels", "num_points"]:
example_torch[k] = torch.tensor(
v, dtype=torch.int32, device=device)
elif k in ["anchors_mask"]:
example_torch[k] = torch.tensor(
v, dtype=torch.uint8, device=device)
else:
example_torch[k] = v
return example_torch
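# Minimal usage sketch (requires a CUDA device, since tensors land on cuda:0
# by default): dtypes are chosen per key; unknown keys pass through untouched.
def _demo_example_convert_to_torch():
    example = {"voxels": np.zeros((4, 5, 4), dtype=np.float32),
               "coordinates": np.zeros((4, 3), dtype=np.int64),
               "metadata": "left untouched"}
    batch = example_convert_to_torch(example)
    assert batch["voxels"].dtype == torch.float32
    assert batch["coordinates"].dtype == torch.int32
    assert batch["metadata"] == "left untouched"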
def build_inference_net(config_path,
model_dir,
result_path=None,
predict_test=False,
ckpt_path=None,
ref_detfile=None,
pickle_result=True,
measure_time=False,
batch_size=1):
model_dir = pathlib.Path(model_dir)
if predict_test:
result_name = 'predict_test'
else:
result_name = 'eval_results'
if result_path is None:
result_path = model_dir / result_name
else:
result_path = pathlib.Path(result_path)
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
model_cfg = config.model.second
detection_2d_path = config.train_config.detection_2d_path
center_limit_range = model_cfg.post_center_limit_range
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
class_names = target_assigner.classes
net = second_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)
net.cuda()
if ckpt_path is None:
print("load existing model")
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
else:
torchplus.train.restore(ckpt_path, net)
    batch_size = batch_size or config.eval_input_reader.batch_size
#batch_size = 1
net.eval()
return net
def train(config_path,
model_dir,
result_path=None,
create_folder=False,
display_step=50,
summary_step=5,
pickle_result=True,
patchs=None):
torch.manual_seed(3)
np.random.seed(3)
if create_folder:
if pathlib.Path(model_dir).exists():
model_dir = torchplus.train.create_folder(model_dir)
patchs = patchs or []
model_dir = pathlib.Path(model_dir)
model_dir.mkdir(parents=True, exist_ok=True)
if result_path is None:
result_path = model_dir / 'results'
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
input_cfg = config.train_input_reader
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
detection_2d_path = config.train_config.detection_2d_path
print("2d detection path:",detection_2d_path)
center_limit_range = model_cfg.post_center_limit_range
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
class_names = target_assigner.classes
net = build_inference_net('./configs/car.fhd.config','../model_dir')
fusion_layer = fusion.fusion()
fusion_layer.cuda()
optimizer_cfg = train_cfg.optimizer
if train_cfg.enable_mixed_precision:
net.half()
net.metrics_to_float()
net.convert_norm_to_float(net)
loss_scale = train_cfg.loss_scale_factor
mixed_optimizer = optimizer_builder.build(optimizer_cfg, fusion_layer, mixed=train_cfg.enable_mixed_precision, loss_scale=loss_scale)
optimizer = mixed_optimizer
# must restore optimizer AFTER using MixedPrecisionWrapper
torchplus.train.try_restore_latest_checkpoints(model_dir,
[mixed_optimizer])
lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, optimizer, train_cfg.steps)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
######################
# PREPARE INPUT
######################
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataset = input_reader_builder.build(
eval_input_cfg,
model_cfg,
        training=True,  # if running for test, this needs to be False
voxel_generator=voxel_generator,
target_assigner=target_assigner)
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=input_cfg.batch_size,
shuffle=True,
num_workers=input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch,
worker_init_fn=_worker_init_fn)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=eval_input_cfg.batch_size,
shuffle=False,
num_workers=eval_input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
data_iter = iter(dataloader)
######################
# TRAINING
######################
focal_loss = SigmoidFocalClassificationLoss()
cls_loss_sum = 0
training_detail = []
log_path = model_dir / 'log.txt'
training_detail_path = model_dir / 'log.json'
if training_detail_path.exists():
with open(training_detail_path, 'r') as f:
training_detail = json.load(f)
logf = open(log_path, 'a')
logf.write(proto_str)
logf.write("\n")
summary_dir = model_dir / 'summary'
summary_dir.mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(str(summary_dir))
total_step_elapsed = 0
remain_steps = train_cfg.steps - net.get_global_step()
t = time.time()
ckpt_start_time = t
total_loop = train_cfg.steps // train_cfg.steps_per_eval + 1
#print("steps, steps_per_eval, total_loop:", train_cfg.steps, train_cfg.steps_per_eval, total_loop)
# total_loop = remain_steps // train_cfg.steps_per_eval + 1
clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
net.set_global_step(torch.tensor([0]))
if train_cfg.steps % train_cfg.steps_per_eval == 0:
total_loop -= 1
mixed_optimizer.zero_grad()
try:
for _ in range(total_loop):
if total_step_elapsed + train_cfg.steps_per_eval > train_cfg.steps:
steps = train_cfg.steps % train_cfg.steps_per_eval
else:
steps = train_cfg.steps_per_eval
for step in range(steps):
lr_scheduler.step(net.get_global_step())
try:
example = next(data_iter)
except StopIteration:
print("end epoch")
if clear_metrics_every_epoch:
net.clear_metrics()
data_iter = iter(dataloader)
example = next(data_iter)
example_torch = example_convert_to_torch(example, float_dtype)
batch_size = example["anchors"].shape[0]
all_3d_output_camera_dict, all_3d_output, top_predictions, fusion_input,tensor_index = net(example_torch,detection_2d_path)
d3_gt_boxes = example_torch["d3_gt_boxes"][0,:,:]
if d3_gt_boxes.shape[0] == 0:
target_for_fusion = np.zeros((1,70400,1))
positives = torch.zeros(1,70400).type(torch.float32).cuda()
negatives = torch.zeros(1,70400).type(torch.float32).cuda()
negatives[:,:] = 1
else:
d3_gt_boxes_camera = box_torch_ops.box_lidar_to_camera(
d3_gt_boxes, example_torch['rect'][0,:], example_torch['Trv2c'][0,:])
d3_gt_boxes_camera_bev = d3_gt_boxes_camera[:,[0,2,3,5,6]]
###### predicted bev boxes
pred_3d_box = all_3d_output_camera_dict[0]["box3d_camera"]
pred_bev_box = pred_3d_box[:,[0,2,3,5,6]]
#iou_bev = bev_box_overlap(d3_gt_boxes_camera_bev.detach().cpu().numpy(), pred_bev_box.detach().cpu().numpy(), criterion=-1)
iou_bev = d3_box_overlap(d3_gt_boxes_camera.detach().cpu().numpy(), pred_3d_box.squeeze().detach().cpu().numpy(), criterion=-1)
iou_bev_max = np.amax(iou_bev,axis=0)
#print(np.max(iou_bev_max))
target_for_fusion = ((iou_bev_max >= 0.7)*1).reshape(1,-1,1)
positive_index = ((iou_bev_max >= 0.7)*1).reshape(1,-1)
positives = torch.from_numpy(positive_index).type(torch.float32).cuda()
negative_index = ((iou_bev_max <= 0.5)*1).reshape(1,-1)
negatives = torch.from_numpy(negative_index).type(torch.float32).cuda()
cls_preds,flag = fusion_layer(fusion_input.cuda(),tensor_index.cuda())
one_hot_targets = torch.from_numpy(target_for_fusion).type(torch.float32).cuda()
negative_cls_weights = negatives.type(torch.float32) * 1.0
cls_weights = negative_cls_weights + 1.0 * positives.type(torch.float32)
pos_normalizer = positives.sum(1, keepdim=True).type(torch.float32)
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
if flag==1:
cls_losses = focal_loss._compute_loss(cls_preds, one_hot_targets, cls_weights.cuda()) # [N, M]
cls_losses_reduced = cls_losses.sum()/example_torch['labels'].shape[0]
cls_loss_sum = cls_loss_sum + cls_losses_reduced
                    if train_cfg.enable_mixed_precision:
                        cls_losses_reduced *= loss_scale
cls_losses_reduced.backward()
mixed_optimizer.step()
mixed_optimizer.zero_grad()
net.update_global_step()
step_time = (time.time() - t)
t = time.time()
metrics = {}
global_step = net.get_global_step()
if global_step % display_step == 0:
print("now it is",global_step,"steps", " and the cls_loss is :",cls_loss_sum/display_step,
"learning_rate: ",float(optimizer.lr),file=logf)
print("now it is",global_step,"steps", " and the cls_loss is :",cls_loss_sum/display_step,
"learning_rate: ",float(optimizer.lr))
cls_loss_sum = 0
                ckpt_elapsed_time = time.time() - ckpt_start_time
                if ckpt_elapsed_time > train_cfg.save_checkpoints_secs:
torchplus.train.save_models(model_dir, [fusion_layer, optimizer],
net.get_global_step())
ckpt_start_time = time.time()
total_step_elapsed += steps
torchplus.train.save_models(model_dir, [fusion_layer, optimizer],
net.get_global_step())
fusion_layer.eval()
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
print("#################################")
print("#################################", file=logf)
print("# EVAL")
print("# EVAL", file=logf)
print("#################################")
print("#################################", file=logf)
print("Generate output labels...")
print("Generate output labels...", file=logf)
t = time.time()
dt_annos = []
prog_bar = ProgressBar()
net.clear_timer()
prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1) // eval_input_cfg.batch_size)
val_loss_final = 0
for example in iter(eval_dataloader):
example = example_convert_to_torch(example, float_dtype)
if pickle_result:
dt_annos_i, val_losses = predict_kitti_to_anno(
net, detection_2d_path, fusion_layer, example, class_names, center_limit_range,
model_cfg.lidar_input)
dt_annos+= dt_annos_i
val_loss_final = val_loss_final + val_losses
else:
                    _predict_kitti_to_file(net, detection_2d_path, fusion_layer, example, result_path_step,
                                           class_names, center_limit_range,
                                           model_cfg.lidar_input)
prog_bar.print_bar()
sec_per_ex = len(eval_dataset) / (time.time() - t)
print("validation_loss:", val_loss_final/len(eval_dataloader))
print("validation_loss:", val_loss_final/len(eval_dataloader),file=logf)
print(f'generate label finished({sec_per_ex:.2f}/s). start eval:')
print(
f'generate label finished({sec_per_ex:.2f}/s). start eval:',
file=logf)
gt_annos = [
info["annos"] for info in eval_dataset.dataset.kitti_infos
]
if not pickle_result:
dt_annos = kitti.get_label_annos(result_path_step)
# result = get_official_eval_result_v2(gt_annos, dt_annos, class_names)
result = get_official_eval_result(gt_annos, dt_annos, class_names)
print(result, file=logf)
print(result)
writer.add_text('eval_result', json.dumps(result, indent=2), global_step)
result = get_coco_eval_result(gt_annos, dt_annos, class_names)
print(result, file=logf)
print(result)
if pickle_result:
with open(result_path_step / "result.pkl", 'wb') as f:
pickle.dump(dt_annos, f)
writer.add_text('eval_result', result, global_step)
#net.train()
fusion_layer.train()
except Exception as e:
torchplus.train.save_models(model_dir, [fusion_layer, optimizer],
net.get_global_step())
logf.close()
raise e
# save model before exit
torchplus.train.save_models(model_dir, [fusion_layer, optimizer],
net.get_global_step())
logf.close()
def _predict_kitti_to_file(net,
detection_2d_path,
fusion_layer,
example,
result_save_path,
class_names,
center_limit_range=None,
lidar_input=False):
batch_image_shape = example['image_shape']
batch_imgidx = example['image_idx']
all_3d_output_camera_dict, all_3d_output, top_predictions, fusion_input,torch_index = net(example,detection_2d_path)
t_start = time.time()
fusion_cls_preds,flag = fusion_layer(fusion_input.cuda(),torch_index.cuda())
t_end = time.time()
t_fusion = t_end - t_start
fusion_cls_preds_reshape = fusion_cls_preds.reshape(1,200,176,2)
all_3d_output.update({'cls_preds':fusion_cls_preds_reshape})
predictions_dicts = predict_v2(net,example, all_3d_output)
for i, preds_dict in enumerate(predictions_dicts):
image_shape = batch_image_shape[i]
img_idx = preds_dict["image_idx"]
if preds_dict["bbox"] is not None or preds_dict["bbox"].size.numel():
box_2d_preds = preds_dict["bbox"].data.cpu().numpy()
box_preds = preds_dict["box3d_camera"].data.cpu().numpy()
scores = preds_dict["scores"].data.cpu().numpy()
box_preds_lidar = preds_dict["box3d_lidar"].data.cpu().numpy()
# write pred to file
box_preds = box_preds[:, [0, 1, 2, 4, 5, 3,
6]] # lhw->hwl(label file format)
label_preds = preds_dict["label_preds"].data.cpu().numpy()
# label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)
result_lines = []
for box, box_lidar, bbox, score, label in zip(
box_preds, box_preds_lidar, box_2d_preds, scores,
label_preds):
if not lidar_input:
if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:
continue
if bbox[2] < 0 or bbox[3] < 0:
continue
# print(img_shape)
if center_limit_range is not None:
limit_range = np.array(center_limit_range)
if (np.any(box_lidar[:3] < limit_range[:3])
or np.any(box_lidar[:3] > limit_range[3:])):
continue
bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
bbox[:2] = np.maximum(bbox[:2], [0, 0])
result_dict = {
'name': class_names[int(label)],
'alpha': -
|
np.arctan2(-box_lidar[1], box_lidar[0])
|
numpy.arctan2
|
import numpy as np
import matplotlib.pyplot as plt
from testCases_v2 import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
np.random.seed(1)
X, Y = load_planar_dataset()
plt.scatter(X[0, :], X[1, :], c=Y.reshape(400,), s=40, cmap=plt.cm.Spectral)
shape_x = X.shape
shape_y = Y.shape
m = shape_x[1]
print("shape of X: " + str(shape_x))
print("shape of Y: " + str(shape_y))
print("I have m = %d training examples. " % m)
##### Plain logistic regression #####
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, np.squeeze(Y).T)
plot_decision_boundary(lambda x: clf.predict(x), X, np.squeeze(Y))
plt.title("Logistic Regression")
LR_predictions = clf.predict(X.T)
print("Accuracy of logistic regression: %d " %
float((np.dot(Y, LR_predictions) + np.dot(1-Y, 1-LR_predictions))/float(Y.size)*100) + "%" +
"(percentage of correctly labelled datapoints)")
##### Neural network with one hidden layer #####
def layer_sizes(X, Y):
"""
    :param X: input dataset of shape (input size, number of examples)
    :param Y: labels of shape (output size, number of examples)
:return:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
n_x = X.shape[0]
n_h = 4
n_y = Y.shape[0]
return (n_x, n_h, n_y)
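# Quick check on the shapes used above: X is (2, 400) and Y is (1, 400), so
# with the fixed hidden size of 4 this returns (2, 4, 1).
def _demo_layer_sizes():
    assert layer_sizes(np.zeros((2, 400)), np.zeros((1, 400))) == (2, 4, 1)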
def initialize_parameters(n_x, n_h, n_y):
|
np.random.seed(2)
|
numpy.random.seed
|
from numpy.testing import *
from numpy import random
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
"""Make sure generated random variables are in [-pi, pi].
Regression test for ticket #986.
"""
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu,1,50)
assert np.all(r > -np.pi) and np.all(r <= np.pi)
    def test_hypergeometric_range(self):
"""Test for ticket #921"""
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
    def test_logseries_convergence(self):
"""Test for ticket #923"""
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
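        # (logseries pmf: P(k) = -p**k / (k * log(1 - p)); with p = 0.8,
        # P(1) = 0.8 / log(5) ~= 0.497 and P(2) = 0.32 / log(5) ~= 0.199.)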
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert -5 <=
|
random.randint(-5,-1)
|
numpy.random.randint
|
import numpy as np
from scipy.stats import expon
from pfb.opt.power_method import power_method
from pfb.opt.pcg import pcg
from pfb.opt.primal_dual import primal_dual
from pfb.operators.psi import DaskPSI
from pfb.operators.psf import PSF
from pfb.prox.prox_21 import prox_21
from pfb.utils.fits import save_fits
from pfb.utils.misc import Gaussian2D
import pyscilog
log = pyscilog.get_logger('SARA')
def resid_func(x, dirty, hessian, mask, beam, wsum):
"""
Returns the unattenuated residual
"""
residual = dirty - hessian(mask(beam(x)))/wsum
residual_mfs = np.sum(residual, axis=0)
return residual, residual_mfs
def sara(psf, model, residual, mask=None, beam_image=None, hessian=None,
wsum=1, adapt_sig21=True, hdr=None, hdr_mfs=None, outfile=None, cpsf=None,
nthreads=1, sig_21=1e-6, sigma_frac=100, maxit=10, tol=1e-3,
gamma=0.99, psi_levels=2, psi_basis=None, alpha=None,
pdtol=1e-6, pdmaxit=250, pdverbose=1, positivity=True,
cgtol=1e-6, cgminit=25, cgmaxit=150, cgverbose=1,
pmtol=1e-5, pmmaxit=50, pmverbose=1):
    if len(residual.shape) != 3:
raise ValueError("Residual must have shape (nband, nx, ny)")
nband, nx, ny = residual.shape
if beam_image is None:
def beam(x): return x
def beaminv(x): return x
else:
        try:
            assert beam_image.shape == (nband, nx, ny)
            def beam(x): return beam_image * x
            def beaminv(x): return np.where(beam_image > 0.01, x / beam_image, x)
        except BaseException:
            raise ValueError("Beam has incorrect shape")
if mask is None:
def mask(x): return x
    else:
        try:
            mask_array = mask  # bind the array before shadowing the name with the closures below
            if mask_array.ndim == 2:
                assert mask_array.shape == (nx, ny)
                def mask(x): return mask_array[None] * x
            elif mask_array.ndim == 3:
                assert mask_array.shape == (1, nx, ny)
                def mask(x): return mask_array * x
            else:
                raise ValueError
        except BaseException:
            raise ValueError("Mask has incorrect shape")
# PSF operator
psfo = PSF(psf, residual.shape, nthreads=nthreads) #, backward_undersize=1.2)
    if cpsf is None:
        raise ValueError("cpsf is required")
    else:
cpsfo = PSF(cpsf, residual.shape, nthreads=nthreads)
residual_mfs = np.sum(residual, axis=0)
residual = mask(beam(residual))
rmax = np.abs(residual_mfs).max()
rms =
|
np.std(residual_mfs)
|
numpy.std
|
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
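# e.g. single-precision operands are compared to 6 decimals and
# double-precision ones to 12, so this passes for float32 while the
# double-precision analogue would not:
#   assert_almost_equal(single(1.0), single(1.0 + 1e-7))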
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
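# Hedged example: dot_generalized applies `dot` across the leading (stack)
# axes, which for stacks of matrices agrees with np.matmul.
def _demo_dot_generalized():
    a = np.random.rand(3, 2, 2)
    b = np.random.rand(3, 2, 2)
    assert_allclose(dot_generalized(a, b), np.matmul(a, b))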
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    # kept apart from TestSolve so it can be reused for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
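# Illustrative relation exercised above: for a Hermitian matrix the singular
# values are the absolute eigenvalues, sorted in descending order.
def _demo_hermitian_svd():
    a = np.array([[1., 2.], [2., 1.]])
    s = linalg.svd(a, compute_uv=False, hermitian=True)
    assert_allclose(s, np.sort(np.abs(linalg.eigvalsh(a)))[::-1])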
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
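# Worked check (illustrative): for the 2-norm, cond(A) equals the ratio of
# the largest to smallest singular value, as asserted in CondCases.do above.
def _demo_cond_via_svd():
    a = np.array([[3., 0.], [0., 1.]])
    s = linalg.svd(a, compute_uv=False)
    assert_almost_equal(linalg.cond(a, 2), s[0] / s[-1])  # == 3.0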
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
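# Illustrative identity used in DetCases.do: det(a) == sign * exp(logabsdet).
def _demo_slogdet_identity():
    a = np.array([[2., 0.], [0., 3.]])
    s, ld = linalg.slogdet(a)
    assert_almost_equal(s * np.exp(ld), linalg.det(a))  # 6.0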
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(consistent_subclass(x, b))
assert_(consistent_subclass(residuals, b))
class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
w = sup.record(FutureWarning, "`rcond` parameter will change")
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
assert_(rank == 3)
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
@pytest.mark.parametrize(["m", "n", "n_rhs"], [
(4, 2, 2),
(0, 4, 1),
(0, 4, 2),
(4, 0, 1),
(4, 0, 2),
(4, 2, 0),
(0, 0, 0)
])
def test_empty_a_b(self, m, n, n_rhs):
a = np.arange(m * n).reshape(m, n)
b = np.ones((m, n_rhs))
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
if m == 0:
assert_((x == 0).all())
assert_equal(x.shape, (n, n_rhs))
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
if m > n and n_rhs > 0:
# residuals are exactly the squared norms of b's columns
r = b - np.dot(a, x)
assert_almost_equal(residuals, (r * r).sum(axis=-2))
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
def test_incompatible_dims(self):
# use modified version of docstring example
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
A = np.vstack([x, np.ones(len(x))]).T
with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
linalg.lstsq(A, y, rcond=None)
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
rshft_2 = rshft_0[[2, 3, 0, 1]]
rshft_3 = rshft_0[[1, 2, 3, 0]]
rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
stacked = np.block([[[rshft_0]]]*2)
#FIXME the 'e' dtype might work in future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_one(self, dt):
def tz(mat):
mz = matrix_power(mat, 1)
assert_equal(mz, mat)
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_two(self, dt):
def tz(mat):
mz = matrix_power(mat, 2)
mmul = matmul if mat.dtype != object else dot
assert_equal(mz, mmul(mat, mat))
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_minus_one(self, dt):
def tz(mat):
invmat = matrix_power(mat, -1)
mmul = matmul if mat.dtype != object else dot
assert_almost_equal(
mmul(invmat, mat), identity_like_generalized(mat))
for mat in self.rshft_all:
if dt not in self.dtnoinv:
tz(mat.astype(dt))
def test_exceptions_bad_power(self, dt):
mat = self.rshft_0.astype(dt)
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
def test_exceptions_not_invertible(self, dt):
if dt in self.dtnoinv:
return
mat = self.noninv.astype(dt)
assert_raises(LinAlgError, matrix_power, mat, -1)
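# Why test_large_power terminates quickly (sketch): matrix_power uses binary
# exponentiation, and the cyclic-shift matrix has period 4, so the huge
# exponents above reduce to n mod 4.
def _demo_shift_period():
    rshft = np.eye(4)[[3, 0, 1, 2]]
    assert_equal(matrix_power(rshft, 4), np.eye(4))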
class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float32)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[..., None, :] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class _TestNormBase:
dt = None
dec = None
class _TestNormGeneral(_TestNormBase):
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = np.typecodes['AllInteger']
inexact_types = np.typecodes['AllFloat']
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
an = norm(at, 0)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
an = norm(at, 4)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None, None))
expected_shape = (1, 1, 1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
class _TestNorm2D(_TestNormBase):
# Define the part for 2d arrays separately, so we can subclass this
# and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
array = np.array
def test_matrix_empty(self):
assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
def test_matrix_return_type(self):
a = self.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 3.0**(1.0/2.0))
an = norm(at, -2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
assert_(issubclass(an.dtype.type, np.floating))
# Lower bar needed to support low precision floats.
# They end up being off by 1 in the 7th place.
np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
def test_matrix_2x2(self):
A = self.array([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1 / 10) * \
self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` or any other string raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
assert_raises(ValueError, norm, [3, 4], 'test', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(np.AxisError, norm, B, None, 3)
assert_raises(np.AxisError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
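# Quick cross-check (illustrative) of the values used in test_matrix_2x2:
# the matrix inf-norm is the maximum absolute row sum, and the 1-norm the
# maximum absolute column sum.
def _demo_matrix_norm_identities():
    A = np.array([[1., 3.], [5., 7.]])
    assert_almost_equal(norm(A, np.inf), np.abs(A).sum(axis=1).max())  # 12
    assert_almost_equal(norm(A, 1), np.abs(A).sum(axis=0).max())       # 10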
class _TestNorm(_TestNorm2D, _TestNormGeneral):
pass
class TestNorm_NonSystematic:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6 + 7j
d[1] = -6 + 7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
# Separate definitions so we can use them for matrix tests.
class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
class TestNormDouble(_TestNorm, _TestNormDoubleBase):
pass
class TestNormSingle(_TestNorm, _TestNormSingleBase):
pass
class TestNormInt64(_TestNorm, _TestNormInt64Base):
pass
class TestMatrixRank:
def test_matrix_rank(self):
# Full rank matrix
assert_equal(4, matrix_rank(np.eye(4)))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(matrix_rank(I), 3)
# All zeros - zero rank
assert_equal(matrix_rank(np.zeros((4, 4))), 0)
# 1 dimension - rank 1 unless all 0
assert_equal(matrix_rank([1, 0, 0, 0]), 1)
assert_equal(matrix_rank(np.zeros((4,))), 0)
# accepts array-like
assert_equal(matrix_rank([1]), 1)
# greater than 2 dimensions treated as stacked matrices
ms = np.array([I, np.eye(4), np.zeros((4,4))])
assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
# works on scalar
assert_equal(matrix_rank(1), 1)
def test_symmetric_rank(self):
assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(3, matrix_rank(I, hermitian=True))
# manually supplied tolerance
I[-1, -1] = 1e-8
assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR:
    # Define the array class here, so we can run these tests on matrices elsewhere.
array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize(["m", "n"], [
(3, 0),
(0, 3),
(0, 0)
])
def test_qr_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
self.check_qr(a)
h, tau = np.linalg.qr(a, mode='raw')
assert_equal(h.dtype, np.double)
assert_equal(tau.dtype, np.double)
assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,))
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = self.array([[1, 2], [3, 4]])
b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
class TestCholesky:
# TODO: are there no other tests for cholesky?
def test_basic_property(self):
# Check A = L L^H
shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
dtypes = (np.float32, np.float64, np.complex64, np.complex128)
for shape, dtype in itertools.product(shapes, dtypes):
np.random.seed(1)
a = np.random.randn(*shape)
if np.issubdtype(dtype, np.complexfloating):
a = a + 1j*np.random.randn(*shape)
t = list(range(len(shape)))
t[-2:] = -1, -2
a = np.matmul(a.transpose(t).conj(), a)
a = np.asarray(a, dtype=dtype)
c = np.linalg.cholesky(a)
b = np.matmul(c, c.transpose(t).conj())
assert_allclose(b, a,
err_msg=f'{shape} {dtype}\n{a}\n{c}',
atol=500 * a.shape[0] * np.finfo(dtype).eps)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.float64)
# for documentation purpose:
assert_(isinstance(res, np.ndarray))
a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.complex64)
assert_(isinstance(res, np.ndarray))
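# Sketch of the A = L L^H property checked in test_basic_property, in its
# simplest real-valued form.
def _demo_cholesky_roundtrip():
    a = np.array([[4., 2.], [2., 3.]])
    c = np.linalg.cholesky(a)
    assert_allclose(c @ c.T, a)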
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
XERBLA_OK = 255
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
pytest.skip("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except Exception:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
except ValueError as e:
if "DORGQR parameter number 5" in str(e):
# success, reuse error code to mark success as
# FORTRAN STOP returns as success.
os._exit(XERBLA_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
pytest.skip('Numpy xerbla not linked in.')
@pytest.mark.slow
def test_sdot_bug_8577():
    # Regression test that loading certain other libraries does not
    # lead to wrong results in float32 linear algebra.
#
# There's a bug gh-8577 on OSX that can trigger this, and perhaps
# there are also other situations in which it occurs.
#
# Do the check in a separate process.
bad_libs = ['PyQt5.QtWidgets', 'IPython']
template = textwrap.dedent("""
import sys
{before}
try:
import {bad_lib}
except ImportError:
sys.exit(0)
{after}
x = np.ones(2, dtype=np.float32)
sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
""")
for bad_lib in bad_libs:
code = template.format(before="import numpy as np", after="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
# Swapped import order
code = template.format(after="import numpy as np", before="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
class TestMultiDot:
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
def test_basic_function_with_two_arguments(self):
# separate code path with two arguments
A = np.random.random((6, 2))
B = np.random.random((2, 6))
assert_almost_equal(multi_dot([A, B]), A.dot(B))
assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
def test_basic_function_with_dynamic_programing_optimization(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_three_arguments_and_out(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
out = np.zeros((6, 2))
ret = multi_dot([A, B, C], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B).dot(C))
assert_almost_equal(out, np.dot(A, np.dot(B, C)))
def test_two_arguments_and_out(self):
# separate code path with two arguments
        A = np.random.random((6, 2))
        B = np.random.random((2, 6))
        out = np.zeros((6, 6))
        ret = multi_dot([A, B], out=out)
        assert out is ret
        assert_almost_equal(out, A.dot(B))
        assert_almost_equal(out, np.dot(A, B))
import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Str, Float, Tuple, Int, Typed, Property, Atom,
Bool, Enum, List, Dict, Callable, Value, observe)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import octave_space, SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
n_time = int(fs * duration)
freq = np.fft.rfftfreq(n_time, fs**-1)
return np.log10(freq)
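# Usage sketch (illustrative): a 1 s window at fs=1000 Hz has 501 rfft bins.
# Note the DC bin maps to -inf under log10 (numpy emits a RuntimeWarning),
# which the downstream log-frequency plots tolerate.
def _demo_get_x_fft():
    x = get_x_fft(1000, 1)
    assert len(x) == 501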
def get_color_cycle(name):
module_name, cmap_name = name.rsplit('.', 1)
module = importlib.import_module(module_name)
cmap = getattr(module, cmap_name)
return itertools.cycle(cmap.colors)
def make_color(color):
if isinstance(color, tuple):
return QColor(*color)
elif isinstance(color, str):
return QColor(color)
else:
        raise ValueError('Unknown color %r' % (color,))
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
#: Define the pen color cycle. Can be a list of colors or a string
#: indicating the color palette to use in palettable.
pen_color_cycle = d_(Typed(object))
_plot_colors = Typed(dict)
def _default_pen_color_cycle(self):
return ['k']
def _make_plot_cycle(self):
if isinstance(self.pen_color_cycle, str):
cycle = get_color_cycle(self.pen_color_cycle)
else:
cycle = itertools.cycle(self.pen_color_cycle)
return defaultdict(lambda: next(cycle))
@d_func
def get_pen_color(self, key):
if self._plot_colors is None:
self._plot_colors = self._make_plot_cycle()
color = self._plot_colors[key]
if not isinstance(color, str):
return QColor(*color)
else:
return QColor(color)
def _observe_pen_color_cycle(self, event):
self._plot_colors = self._make_plot_cycle()
self._reset_plots()
def _reset_plots(self):
raise NotImplementedError
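# Sketch of the defaultdict-based cycling used by _make_plot_cycle: each
# previously unseen key is lazily assigned the next color in the cycle and
# then remembered on subsequent lookups.
def _demo_color_cycle():
    cycle = itertools.cycle(['k', 'r'])
    colors = defaultdict(lambda: next(cycle))
    assert (colors['a'], colors['b'], colors['a']) == ('k', 'r', 'k')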
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
container = Typed(object)
# Size of display window
span = Float(1)
# Delay before clearing window once data has "scrolled off" the window.
delay = Float(0)
# Current visible data range
current_range = Tuple(Float(), Float())
def add_source(self, source):
cb = partial(self.source_added, source=source)
source.add_callback(cb)
def _default_current_range(self):
return 0, self.span
def _observe_delay(self, event):
self._update_range()
def _observe_span(self, event):
self._update_range()
def _update_range(self):
raise NotImplementedError
class EpochDataRange(BaseDataRange):
max_duration = Float()
def source_added(self, data, source):
n = [len(d['signal']) for d in data]
max_duration = max(n) / source.fs
self.max_duration = max(max_duration, self.max_duration)
def _observe_max_duration(self, event):
self._update_range()
def _update_range(self):
self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
# Automatically updated. Indicates last "seen" time based on all data
# sources reporting to this range.
current_time = Float(0)
current_samples = Typed(defaultdict, (int,))
current_times = Typed(defaultdict, (float,))
def _observe_current_time(self, event):
self._update_range()
def _update_range(self):
low_value = (self.current_time//self.span)*self.span - self.delay
high_value = low_value+self.span
self.current_range = low_value, high_value
def add_event_source(self, source):
cb = partial(self.event_source_added, source=source)
source.add_callback(cb)
def source_added(self, data, source):
self.current_samples[source] += data.shape[-1]
self.current_times[source] = self.current_samples[source]/source.fs
self.current_time = max(self.current_times.values())
def event_source_added(self, data, source):
self.current_times[source] = data[-1][1]
self.current_time = max(self.current_times.values())
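# Worked example (illustrative) of the scrolling-window arithmetic in
# ChannelDataRange._update_range: with span=1 s and delay=0.25 s, a current
# time of 2.3 s snaps the visible range to [1.75, 2.75).
def _demo_channel_range():
    span, delay, current_time = 1.0, 0.25, 2.3
    low = (current_time // span) * span - delay
    assert (low, low + span) == (1.75, 2.75)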
def create_container(children, x_axis=None):
log.debug('Creating graphics layout')
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(children):
log.debug('... child %d with viewbox %r', i, child.viewbox)
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
try:
container.addItem(child.viewbox_norm, i, 1)
except AttributeError:
pass
if x_axis is not None:
container.addItem(x_axis, i+1, 1)
# Link the child viewboxes together
for child in children[1:]:
child.viewbox.setXLink(children[0].viewbox)
return container
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
label = d_(Str())
container = Typed(pg.GraphicsWidget)
x_axis = Typed(pg.AxisItem)
base_viewbox = Property()
legend = Typed(pg.LegendItem)
x_transform = Callable()
buttons = d_(List())
current_button = d_(Value())
allow_auto_select = d_(Bool(True))
auto_select = d_(Bool(True))
@d_func
def fmt_button(self, key):
return str(key)
def _observe_buttons(self, event):
if not self.buttons:
return
if self.current_button not in self.buttons:
self.current_button = self.buttons[0]
def _observe_allow_auto_select(self, event):
if not self.allow_auto_select:
self.auto_select = False
def _default_x_transform(self):
return lambda x: x
def _default_container(self):
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(self.children):
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
try:
container.addItem(child.viewbox_norm, i, 1)
except AttributeError:
pass
child._configure_viewbox()
if self.x_axis is not None:
container.addItem(self.x_axis, i+1, 1)
# Link the child viewboxes together
for child in self.children[1:]:
child.viewbox.setXLink(self.children[0].viewbox)
return container
def add_legend_item(self, plot, label):
self.legend.addItem(plot, label)
def _default_legend(self):
legend = pg.LegendItem()
legend.setParentItem(self.container)
return legend
def _get_base_viewbox(self):
return self.children[0].viewbox
def _default_x_axis(self):
x_axis = pg.AxisItem('bottom')
x_axis.setGrid(64)
x_axis.linkToView(self.children[0].viewbox)
return x_axis
def update(self, event=None):
pass
def find(self, name):
for child in self.children:
if child.name == name:
return child
def format_container(self):
pass
def _reset_plots(self):
pass
class PlotContainer(BasePlotContainer):
x_min = d_(Float(0))
x_max = d_(Float(0))
@observe('x_min', 'x_max')
def format_container(self, event=None):
# If we want to specify values relative to a psi context variable, we
# cannot do it when initializing the plots.
if (self.x_min != 0) or (self.x_max != 0):
self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)
def update(self, event=None):
deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same time-based X-axis
'''
data_range = Typed(BaseDataRange)
span = d_(Float(1))
delay = d_(Float(0.25))
def _default_container(self):
container = super()._default_container()
# Ensure that the x axis shows the planned range
self.base_viewbox.setXRange(0, self.span, padding=0)
self.data_range.observe('current_range', self.update)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Time', unitPrefix='sec.')
return x_axis
def update(self, event=None):
low, high = self.data_range.current_range
deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
super().update()
class TimeContainer(BaseTimeContainer):
def _default_data_range(self):
return ChannelDataRange(container=self, span=self.span,
delay=self.delay)
def update(self, event=None):
for child in self.children:
child.update()
super().update()
class EpochTimeContainer(BaseTimeContainer):
def _default_data_range(self):
return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
    values = 10**np.array(values).astype(float)
return ['{:.1f}'.format(v * 1e-3) for v in values]
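# For illustration: tick values arrive as log10 of the frequency in Hz, so
# format_log_ticks([3, 4, 5], None, None) returns ['1.0', '10.0', '100.0'],
# i.e. labels in kHz.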
class FFTContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same frequency-based X-axis
'''
freq_lb = d_(Float(500))
freq_ub = d_(Float(50000))
octave_spacing = d_(Bool(True))
def _default_x_transform(self):
return np.log10
@observe('container', 'freq_lb', 'freq_ub')
def _update_x_limits(self, event):
self.base_viewbox.setXRange(np.log10(self.freq_lb),
np.log10(self.freq_ub),
padding=0)
if self.octave_spacing:
major_ticks = octave_space(self.freq_lb / 1e3, self.freq_ub / 1e3, 1.0)
major_ticklabs = [str(t) for t in major_ticks]
major_ticklocs = np.log10(major_ticks * 1e3)
minor_ticks = octave_space(self.freq_lb / 1e3, self.freq_ub / 1e3, 0.125)
minor_ticklabs = [str(t) for t in minor_ticks]
minor_ticklocs = np.log10(minor_ticks * 1e3)
ticks = [
list(zip(major_ticklocs, major_ticklabs)),
list(zip(minor_ticklocs, minor_ticklabs)),
]
self.x_axis.setTicks(ticks)
else:
self.x_axis.setTicks()
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Frequency (kHz)')
x_axis.logTickStrings = format_log_ticks
x_axis.setLogMode(True)
return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
viewbox = Typed(pg.ViewBox)
viewbox_norm = Typed(pg.ViewBox)
y_axis = Typed(pg.AxisItem)
y_min = d_(Float(0))
y_max = d_(Float(0))
y_mode = d_(Enum('mouse', 'fixed'))
data_range = Property()
save_limits = d_(Bool(False))
@observe('y_min', 'y_max')
def _update_limits(self, event=None):
self.viewbox.setYRange(self.y_min, self.y_max, padding=0)
def _default_name(self):
return self.label
def _get_data_range(self):
return self.parent.data_range
def _default_y_axis(self):
y_axis = pg.AxisItem('left')
y_axis.setLabel(self.label)
y_axis.setGrid(64)
return y_axis
def _sync_limits(self, vb=None):
with self.suppress_notifications():
box = self.viewbox.viewRange()
self.y_min = float(box[1][0])
self.y_max = float(box[1][1])
def _default_viewbox(self):
return pg.ViewBox(enableMenu=False)
def _configure_viewbox(self):
viewbox = self.viewbox
viewbox.setMouseEnabled(
x=False,
y=self.y_mode == 'mouse'
)
viewbox.disableAutoRange()
viewbox.setBackgroundColor('w')
self.y_axis.linkToView(viewbox)
viewbox.setYRange(self.y_min, self.y_max, padding=0)
for child in self.children:
plots = child.get_plots()
if isinstance(plots, dict):
for label, plot in plots.items():
deferred_call(self.add_plot, plot, label)
else:
for plot in plots:
deferred_call(self.add_plot, plot)
viewbox.sigRangeChanged.connect(self._sync_limits)
return viewbox
def _default_viewbox_norm(self):
viewbox = pg.ViewBox(enableMenu=False)
viewbox.setMouseEnabled(x=False, y=False)
viewbox.disableAutoRange()
return viewbox
def update(self, event=None):
for child in self.children:
child.update()
def add_plot(self, plot, label=None):
self.viewbox.addItem(plot)
if label:
self.parent.legend.addItem(plot, label)
def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
kind='line'):
'''
Convenience function used by plugins
This is typically used in post-processing routines to add static plots
to existing view boxes.
'''
if log_x:
x = np.log10(x)
if log_y:
y = np.log10(y)
x = np.asarray(x)
y = np.asarray(y)
m = np.isfinite(x) & np.isfinite(y)
x = x[m]
y = y[m]
if kind == 'line':
item = pg.PlotCurveItem(pen=pg.mkPen(color))
elif kind == 'scatter':
item = pg.ScatterPlotItem(pen=pg.mkPen(color))
item.setData(x, y)
self.add_plot(item)
if label is not None:
self.parent.legend.addItem(item, label)
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
source_name = d_(Str())
source = Typed(object)
label = d_(Str())
def update(self, event=None):
pass
def _reset_plots(self):
pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
pen_color = d_(Typed(object))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
label = d_(Str())
pen = Typed(object)
plot = Typed(object)
def get_plots(self):
return [self.plot]
def _default_pen_color(self):
return 'black'
def _default_pen(self):
color = make_color(self.pen_color)
return pg.mkPen(color, width=self.pen_width)
def _default_name(self):
return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
downsample = Int(0)
decimate_mode = d_(Enum('extremes', 'mean'))
_cached_time = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_channel_plot'
def _default_plot(self):
return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_source(self.source)
self.parent.data_range.observe('span', self._update_time)
self.source.add_callback(self._append_data)
self.parent.viewbox.sigResized.connect(self._update_decimation)
self._update_time(None)
self._update_decimation(self.parent.viewbox)
def _update_time(self, event):
# Precompute the time array since this can be the "slow" point
# sometimes in computations
n = round(self.parent.data_range.span*self.source.fs)
self._cached_time = np.arange(n)/self.source.fs
self._update_decimation()
self._update_buffer()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs,
self.parent.data_range.span*2)
def _update_decimation(self, viewbox=None):
try:
width, _ = self.parent.viewbox.viewPixelSize()
dt = self.source.fs**-1
self.downsample = round(width/dt/2)
        except Exception:
            # The viewbox may not be fully initialized yet; keep the current
            # downsample factor and retry on the next resize.
            pass
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def update(self, event=None):
low, high = self.parent.data_range.current_range
data = self._buffer.get_range_filled(low, high, np.nan)
t = self._cached_time[:len(data)] + low
if self.downsample > 1:
t = t[::self.downsample]
if self.decimate_mode == 'extremes':
d_min, d_max = decimate_extremes(data, self.downsample)
t = t[:len(d_min)]
x = np.c_[t, t].ravel()
y = np.c_[d_min, d_max].ravel()
if x.shape == y.shape:
deferred_call(self.plot.setData, x, y, connect='pairs')
elif self.decimate_mode == 'mean':
d = decimate_mean(data, self.downsample)
t = t[:len(d)]
if t.shape == d.shape:
deferred_call(self.plot.setData, t, d)
else:
t = t[:len(data)]
deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
def decimate_mean(data, downsample):
    # If data is empty, return immediately
    if data.size == 0:
        return np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
    # If data is empty, return immediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
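# A minimal sketch of the decimation helpers, assuming a 1-D input; the
# trailing fragment that cannot fill a complete chunk is dropped, as the
# comment in _reshape_for_decimate describes:
# >>> data = np.arange(13)
# >>> decimate_mean(data, 5)        # chunks [0..4] and [5..9]; last 3 dropped
# array([2., 7.])
# >>> decimate_extremes(data, 5)
# (array([0, 5]), array([4, 9]))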
class FFTChannelPlot(ChannelPlot):
time_span = d_(Float(1))
window = d_(Enum('hamming', 'flattop'))
_x = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_fft_plot'
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._append_data)
self.source.observe('fs', self._cache_x)
self._update_buffer()
self._cache_x()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs, self.time_span)
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def _cache_x(self, event=None):
if self.source.fs:
self._x = get_x_fft(self.source.fs, self.time_span)
def update(self, event=None):
if self._buffer.get_time_ub() >= self.time_span:
log.debug('Time span %f to %f', -self.time_span, 0)
data = self._buffer.get_latest(-self.time_span, 0)
psd = util.psd(data, self.source.fs, self.window)
spl = self.source.calibration.get_spl(self._x, psd)
deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
rect_center = d_(Float(0.5))
rect_height = d_(Float(1))
fill_color = d_(Typed(object))
brush = Typed(object)
_rising = Typed(list, ())
_falling = Typed(list, ())
def _default_brush(self):
return pg.mkBrush(self.fill_color)
def _default_plot(self):
plot = pg.QtGui.QGraphicsPathItem()
plot.setPen(self.pen)
plot.setBrush(self.brush)
return plot
def update(self, event=None):
lb, ub = self.parent.data_range.current_range
current_time = self.parent.data_range.current_time
starts = self._rising
ends = self._falling
if len(starts) == 0 and len(ends) == 1:
starts = [0]
elif len(starts) == 1 and len(ends) == 0:
ends = [current_time]
elif len(starts) > 0 and len(ends) > 0:
if starts[0] > ends[0]:
starts = np.r_[0, starts]
if starts[-1] > ends[-1]:
ends = np.r_[ends, current_time]
try:
epochs = np.c_[starts, ends]
except ValueError as e:
log.exception(e)
log.warning('Unable to update %r, starts shape %r, ends shape %r',
self, starts, ends)
return
m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
epochs = epochs[m.any(axis=-1)]
path = pg.QtGui.QPainterPath()
y_start = self.rect_center - self.rect_height*0.5
for x_start, x_end in epochs:
x_width = x_end-x_start
r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
path.addRect(r)
deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
event = d_(Str())
def _observe_event(self, event):
if self.event is not None:
self.parent.data_range.observe('current_time', self.update)
def _default_name(self):
return self.event + '_timeseries'
def _append_data(self, bound, timestamp):
if bound == 'start':
self._rising.append(timestamp)
elif bound == 'end':
self._falling.append(timestamp)
self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
source_name = d_(Str())
source = Typed(object)
def _default_name(self):
return self.source_name + '_timeseries'
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_event_source(self.source)
self.parent.data_range.observe('current_time', self.update)
self.source.add_callback(self._append_data)
def _append_data(self, data):
for (etype, value) in data:
if etype == 'rising':
self._rising.append(value)
elif etype == 'falling':
self._falling.append(value)
################################################################################
# Group plots
################################################################################
class FixedTextItem(pg.TextItem):
def updateTransform(self, force=False):
p = self.parentItem()
if p is None:
pt = pg.QtGui.QTransform()
else:
pt = p.sceneTransform()
if not force and pt == self._lastTransform:
return
t = pt.inverted()[0]
# reset translation
t.setMatrix(1, t.m12(), t.m13(), t.m21(), 1, t.m23(), 0, 0, t.m33())
# apply rotation
angle = -self.angle
if self.rotateAxis is not None:
d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))
a = np.arctan2(d.y(), d.x()) * 180 / np.pi
angle += a
t.rotate(angle)
self.setTransform(t)
self._lastTransform = pt
self.updateTextPos()
class GroupMixin(ColorCycleMixin):
source = Typed(object)
pen_width = d_(Int(0))
antialias = d_(Bool(False))
plots = Dict()
labels = Dict()
_data_cache = Typed(object)
_data_count = Typed(object)
_data_updated = Typed(object)
_data_n_samples = Typed(object)
_pen_color_cycle = Typed(object)
_plot_colors = Typed(object)
_x = Typed(np.ndarray)
n_update = d_(Int(1))
#: List of attributes that define the tab groups
tab_grouping = d_(List())
#: List of attributes that define the plot groups
plot_grouping = d_(List())
#: List of existing tab keys
tab_keys = d_(List())
#: List of existing plot keys
plot_keys = d_(List())
#: Which tab is currently selected?
selected_tab = d_(Value())
#: Should we auto-select the tab based on the most recently acquired data?
auto_select = d_(Bool(False))
#: What was the most recent tab key seen?
last_seen_key = Value()
#: Function that takes the epoch metadata and returns a key that is used to
#: assign the epoch to a group. Return None to exclude the epoch from the
#: group criteria.
@d_func
def group_key(self, md):
plot_key = tuple(md[a] for a in self.plot_grouping)
tab_key = tuple(md[a] for a in self.tab_grouping)
return tab_key, plot_key
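    # For illustration: with tab_grouping=['frequency'] and
    # plot_grouping=['level'], metadata {'frequency': 4000, 'level': 60}
    # yields the key ((4000,), (60,)): one tab per frequency and one curve
    # per level within that tab.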
@d_func
def fmt_plot_label(self, key):
return None
def _observe_allow_auto_select(self, event):
if not self.allow_auto_select:
self.auto_select = False
def _default_selected_tab(self):
return ()
def _observe_selected_tab(self, event):
self.update(tab_changed=True)
@observe('last_seen_key', 'auto_select')
def _update_selected_tab(self, event):
if not self.auto_select:
return
if self.last_seen_key is None:
return
if self.last_seen_key[0] != self.selected_tab:
self.selected_tab = self.last_seen_key[0]
def _reset_plots(self):
# Clear any existing plots and reset color cycle
        for plot in self.plots.values():
            self.parent.viewbox.removeItem(plot)
        for label in self.labels.values():
            self.parent.viewbox_norm.removeItem(label)
        self.plots = {}
        self.labels = {}
self._data_cache = defaultdict(list)
self._data_count = defaultdict(int)
self._data_updated = defaultdict(int)
self._data_n_samples = defaultdict(int)
def get_plots(self):
return []
def _make_new_plot(self, key):
try:
pen_color = self.get_pen_color(key)
pen = pg.mkPen(pen_color, width=self.pen_width)
plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
self.plots[key] = plot
deferred_call(self.parent.viewbox.addItem, plot)
label = self.fmt_plot_label(key)
if label is not None:
text = pg.TextItem(label, color=pen_color,
border=pg.mkPen(pen_color),
fill=pg.mkBrush('w'))
deferred_call(self.parent.viewbox_norm.addItem, text)
self.labels[key] = text
except KeyError as key_error:
key = key_error.args[0]
m = f'Cannot update plot since a field, {key}, ' \
'required by the plot is missing.'
raise ConfigurationException(m) from key_error
def get_plot(self, key):
if key not in self.plots:
self._make_new_plot(key)
return self.plots[key]
class EpochGroupMixin(GroupMixin):
duration = Float()
def _y(self, epoch):
return np.mean(epoch, axis=0) if len(epoch) \
else np.full_like(self._x, np.nan)
def _update_duration(self, event=None):
self.duration = self.source.duration
def _epochs_acquired(self, epochs):
for d in epochs:
key = self.group_key(d['info']['metadata'])
if key is not None:
signal = d['signal']
self._data_cache[key].append(signal)
self._data_count[key] += 1
# Track number of samples
n = max(self._data_n_samples[key], len(signal))
self._data_n_samples[key] = n
self.last_seen_key = key
# Does at least one epoch need to be updated?
self._check_selected_tab_count()
def _get_selected_tab_keys(self):
return [k for k in self._data_count if k[0] == self.selected_tab]
def _check_selected_tab_count(self):
for key in self._get_selected_tab_keys():
current_n = self._data_count[key]
last_n = self._data_updated[key]
if current_n >= (last_n + self.n_update):
n = max(self._data_n_samples.values())
self.duration = n / self.source.fs
self.update()
break
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._epochs_acquired)
self.source.observe('duration', self._update_duration)
self.source.observe('fs', self._cache_x)
self.observe('duration', self._cache_x)
self._reset_plots()
self._cache_x()
def _observe_selected_tab(self, event):
self.update(tab_changed=True)
def update(self, event=None, tab_changed=False):
todo = []
if self._x is None:
return
for pk in self.plot_keys:
plot = self.get_plot(pk)
key = (self.selected_tab, pk)
try:
last_n = self._data_updated[key]
current_n = self._data_count[key]
needs_update = current_n >= (last_n + self.n_update)
if tab_changed or needs_update:
data = self._data_cache[key]
self._data_updated[key] = len(data)
if data:
x = self._x
y = self._y(data)
else:
x = y = np.array([])
todo.append((plot.setData, x, y))
except KeyError:
if tab_changed:
x = y = np.array([])
todo.append((plot.setData, x, y))
def update():
for setter, x, y in todo:
setter(x, y)
deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.duration:
n_time = round(self.source.fs * self.duration)
self._x = np.arange(n_time)/self.source.fs
def _default_name(self):
return self.source_name + '_grouped_epoch_average_plot'
def _observe_source(self, event):
super()._observe_source(event)
if self.source is not None:
self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
def _default_name(self):
return self.source_name + '_grouped_epoch_fft_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
        y = np.mean(epoch, axis=0)
# BSD 3-Clause License
#
# Copyright (c) 2018 Rigetti & Co, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=C
from itertools import product
import numpy as np
import pytest
from openfermion.third_party._higham import (heaviside, higham_polynomial,
higham_root, map_to_tensor,
map_to_matrix,
fixed_trace_positive_projection)
def test_heaviside():
assert np.isclose(heaviside(0), 1.0)
assert np.isclose(heaviside(0.5), 1.0)
assert np.isclose(heaviside(-0.5), 0.0)
assert np.isclose(heaviside(-0.5, -1), 1.0)
assert np.isclose(heaviside(-2, -1), 0)
def test_higham_polynomial():
eigs = np.arange(10)
assert np.isclose(higham_polynomial(eigs, eigs[-1]), 0.0)
assert np.isclose(higham_polynomial(eigs, 0), sum(eigs))
assert np.isclose(higham_polynomial(eigs, 5), sum(eigs[5:] - 5))
assert np.isclose(higham_polynomial(eigs, 8), sum(eigs[8:] - 8))
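# Worked numbers, assuming higham_polynomial(eigs, shift) sums the positive
# parts max(eig - shift, 0) as the assertions above imply: with eigs = 0..9,
# a shift of 8 leaves (8 - 8) + (9 - 8) = 1, while a shift of 0 returns
# sum(eigs) = 45.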
def test_higham_root():
dim = 20
np.random.seed(42)
mat = np.random.random((dim, dim))
mat = 0.5 * (mat + mat.T)
w, _ = np.linalg.eigh(mat)
target_trace = np.round(w[-1] - 1)
sigma = higham_root(w, target_trace)
assert np.isclose(higham_polynomial(w, shift=sigma), target_trace)
with pytest.raises(ValueError):
higham_root(w, target_trace=-1)
tw = higham_root(w, target_trace=0)
assert np.isclose(tw, w[-1])
def test_matrix_2_tensor():
dim = 10
np.random.seed(42)
mat = np.random.random((dim**2, dim**2))
mat = 0.5 * (mat + mat.T)
tensor = map_to_tensor(mat)
for p, q, r, s in product(range(dim), repeat=4):
assert np.isclose(tensor[p, q, r, s], mat[p * dim + q, r * dim + s])
test_mat = map_to_matrix(tensor)
assert np.allclose(test_mat, mat)
with pytest.raises(TypeError):
map_to_tensor(np.zeros((4, 4, 4, 4)))
with pytest.raises(TypeError):
map_to_matrix(np.zeros((4, 4)))
def test_reconstruction():
dim = 20
    np.random.seed(42)
"""
Created on Wed Jan 15 11:17:10 2020
@author: mesch
"""
from colorama import init, Fore, Back
init(autoreset=True)  # convert ANSI color codes so they render on Windows consoles
import copy
from pyqum.instrument.benchtop import RSA5
from pyqum.instrument.benchtop import PSGA
from pyqum.instrument.modular import AWG
from pyqum.instrument.logger import status_code
from pyqum.instrument.analyzer import curve
from numpy import sin, cos, pi, array, lcm, float64, sum, dot
# print('lcm of 12 and 10 is %s' %lcm(12,10))
# Initialize instruments:
# PSGA
saga = PSGA.Initiate()
PSGA.rfoutput(saga, action=['Set', 1])
PSGA.frequency(saga, action=['Set', "5.5" + "GHz"])
PSGA.power(saga, action=['Set', "12" + "dBm"])
# Rigol SA
rsa = RSA5.Initiate()
RSA5.frequency(rsa, action=['Set','5.525GHz'])
RSA5.fspan(rsa, action=['Set','150MHz'])
RSA5.rbw(rsa, action=['Set','1MHz'])
RSA5.vbw(rsa, action=['Set','100kHz'])
# AWG
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
AWG.ref_clock_source(awgsess, action=['Set',int(1)]) # External 10MHz clock-reference
AWG.predistortion_enabled(awgsess, action=['Set',True])
AWG.output_mode_adv(awgsess, action=['Set',int(2)]) # Sequence output mode
AWG.arb_sample_rate(awgsess, action=['Set',float(1250000000)]) # maximum sampling rate
AWG.active_marker(awgsess, action=['Set','1']) # master
AWG.marker_delay(awgsess, action=['Set',float(0)])
AWG.marker_pulse_width(awgsess, action=['Set',float(1e-7)])
AWG.marker_source(awgsess, action=['Set',int(7)])
samplingrate = AWG.arb_sample_rate(awgsess)[1]
dt = 1e9/samplingrate # in ns
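# For reference: at the 1.25 GS/s maximum rate set above,
# dt = 1e9 / 1.25e9 = 0.8 ns per sample.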
# PRESET Output:
for ch in range(2):
channel = str(ch + 1)
AWG.output_config(awgsess, RepCap=channel, action=["Set", 0]) # Single-ended
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
# output settings:
for ch in range(2):
channel = str(ch + 1)
AWG.output_enabled(awgsess, RepCap=channel, action=["Set", int(1)]) # ON
AWG.output_filter_enabled(awgsess, RepCap=channel, action=["Set", True])
AWG.output_config(awgsess, RepCap=channel, action=["Set", int(2)]) # Amplified 1:2
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
def AWG_Sinewave(ifreq,IQparams):
'''
    ifreq: IF frequency in MHz
    IQparams: [Ioffset, Qoffset, ampratio, Iphase, Qphase]
    '''
AWG.Clear_ArbMemory(awgsess)
WAVE = []
# print("ampratio: %s" %type(ampratio))
Ioffset, Qoffset, ampratio, Iphase, Qphase = IQparams
if (ampratio > -1.0) and (ampratio < 1.0):
Iamp = 1
Qamp = Iamp * ampratio
else:
Qamp = 1
Iamp = Qamp/ampratio
    ifvoltage = [min(abs(Qamp),1), min(abs(Iamp),1)] # clamp amplitudes within 1 V
iffunction = ['sin', 'cos']
iffreq = [ifreq, ifreq]
ifoffset = [Qoffset, Ioffset]
# Iphase = 0
# relphase = min(abs(relphase), 90)
# Qphase = Iphase + relphase
ifphase = [Qphase, Iphase]
# construct waveform:
for ch in range(2):
channel = str(ch + 1)
Nperiod = lcm(round(1000/iffreq[ch]/dt*100),800)//100
Nperiod *= 8
# print("Waveform contains %s points per sequence" %Nperiod)
        waveform = [ifvoltage[ch] * eval(iffunction[ch] + '(x*%s*%s/1000*2*pi + %s/180*pi)' %(dt,iffreq[ch],ifphase[ch])) + ifoffset[ch] for x in range(Nperiod)]
        stat, wave = AWG.CreateArbWaveform(awgsess, waveform)
# print('Waveform channel %s: %s <%s>' %(channel, wave, status_code(stat)))
WAVE.append(wave)
# Building Sequences:
for ch in range(2):
channel = str(ch + 1)
status, seqhandl = AWG.CreateArbSequence(awgsess, [WAVE[ch]], [1]) # loop# canbe >1 if longer sequence is needed in the future!
# print('Sequence channel %s: %s <%s>' %(channel, seqhandl, status_code(status)))
# Channel Assignment:
stat = AWG.arb_sequence_handle(awgsess, RepCap=channel, action=["Set", seqhandl])
# print('Sequence channel %s embeded: %s <%s>' %(channel, stat[1], status_code(stat[0])))
# Trigger Settings:
for ch in range(2):
channel = str(ch + 1)
AWG.operation_mode(awgsess, RepCap=channel, action=["Set", 0])
AWG.trigger_source_adv(awgsess, RepCap=channel, action=["Set", 0])
AWG.Init_Gen(awgsess)
AWG.Send_Pulse(awgsess, 1)
return
class IQ_Cal:
def __init__(self, suppression='LO', IQparams=array([0.018,-0.022,-1/0.707,-7.1,0.]), STEP=array([-0.5,-0.5,0.5,12,12])):
self.IQparams = IQparams
self.STEP = STEP
self.suppression = suppression
if self.suppression == 'LO':
self.var = copy.copy(self.IQparams[:2])
self.step = self.STEP[:2]
elif self.suppression == 'MR':
self.var = copy.copy(self.IQparams[2:])
self.step = self.STEP[2:]
def nelder_mead(self, no_improve_thr=10e-6, no_improv_break=10, max_iter=0,
alpha=1., gamma=2., rho=-0.5, sigma=0.5, time=0):
        '''
        Pure Python/Numpy implementation of the Nelder-Mead algorithm,
        adapted to tune self.var (the IQ offsets, or the imbalance and phase
        skew, depending on self.suppression) against the power read back
        from the spectrum analyzer.
        Reference: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
        @param no_improve_thr, no_improv_break (float, int): break after
            no_improv_break iterations with an improvement lower than
            no_improve_thr
        @param max_iter (int): always break after this number of iterations;
            set it to 0 to loop indefinitely
        @param alpha, gamma, rho, sigma (floats): parameters of the algorithm
            (see the Wikipedia page for reference)
        @param time (int): pass counter; even passes target the LO leakage,
            odd passes the mirror sideband
        return: array of (optimized IQparams, best score)
        '''
# def params(IQparams, index):
# if index == 0:
# params = IQparams[:2] # IQ offsets
# else:
# params = IQparams[2:] # IQ imbalance, phase skew
# return params
index = time%2
dim = len(self.var)
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
prev_best = power
no_improv = 0
res = [[self.var, prev_best]]
# while True:
# print("LOPower: %s" %power)
# if bool(input('hello')): break
for i in range(dim):
x = copy.copy(self.var)
x[i] = x[i] + self.step[i]
print('applying %s' %x)
"tell AWG to apply DC offset(x) on I & Q"
# params(IQparams, index) = x
if self.suppression == 'LO': self.IQparams[:2] = x
elif self.suppression == 'MR': self.IQparams[2:] = x
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
res.append([x, score])
# simplex iter
iters = 0
while 1:
# order
res.sort(key=lambda x: x[1])
if self.suppression == 'LO': self.IQparams[:2] = res[0][0]
elif self.suppression == 'MR': self.IQparams[2:] = res[0][0]
print(Fore.YELLOW + "\rProgress time#%s: %s" %(time, self.IQparams), end='\r', flush=True)
best = res[0][1]
# break after max_iter
if max_iter and iters >= max_iter:
return res[0]
iters += 1
# break after no_improv_break iterations with no improvement
# print('...best so far:', best)
# AWG_Sinewave(25, self.IQparams)
# if float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) < -65. and float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) < -65.:
# return array([self.IQparams, best, 0.])
if best < prev_best - no_improve_thr or best == prev_best:
no_improv = 0
prev_best = best
else:
no_improv += 1
if no_improv >= no_improv_break:
AWG_Sinewave(25, self.IQparams)
print("Rest at Optimized IQ Settings: %s" %self.IQparams)
return array([self.IQparams, best]) # Optimized parameters
# centroid
x0 = [0.] * dim
for tup in res[:-1]:
for i, c in enumerate(tup[0]):
x0[i] += c / (len(res)-1)
# reflection
xr = x0 + alpha*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xr
elif self.suppression == 'MR': self.IQparams[2:] = xr
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
rscore = power
if res[0][1] <= rscore < res[-2][1]:
del res[-1]
res.append([xr, rscore])
continue
# expansion
if rscore < res[0][1]:
xe = x0 + gamma*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xe
elif self.suppression == 'MR': self.IQparams[2:] = xe
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
escore = power
if escore < rscore:
del res[-1]
res.append([xe, escore])
continue
else:
del res[-1]
res.append([xr, rscore])
continue
# contraction
xc = x0 + rho*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xc
elif self.suppression == 'MR': self.IQparams[2:] = xc
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
cscore = power
if cscore < res[-1][1]:
del res[-1]
res.append([xc, cscore])
continue
# reduction
x1 = res[0][0]
nres = []
for tup in res:
redx = x1 + sigma*(tup[0] - x1)
if self.suppression == 'LO': self.IQparams[:2] = redx
elif self.suppression == 'MR': self.IQparams[2:] = redx
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
nres.append([redx, score])
res = nres
if __name__ == "__main__":
LO_0 = float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0])
Mirror_0 = float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0])
Initial = [0.018, -0.022, -1/0.707, -7.1, 0.]
time = 0
OPT = IQ_Cal()
OPT.IQparams = array(Initial,dtype=float64)
result = OPT.nelder_mead(time = time)
prev = result[0]
no_improv, no_improv_thr, no_improv_break = 0, 1e-5, 10
LO, Mirror, T = [], [], []
while True:
time += 1
if time%2: OPT = IQ_Cal('MR',result[0])
else: OPT = IQ_Cal('LO',result[0])
result = OPT.nelder_mead(time = time)
# if len(result) == 3:
# print("Optimized IQ parameters:\n %s" %result)
# break
LO.append(float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) - LO_0)
Mirror.append(float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) - Mirror_0)
print(Back.BLUE + Fore.WHITE + "Mirror has been suppressed for %s from %s" %(Mirror[-1],Mirror_0))
T.append(time)
        ssq = sum((result[0] - prev)**2)
import numpy as np
import matplotlib.pyplot as plt
# Make some fake data.
a = b = np.arange(0, 3, .02)
c = np.exp(a)
d = c[::-1]
x = np.array(['raw', 'last_fired', 'changepoint'])
activity_count = 10
# frequencies in the order: 1 min, 30 sec, 6 sec
hmm_freq_1min = np.array([0.246, 0.279, 0.157])
hmm_freq_30sec = np.array([0.298, 0.161, 0.251])
hmm_freq_6sec = np.array([0.318, 0.176, 0.394])
"""The ``textures`` module includes functions to generate textures.
"""
import numpy as np
from penkit.textures.util import fit_texture
def make_lines_texture(num_lines=10, resolution=50):
"""Makes a texture consisting of a given number of horizontal lines.
Args:
num_lines (int): the number of lines to draw
resolution (int): the number of midpoints on each line
Returns:
A texture.
"""
x, y = np.meshgrid(
np.hstack([np.linspace(0, 1, resolution), np.nan]),
np.linspace(0, 1, num_lines),
)
y[np.isnan(x)] = np.nan
return x.flatten(), y.flatten()
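# For illustration: make_lines_texture(num_lines=2, resolution=3) returns
# x = [0, 0.5, 1, nan, 0, 0.5, 1, nan] and y = [0, 0, 0, nan, 1, 1, 1, nan];
# the NaN appended to each row acts as a pen-up break between the lines when
# the texture is drawn.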
def make_grid_texture(num_h_lines=10, num_v_lines=10, resolution=50):
"""Makes a texture consisting of a grid of vertical and horizontal lines.
Args:
num_h_lines (int): the number of horizontal lines to draw
num_v_lines (int): the number of vertical lines to draw
resolution (int): the number of midpoints to draw on each line
Returns:
A texture.
"""
x_h, y_h = make_lines_texture(num_h_lines, resolution)
y_v, x_v = make_lines_texture(num_v_lines, resolution)
return np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v])
def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):
"""Makes a texture consisting of a spiral from the origin.
Args:
spirals (float): the number of rotations to make
ccw (bool): make spirals counter-clockwise (default is clockwise)
offset (float): if non-zero, spirals start offset by this amount
resolution (int): number of midpoints along the spiral
Returns:
A texture.
"""
dist = np.sqrt(np.linspace(0., 1., resolution))
if ccw:
direction = 1.
else:
direction = -1.
angle = dist * spirals * np.pi * 2. * direction
spiral_texture = (
(np.cos(angle) * dist / 2.) + 0.5,
(np.sin(angle) * dist / 2.) + 0.5
)
return spiral_texture
def make_hex_texture(grid_size=2, resolution=1):
    """Makes a texture consisting of a grid of hexagons.
Args:
grid_size (int): the number of hexagons along each dimension of the grid
resolution (int): the number of midpoints along the line of each hexagon
Returns:
A texture.
"""
grid_x, grid_y = np.meshgrid(
np.arange(grid_size),
np.arange(grid_size)
)
ROOT_3_OVER_2 = np.sqrt(3) / 2
ONE_HALF = 0.5
grid_x = (grid_x * np.sqrt(3) + (grid_y % 2) * ROOT_3_OVER_2).flatten()
grid_y = grid_y.flatten() * 1.5
grid_points = grid_x.shape[0]
x_offsets = np.interp(np.arange(4 * resolution),
np.arange(4) * resolution, [
ROOT_3_OVER_2,
0.,
-ROOT_3_OVER_2,
-ROOT_3_OVER_2,
])
y_offsets = np.interp(np.arange(4 * resolution),
np.arange(4) * resolution, [
-ONE_HALF,
-1.,
-ONE_HALF,
ONE_HALF
])
tmx = 4 * resolution
    x_t = np.tile(grid_x, (tmx, 1))
from enum import Enum
import numpy as np
from cvxopt import matrix, solvers
from processing import vector_of_quants
import math
class KnowledgePatternManager:
@staticmethod
def checkConsistency(knowledgePattern):
return KnowledgePatternManager.__getConsistencyChecker(knowledgePattern.type) \
.isConsistent(knowledgePattern)
@staticmethod
def __getConsistencyChecker(type):
if type == KnowledgePatternType.QUANTS:
return QuantConsistencyChecker()
elif type == KnowledgePatternType.DISJUNCTS:
return DisjunctConsistencyChecker()
elif type == KnowledgePatternType.CONJUNCTS:
return ConjunctConsistencyChecker()
else:
raise TypeError("Correct type of knowledge pattern")
@staticmethod
def getProbabilityFormula(knowledgePattern, formulaPattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getQuantsVector(formulaPattern, int(math.log(size, 2)))
return LinearProgrammingProblemSolver.findOptimalFormulaValues(matrix, intervals, size, vector)
@staticmethod
def __getEvidenceCorrector(type):
if type == EvidencePatternType.DETERMINISTIC:
return DeterministicEvidenceCorrector()
elif type == EvidencePatternType.STOCHASTIC:
return StochasticEvidenceCorrector()
elif type == EvidencePatternType.INACCURATE:
return InaccurateEvidenceCorrector()
@staticmethod
def correctEvidenceData(knowledgePattern, evidencePattern):
return KnowledgePatternManager.__getEvidenceCorrector(evidencePattern.type).getCorrectData(knowledgePattern, evidencePattern)
class FormulaManager:
@staticmethod
def getQuantsVector(formulaPattern, size):
return vector_of_quants(formulaPattern.string, size)
@staticmethod
def getFormulaForOptimise(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2**(evidencePattern.size)
result_formula = np.zeros(size)
vector = EvidenceManager.getSubIdealProbability(evidencePattern)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
formula = np.dot(formula, np.dot(I, vector)[i])
result_formula += formula
return result_formula
@staticmethod
def getConjunctstoQuantsVector(vector):
return np.dot(MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(len(vector), 2))), vector)
@staticmethod
def getFormulaForOptimiseIn(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
matrix_for_opt = FormulaManager.getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern)
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros(2 **evidencePattern.size)
result_formula_max = np.zeros(2 **evidencePattern.size)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
prob = LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, formula).array
result_formula_min += I[i]*prob[0]
result_formula_max += I[i]*prob[1]
result = np.vstack([result_formula_min, result_formula_max])
return result
@staticmethod
def getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern):
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
Matrix = np.zeros((2 ** evidencePattern.size, knowledgePattern.size), dtype = np.double)
for i in range(0, 2 ** evidencePattern.size):
for j in range(0, 2 **evidencePattern.size):
Matrix[i][int(ideal[j])] = I[i][j]
return Matrix
class EvidenceManager:
@staticmethod
def getConjunctsVector(evidencePattern):
arr_conj = []
num_conj = 0
p_arr = evidencePattern.p_array
for i in range(len(p_arr)):
if p_arr[i] == 0: continue #?
num_conj += pow(2, p_arr[i] - 1)
arr_conj.append(num_conj)
num_conj = 0
m_arr = evidencePattern.m_array
        for i in range(len(m_arr)):
            num_conj += pow(2, m_arr[i] - 1)
arr_conj.append(num_conj)
return np.array(arr_conj)
@staticmethod
def getProbabilityOfDeterministicEvidence(knowledgePattern, mas):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = MatrixProducer.getTMatrix(mas, int(math.log(size, 2)))[0].tolist()
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofStochasticEvidence(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getFormulaForOptimise(knowledgePattern, evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofInaccurateEvidence(knowledgePattern, evidencePattern):
size = evidencePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vectors = FormulaManager.getFormulaForOptimiseIn(knowledgePattern, evidencePattern)
intervals = EvidenceManager.getSubIdealIntervalProbability(evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValuesIn(matrix, intervals, size, vectors)
@staticmethod
def getSubIdealProbability(evidencePattern):
vector = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][1]
return vector
@staticmethod
def getSubIdealIntervalProbability(evidencePattern):
vector_min = np.ones(2 ** evidencePattern.size)
vector_max = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector_min[i+1] = array[i][1]
vector_max[i+1] = array[i][2]
vector = []
vector.append(vector_min)
vector.append(vector_max)
return vector
@staticmethod
def getSubIdeal(evidencePattern):
vector = np.zeros(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][0]
return vector
class EvidencePatternType(Enum):
    DETERMINISTIC = 'deterministic'
    STOCHASTIC = 'stochastic'
    INACCURATE = 'inaccurate'
class KnowledgePatternType(Enum):
    QUANTS = 'quants'
    DISJUNCTS = 'disjuncts'
    CONJUNCTS = 'conjuncts'
class ConsistencyChecker:
@staticmethod
def isConsistent(knowledgePattern):
        raise NotImplementedError("This is a method of an abstract class; use an appropriate implementation")
class EvidenceCorrector:
@staticmethod
    def getCorrectData(knowledgePattern, evidencePattern):
        raise NotImplementedError("This is a method of an abstract class; use an appropriate implementation")
class DeterministicEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
        # TODO: sort out handling of the 1 and 0 edge cases
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalEvidenceValues(matrix, intervals, size, MatrixProducer.getEvidencevector(evidencePattern.arr, int(math.log(size, 2))), intervals, MatrixProducer.getTMatrix(evidencePattern.arr, int(math.log(size, 2))))
class StochasticEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result = [[0, 0] for i in range(knowledgePattern.size)]
vector = EvidenceManager.getSubIdealProbability(evidencePattern) #p_ca
I = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(knowledgePattern.size, 2)))
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vector_quants = np.dot(I_1, vector)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
intervals = np.array(knowledgePattern.array, dtype=np.double)
for i in range(0, 2 ** evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
divider = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
numerator = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))
ideal_ = LinearProgrammingProblemSolver.findOptimalStochasticEvidenceValues(I, intervals, size, numerator, divider)
if len(ideal_) == 0:
return EvidenceCorrectorResult(False, [])
for j in range(size):
result[j][0] += round(vector_quants[i] * ideal_[j][0], 3)
result[j][1] += round(vector_quants[i] * ideal_[j][1], 3)
if result[0][0] == 0: return EvidenceCorrectorResult(False, [])
return EvidenceCorrectorResult(True, result)
class InaccurateEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros((size, size_evidence))
result_formula_max = np.zeros((size, size_evidence))
I = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(knowledgePattern.size, 2)))
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
#vector_quants = np.dot(I_1, vector)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
intervals = np.array(knowledgePattern.array, dtype=np.double)
for i in range(0, 2 ** evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
divider = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
numerator = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))
ideal_ = LinearProgrammingProblemSolver.findOptimalStochasticEvidenceValues(I, intervals, size, numerator, divider)
if len(ideal_) == 0:
return EvidenceCorrectorResult(False, [])
for j in range(size):
result_formula_min[j] += I_1[i] * ideal_[j][0]
result_formula_max[j] += I_1[i] * ideal_[j][1]
return LinearProgrammingProblemSolver.findOptimalInaccurateEvidenceValues(I_1, EvidenceManager.getSubIdealIntervalProbability(evidencePattern),size, size_evidence, result_formula_min, result_formula_max)
class QuantConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getIdentityMatrix(size)
intervals = np.array(knowledgePattern.array, dtype=np.double)
result = LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
if result.consistent:
result = LinearProgrammingProblemSolver.findNormalizedOptimalValues(np.array(result.array, dtype=np.double),
size)
return result
class ConjunctConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
class DisjunctConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getDisjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
class MatrixProducer:
@staticmethod
def getDisjunctsToQuantsMatrix(n):
return np.linalg.inv(MatrixProducer.getQuantsToDisjunctsMatrix(n))
@staticmethod
def getQuantsToDisjunctsMatrix(n):
if n == 0:
return np.array([1], dtype=np.double)
elif n == 1:
return np.array([[1, 1], [0, 1]], dtype=np.double)
else:
k = MatrixProducer.getQuantsToDisjunctsMatrix(n - 1)
i = np.ones((2 ** (n - 1), 2 ** (n - 1)), dtype=np.double)
k_o = k.copy()
k_o[0] = [0] * 2 ** (n - 1)
return np.block([[k, k], [k_o, i]])
@staticmethod
def getConjunctsToQuantsMatrix(n):
if n == 0:
return np.array([1], dtype=np.double)
elif n == 1:
return np.array([[1, -1], [0, 1]], dtype=np.double)
else:
i = MatrixProducer.getConjunctsToQuantsMatrix(n - 1)
o = np.zeros((2 ** (n - 1), 2 ** (n - 1)), dtype=np.double)
return np.block([[i, (-1) * i], [o, i]])
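    # For illustration, the n = 2 case of the recursion above is
    #   getConjunctsToQuantsMatrix(2) = [[1, -1, -1,  1],
    #                                    [0,  1,  0, -1],
    #                                    [0,  0,  1, -1],
    #                                    [0,  0,  0,  1]]
    # getDisjunctsToQuantsMatrix is obtained as the matrix inverse of
    # getQuantsToDisjunctsMatrix above.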
@staticmethod
def getIdentityMatrix(size):
return np.eye(size, dtype=np.double)
@staticmethod
def getTMatrix(mas, size):
matrix = np.array([1])
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(1)
J_1 = np.linalg.inv(I_1)
H_p = np.array([[0, 0], [0, 1]])
H_m = np.array([[1, 0], [0, 0]])
H = MatrixProducer.getIdentityMatrix(2)
for i in range(size, 0, -1):
if i in mas[0]:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H_p), I_1))
elif i in mas[1]:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H_m), I_1))
else:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H), I_1))
return matrix
@staticmethod
def getEvidencevector(mas, size):
return MatrixProducer.getTMatrix(mas, size)[0]
class LinearProgrammingProblemSolver:
@staticmethod
def findOptimalFormulaValues(matrixs, array, size, vector):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.dot(np.array(MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(len(vector), 2))).transpose()), vector)
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForFormula(a, b, c)
@staticmethod
def findOptimalConjunctsFormulaValues(matrixs, array, size, vector):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(vector)
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForFormula(a, b, c)
@staticmethod
def findOptimalValues(matrixs, array, size):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(np.zeros(size, dtype=np.double))
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForMatrices(a, b, c, size, array)
@staticmethod
def findNormalizedOptimalValues(array, size):
a = np.vstack(((-1) * np.ones(size, dtype=np.double), np.ones(size, dtype=np.double),
(-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack(
((-1) * np.ones(1, dtype=np.double), np.ones(1, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(np.zeros(size, dtype=np.double))
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForMatrices(a, b, c, size, array)
@staticmethod
def optimizeForMatrices(a, b, c, size, intervals):
solvers.options['show_progress'] = False
_intervals = intervals.copy()
for i in range(size):
c[i] = 1
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return ConsistencyResult(False, [])
_intervals[i][0] = round(sol['x'][i], 3)
c[i] = -1
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return ConsistencyResult(False, [])
_intervals[i][1] = round(sol['x'][i], 3)
c[i] = 0
return ConsistencyResult(True, _intervals.tolist())
@staticmethod
def optimizeForFormula(a, b, c):
answer = np.zeros(2)
solvers.options['show_progress'] = False
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return ProbabilityFormulaResult(False, [])
ans = 0
for i in range(len(c)):
ans += sol['x'][i]*c[i]
answer[0] = round(ans, 3)
c = -1 * c
solvers.options['show_progress'] = False
sol = solvers.lp(c, a, b)
ans = 0
if sol['status'] != 'optimal':
return ProbabilityFormulaResult(False, [])
for i in range(len(c)):
ans += sol['x'][i]*c[i]
answer[1] = round(-ans, 3)
return ProbabilityFormulaResult(True, answer.tolist())
@staticmethod
def findOptimalEvidenceValues(matrixs, array, size, vector, intervals, T):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double), vector, (-1) * vector, np.zeros(size, dtype=np.double)))
t = np.hstack((np.zeros(size, dtype=np.double), array[:, 0], (-1) * array[:, 1], np.zeros(2, dtype=np.double), np.array([-1])))
a = np.column_stack((a, t))
a = matrix(a)
b = np.hstack((np.zeros(3 * size, dtype=np.double), np.array([1]), np.array([-1]), np.array([0])))
b = matrix(b)
return LinearProgrammingProblemSolver.optimizeForEvidenceMatrices(a, b, size, intervals, T)
@staticmethod
def findOptimalInaccurateEvidenceValues(matrixs, intervals, size, size_evidence, min_vect, max_vect):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size_evidence, dtype=np.double), np.eye(size_evidence, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size_evidence, dtype=np.double), (-1) *intervals[0], intervals[1]))
b = matrix(b)
return LinearProgrammingProblemSolver.optimizeForInaccurateEvidenceMatrices(a, b, np.array(min_vect), np.array(max_vect), size, size_evidence)
@staticmethod
def optimizeForInaccurateEvidenceMatrices(a, b, c1, c2, size, size_evidence):
solvers.options['show_progress'] = False
_intervals = np.zeros((size, 2))
for i in range(size):
c = np.double(c1[i])
c = matrix(c)
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return EvidenceCorrectorResult(False, [])
for j in range(size_evidence):
_intervals[i][0] += np.round(sol['x'][j] * c[j], 3)
c = (-1)*np.double(c2[i])
c = matrix(c)
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
                return EvidenceCorrectorResult(False, [])
_intervals[i][1] = np.double(0)
for j in range(size_evidence):
                _intervals[i][1] += np.round(sol['x'][j], 3) * np.round(c[j], 3)
import numpy as np
import matplotlib.pyplot as pl
import scope
from tqdm import tqdm
import itertools
from everest.pool import Pool
from everest.missions.k2 import CDPP
from everest.config import EVEREST_SRC
import os
import os.path
# astroML format for consistent plotting style
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=10, usetex=True)
# Number of targets to simulate
niter = 5
# Magnitude and motion arrays
mags = np.arange(10., 16., .5)
m_mags = np.arange(0., 21., 1)
# Unit test preprocess_last_window
# ==============================================================================
import pytest
import numpy as np
import pandas as pd
from skforecast.utils import preprocess_last_window
def test_output_preprocess_last_window_when_last_window_index_is_DatetimeIndex_and_has_frequency():
    '''
    Test values returned when last_window is a pandas Series with a DatetimeIndex
    and freq is not None.
    '''
last_window = pd.Series(
data = np.arange(3),
index = pd.date_range("1990-01-01", periods=3, freq='D')
)
results = preprocess_last_window(last_window)
    expected = (
        np.arange(3),
        pd.date_range("1990-01-01", periods=3, freq='D')
    )
    np.testing.assert_array_equal(results[0], expected[0])
    pd.testing.assert_index_equal(results[1], expected[1])
import cv2
import numpy as np
from scipy.ndimage import filters, measurements
from scipy.ndimage.morphology import (
binary_dilation,
binary_fill_holes,
distance_transform_cdt,
distance_transform_edt,
)
from skimage.morphology import remove_small_objects, watershed
####
def proc_np_hv(pred, marker_mode=2, energy_mode=2, rgb=None):
"""
Process Nuclei Prediction with XY Coordinate Map
Args:
        pred: prediction output, assuming
              channel 0 contains the probability map of nuclei
              channel 1 contains the regressed X-map
              channel 2 contains the regressed Y-map
"""
assert marker_mode == 2 or marker_mode == 1, "Only support 1 or 2"
assert energy_mode == 2 or energy_mode == 1, "Only support 1 or 2"
blb_raw = pred[..., 0]
h_dir_raw = pred[..., 1]
v_dir_raw = pred[..., 2]
##### Processing
blb = np.copy(blb_raw)
blb[blb >= 0.5] = 1
blb[blb < 0.5] = 0
blb = measurements.label(blb)[0]
blb = remove_small_objects(blb, min_size=10)
    blb[blb > 0] = 1  # background is 0 already
#####
if energy_mode == 2 or marker_mode == 2:
h_dir = cv2.normalize(
h_dir_raw,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
v_dir = cv2.normalize(
v_dir_raw,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=21)
sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=21)
sobelh = 1 - (
cv2.normalize(
sobelh,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
)
sobelv = 1 - (
cv2.normalize(
sobelv,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
)
overall = np.maximum(sobelh, sobelv)
overall = overall - (1 - blb)
overall[overall < 0] = 0
if energy_mode == 2:
dist = (1.0 - overall) * blb
        ## nuclei values form mountains, so invert to get basins
dist = -cv2.GaussianBlur(dist, (3, 3), 0)
if marker_mode == 2:
overall[overall >= 0.4] = 1
overall[overall < 0.4] = 0
marker = blb - overall
marker[marker < 0] = 0
marker = binary_fill_holes(marker).astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
marker = measurements.label(marker)[0]
marker = remove_small_objects(marker, min_size=10)
if energy_mode == 1:
dist = h_dir_raw * h_dir_raw + v_dir_raw * v_dir_raw
dist[blb == 0] = np.amax(dist)
# nuclei values are already basins
dist = filters.maximum_filter(dist, 7)
dist = cv2.GaussianBlur(dist, (3, 3), 0)
if marker_mode == 1:
h_marker = np.copy(h_dir_raw)
v_marker = np.copy(v_dir_raw)
h_marker = np.logical_and(h_marker < 0.075, h_marker > -0.075)
v_marker = np.logical_and(v_marker < 0.075, v_marker > -0.075)
        marker = np.logical_and(h_marker > 0, v_marker > 0)
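    # Usage sketch (not the verbatim tail of this function): the `dist` and
    # `marker` maps computed above feed a marker-controlled watershed, e.g.
    # watershed(dist, marker, mask=blb), to split touching nuclei into
    # instance labels.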
"""
Copyright 2018-2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import random
import unittest
import mock
import numpy as np
from numpy.random import normal, exponential
from scipy.special import binom
from ikats.algo.pattern.collision import LOGGER as COLL_LOGGER
from ikats.algo.pattern.recognition import LOGGER as RECOG_LOGGER
from ikats.algo.sax.sliding_sax import LOGGER as SAX_LOGGER
from ikats.algo.pattern.random_proj import LOGGER, random_projections, regex_from_pattern_results, \
EMPTY_REGEX_MESSAGE, ConfigSax, ConfigCollision, ConfigRecognition
LOGGER = logging.getLogger(__name__)
# Add logs to the unittest stdout
for the_logger in [SAX_LOGGER, RECOG_LOGGER, COLL_LOGGER, LOGGER]:
the_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(funcName)s:%(message)s')
# Create another handler that will redirect log entries to STDOUT
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
the_logger.addHandler(stream_handler)
SAX_INFO = ConfigSax(paa=20,
sequences_size=1000,
with_mean=True,
with_std=True,
global_norm=False,
local_norm=True,
linear_filter=True,
recovery=0.8,
coefficients=[0.1, 0.9],
alphabet_size=10)
COLLISION_INFO = ConfigCollision(iterations=1, index=2, config_sax=SAX_INFO)
# Avoiding spark jobs here: already tested in test_recognition
RECOGNITION_INFO = ConfigRecognition(is_stopped_by_eq9=True,
is_algo_method_global=True,
min_value=1,
iterations=10,
radius=1.5,
neighborhood_method=2,
activate_spark=False)
def create_values(size, parameter, distribution):
"""
Create a pattern with a gaussian or exponential distribution, or a linear pattern. The timestamps are not created.
:param size: the number of points of the pattern
:type size: int
    :param parameter: the standard deviation of the gaussian distribution, or the lambda parameter of the exponential
    distribution. Not used if the distribution parameter is 'linear'.
:type parameter: int or float
:param distribution: the distribution of the pattern : gaussian, exponential, or linear
:type distribution: str
:return: the values of the pattern corresponding to the distribution
:rtype: numpy.ndarray
"""
    if distribution == 'gaussian':
        return normal(0, parameter, size)
    elif distribution == 'exponential':
        return exponential(1 / parameter, size)
    elif distribution == 'linear':
# example : size = 8 => linear_pattern = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5]
return np.arange(- int(size / 4), int(size / 4), 0.5)
def create_ts(pattern_positions, pattern_list, pattern_size, is_noise):
"""
    Create a time series from a list of patterns. The size has to be the same for all patterns.
    :param pattern_positions: the positions of the patterns in the time series.
    :type pattern_positions: list of int
    :param pattern_list: the values of the patterns
    :type pattern_list: list of numpy.ndarray
    :param pattern_size: the number of points of each pattern.
    :type pattern_size: int
    :param is_noise: whether to add noise, drawn from a gaussian (mean = 0, std = std(time_series) / 10)
    :type is_noise: bool
    :return: the time series with the timestamps
    :rtype: numpy.ndarray
    Example: pattern_positions = [1, ..., 1]; pattern_list = [pattern_10, pattern_4, ...]
    In this example, pattern_4 is at the beginning and the end of the time series.
"""
    # Create an array with the time series values
ts_values = np.array([])
for i in pattern_positions:
ts_values = np.concatenate((ts_values, pattern_list[i]), axis=0)
ts_size = len(pattern_positions) * pattern_size
# add noise
if is_noise is True:
std_ts = np.std(ts_values)
noise = normal(0, std_ts / 10, ts_size)
ts_values = np.array(list(map(lambda x, y: x + y, ts_values, noise)))
# add timestamps
timestamp = range(ts_size)
return np.array(list(map(lambda x, y: [x, y], timestamp, ts_values)))
def mock_read_ts(tsuid_list):
"""
    Mock of IkatsApi.ts.read method
    :param tsuid_list: the chosen ts name (str!)
    :type: str
    :return: list of numpy.array containing data ([[timestamp, time_series_values], ...])
    :rtype: list of numpy.array
    The 'random_projections' function calls IkatsTimeseries.read(tsuid_list=tsuid_list)[0];
    here, mock_read_ts(tsuid_list)[0] returns 'result'.
"""
result = np.array([])
# test_random_proj_one_ts
if tsuid_list in ["ts1"]:
        # The time series has 10 000 points, with 10 patterns created where pattern4 is a linear pattern. We check
        # if this pattern is deleted from the sequences list by the filter.
# Create patterns
pattern_size = 1000
pattern1 = create_values(pattern_size, 0.1, 'gaussian')
pattern2 = create_values(pattern_size, 2, 'exponential')
pattern3 = create_values(pattern_size, 200, 'gaussian')
pattern4 = create_values(pattern_size, 0, 'linear')
# Create the time serie
result = create_ts(pattern_positions=[0, 1, 2, 0, 3, 1, 1, 3, 0, 2],
pattern_list=[pattern1, pattern2, pattern3, pattern4],
pattern_size=pattern_size,
is_noise=True)
# test_random_proj_dataset
if tsuid_list in ["tsa", "tsb"]:
        # The time series has 10 000 points, with 10 patterns created where pattern4 is a linear pattern. We check
        # if this pattern is deleted from the sequences list by the filter.
# Create patterns
pattern_size = 1000
pattern1 = create_values(pattern_size, 0.1, 'gaussian')
pattern2 = create_values(pattern_size, 2, 'exponential')
pattern3 = create_values(pattern_size, 200, 'gaussian')
pattern4 = create_values(pattern_size, 0, 'linear')
# Create the time serie
if tsuid_list == "tsa":
result = create_ts(pattern_positions=[0, 1, 2, 0],
pattern_list=[pattern1, pattern2, pattern3, pattern4],
pattern_size=pattern_size,
is_noise=True)
if tsuid_list == "tsb":
result = create_ts(pattern_positions=[3, 1, 1, 3, 0, 2],
pattern_list=[pattern1, pattern2, pattern3, pattern4],
pattern_size=pattern_size,
is_noise=True)
# test_paa_values
if tsuid_list in ["test_paa_values"]:
# Create patterns
pattern_size = 1000
pattern1 = create_values(pattern_size, 0.1, 'gaussian')
pattern2 = create_values(pattern_size, 2, 'exponential')
pattern3 = create_values(pattern_size, 200, 'gaussian')
pattern4 = create_values(pattern_size, 0, 'linear')
# Create the time serie
result = create_ts(pattern_positions=[0, 1, 2, 0, 3, 1, 1, 3, 0, 2],
pattern_list=[pattern1, pattern2, pattern3, pattern4],
pattern_size=pattern_size,
is_noise=True)
if tsuid_list in ["testPatternA",
"tesPatternB",
"testPatternC",
"testPatternConstant",
"testPatternLinear",
"testPatternTooSmall",
"testPatternTrivialMatch",
"testPatternRealistic"]:
if tsuid_list == "testPatternA":
result = np.array([[np.float64(5000), -2],
[np.float64(6000), 2],
[np.float64(7000), -2],
[np.float64(8000), 0],
                               [np.float64(9000)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Compare drought buffering effect compared with basin stats
Created on Thu Dec 10 15:39:17 2020
@author: lizz
"""
import numpy as np
import matplotlib.pyplot as plt
import gSPEI as gSPEI
## Labels: (P)arametric or (NP)nonparametric;
## Standardization (1) lumped or (2) split by starting month
fpath_NP2 = './data/SPEI_Files/nonparametric-var_stom_c/'
## Settings in filenames
integration_times = np.arange(3, 28, 4) # all SPEI integration times used
modelnames = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GISS-E2-R', 'INMCM4', 'MIROC-ESM', 'NorESM1-M'] # all models used in comparison
scenarios = ['Rcp4p5', 'Rcp8p5'] # climate scenarios
## Basins in the order they are written
basin_names = ['INDUS','TARIM','BRAHMAPUTRA','ARAL SEA','COPPER','GANGES','YUKON','ALSEK','SUSITNA','BALKHASH','STIKINE','SANTA CRUZ',
'FRASER','BAKER','YANGTZE','SALWEEN','COLUMBIA','ISSYK-KUL','AMAZON','COLORADO','TAKU','MACKENZIE','NASS','THJORSA','JOEKULSA A F.',
'KUSKOKWIM','RHONE','SKEENA','OB','OELFUSA','MEKONG','DANUBE','NELSON RIVER','PO','KAMCHATKA','RHINE','GLOMA','HUANG HE','INDIGIRKA',
'LULE','RAPEL','SANTA','SKAGIT','KUBAN','TITICACA','NUSHAGAK','BIOBIO','IRRAWADDY','NEGRO','MAJES','CLUTHA','DAULE-VINCES',
'KALIXAELVEN','MAGDALENA','DRAMSELV','COLVILLE']
BasinArea=[1139075,1051731,518011,1233148,64959,1024462,829632,28422,49470,423657,51147,30599,
239678,30760,1745094,258475,668561,191032,5880854,390631,17967,1752001,21211,7527,7311,
118114,97485,42944,2701040,5678,787256,793704,1099380,73066,54103,190522,42862,988062,341227,
25127,15689,11882,7961,58935,107215,29513,24108,411516,130062,18612,17118,41993,
17157,261204,17364,57544] # area of each basin in km2
basin_glacier_area = [26893.8, 24645.4, 16606.7, 15176.7, 12998., 11216., 9535.4, 5614.8, 4304.,
3945.4, 3467.6, 3027.8, 2495.1, 2372.3, 2317.4, 2295.9, 1878.4, 1677.3,
1634.1, 1601.2, 1583.6, 1519.2, 1337.3, 1251.8, 1098.6, 1032.8, 904.2, 742.3,
739.5, 683.4, 485.7, 408.4, 374.7, 347.3, 312.7, 285.0, 269.4, 267.9, 248.4,
247.2, 238.1, 198.9, 159.5, 146., 134.5, 86.4, 76.2, 71.2, 64.1, 57.3, 46.5,
40.6, 37.9, 33.3, 32.1, 31.9]
yrs = np.linspace(1900, 2101, num=2412)
SPEI_by_model_C = {m: {} for m in modelnames} # create dictionary indexed by model name
for m in modelnames:
norunoff_f_m = fpath_NP2+'NRunoff_{}_{}_{}_Conduct.txt'.format(integration_times[3], m, scenarios[0])
wrunoff_f_m = fpath_NP2+'WRunoff_{}_{}_{}_Conduct.txt'.format(integration_times[3], m, scenarios[0])
    SPEI_by_model_C[m]['NRunoff'] = np.loadtxt(norunoff_f_m)
    SPEI_by_model_C[m]['WRunoff'] = np.loadtxt(wrunoff_f_m)
from typing import Any, Dict, Union
import numpy as np
from numpy.core.defchararray import center
import panda_gym
from panda_gym.envs.core import Task
from panda_gym.utils import distance
class ReachBimanual(Task):
def __init__(
self,
sim,
get_ee_position0,
get_ee_position1,
reward_type="sparse",
distance_threshold=0.05,
goal_range=0.35,
has_object = False,
absolute_pos = False,
obj_not_in_hand_rate = 1,
) -> None:
super().__init__(sim)
self.has_object = has_object
self.absolute_pos = absolute_pos
self.object_size = 0.04
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.obj_not_in_hand_rate = obj_not_in_hand_rate
self.get_ee_position0 = get_ee_position0
self.get_ee_position1 = get_ee_position1
self.goal_range_low = np.array([goal_range / 4, goal_range / 4, -goal_range/1.5])
self.goal_range_high = np.array([goal_range, goal_range, goal_range/1.5])
obj_xyz_range=[0.3, 0.3, 0]
self.obj_range_low = np.array([0.1, -obj_xyz_range[1] / 2, self.object_size/2])
self.obj_range_high = np.array(obj_xyz_range) + self.obj_range_low
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer(target_position=np.zeros(3), distance=0.9, yaw=45, pitch=-30)
self._max_episode_steps = 50
def _create_scene(self) -> None:
self.sim.create_plane(z_offset=-0.4)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=-0.575)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=0.575)
self.sim.create_sphere(
body_name="target0",
radius=0.02,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
)
self.sim.create_sphere(
body_name="target1",
radius=0.02,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.9, 0.1, 0.1, 0.3]),
)
self.sim.create_sphere(
body_name="target2",
radius=0.03,
mass=0.0,
ghost=True,
position=np.zeros(3),
            rgba_color=np.array([0.1, 0.1, 0.9, 0.5]),
        )
import numpy as np
from zhou_accv_2018 import p2p1l
# fix seed to allow for reproducible results
np.random.seed(42)
# instantiate a couple of points centered around the origin
pts = 0.6 * (np.random.random((2, 3)) - 0.5)
# 3D lines are parameterized as pts and direction stacked into a tuple
# instantiate a couple of points centered around the origin
pts_l = 0.6 * (np.random.random((1, 3)) - 0.5)
# generate normalized directions
directions = 2 * (np.random.random((1, 3)) - 0.5)
directions /= np.linalg.norm(directions, axis=1)[:, None]
line_3d = (pts_l, directions)
# Made up projective matrix
K = np.array([[160, 0, 320], [0, 120, 240], [0, 0, 1]])
# A pose
R_gt = np.array(
[
[0.89802142, -0.41500101, 0.14605372],
[0.24509948, 0.7476071, 0.61725997],
[-0.36535431, -0.51851499, 0.77308372],
]
)
t_gt = np.array([-0.0767557, 0.13917375, 1.9708239])
# sample 2 points from each line and stack all
pts_ls = np.hstack((pts_l, pts_l + directions)).reshape((-1, 3))
pts_all = np.vstack((pts, pts_ls))
# Project everything to 2D
pts_all_2d = (pts_all @ R_gt.T + t_gt) @ K.T
pts_all_2d = (pts_all_2d / pts_all_2d[:, -1, None])[:, :-1]
pts_2d = pts_all_2d[:2]
line_2d = pts_all_2d[2:].reshape((-1, 2, 2))
# Compute pose candidates. The problem is not minimal, so only one
# pose will be provided
poses = p2p1l(pts_2d=pts_2d, line_2d=line_2d, pts_3d=pts, line_3d=line_3d, K=K)
# The error criteria for lines is to ensure that both 3D points and
# direction, after transformation, are inside the plane formed by the
# line projection. We start by computing the plane normals
# line in 2D has two sampled points.
line_2d_c = np.linalg.solve(
    K, np.vstack((line_2d.reshape((2 * 1, 2)).T, np.ones((1, 2 * 1))))
)
from caffe2.python import core
from functools import partial
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
import os
def _gen_test_add_padding(with_pad_data=True,
is_remove=False):
def gen_with_size(args):
lengths, inner_shape = args
data_dim = [sum(lengths)] + inner_shape
lengths = np.array(lengths, dtype=np.int32)
if with_pad_data:
return st.tuples(
st.just(lengths),
hu.arrays(data_dim),
hu.arrays(inner_shape),
hu.arrays(inner_shape))
else:
return st.tuples(st.just(lengths), hu.arrays(data_dim))
min_len = 4 if is_remove else 0
lengths = st.lists(
st.integers(min_value=min_len, max_value=10),
min_size=0,
max_size=5)
inner_shape = st.lists(
st.integers(min_value=1, max_value=3),
min_size=0,
max_size=2)
return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
def _add_padding_ref(
start_pad_width, end_pad_width, ret_lengths,
data, lengths, start_padding=None, end_padding=None):
if start_padding is None:
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = (
end_padding if end_padding is not None else start_padding)
out_size = data.shape[0] + (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out[out_ptr:(out_ptr + start_pad_width)] = start_padding
out_ptr += start_pad_width
out[out_ptr:(out_ptr + length)] = data[in_ptr:(in_ptr + length)]
in_ptr += length
out_ptr += length
out[out_ptr:(out_ptr + end_pad_width)] = end_padding
out_ptr += end_pad_width
lengths_out = lengths + (start_pad_width + end_pad_width)
if ret_lengths:
return (out, lengths_out)
else:
return (out, )
def _remove_padding_ref(start_pad_width, end_pad_width, data, lengths):
pad_width = start_pad_width + end_pad_width
out_size = data.shape[0] - (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out_length = length - pad_width
out[out_ptr:(out_ptr + out_length)] = data[
(in_ptr + start_pad_width):(in_ptr + length - end_pad_width)]
in_ptr += length
out_ptr += out_length
lengths_out = lengths - (start_pad_width + end_pad_width)
return (out, lengths_out)
def _gather_padding_ref(start_pad_width, end_pad_width, data, lengths):
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = np.zeros(data.shape[1:], dtype=data.dtype)
pad_width = start_pad_width + end_pad_width
ptr = 0
for length in lengths:
for _ in range(start_pad_width):
start_padding += data[ptr]
ptr += 1
ptr += length - pad_width
for _ in range(end_pad_width):
end_padding += data[ptr]
ptr += 1
return (start_padding, end_padding)
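def _example_add_padding_ref():
    """A small worked sketch of the reference op above (helper added for
    illustration): two sequences with lengths [2, 1], padded with one zero
    row on each side."""
    data = np.arange(1, 4, dtype=np.float32).reshape(3, 1)
    out, lengths_out = _add_padding_ref(1, 1, True, data, np.array([2, 1]))
    # out -> [[0], [1], [2], [0], [0], [3], [0]], lengths_out -> [4, 3]
    return out, lengths_out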
class TestSequenceOps(serial.SerializedTestCase):
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=True),
ret_lengths=st.booleans(),
**hu.gcs)
@settings(deadline=1000)
def test_add_padding(
self, start_pad_width, end_pad_width, args, ret_lengths, gc, dc
):
lengths, data, start_padding, end_padding = args
start_padding = np.array(start_padding, dtype=np.float32)
end_padding = np.array(end_padding, dtype=np.float32)
outputs = ['output', 'lengths_out'] if ret_lengths else ['output']
op = core.CreateOperator(
'AddPadding', ['data', 'lengths', 'start_padding', 'end_padding'],
outputs,
padding_width=start_pad_width,
end_padding_width=end_pad_width
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths, start_padding, end_padding],
reference=partial(
_add_padding_ref, start_pad_width, end_pad_width, ret_lengths
)
)
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=False),
**hu.gcs)
def test_add_zero_padding(self, start_pad_width, end_pad_width, args, gc, dc):
lengths, data = args
op = core.CreateOperator(
'AddPadding',
['data', 'lengths'],
['output', 'lengths_out'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
gc,
op,
[data, lengths],
partial(_add_padding_ref, start_pad_width, end_pad_width, True))
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
data=hu.tensor(min_dim=1, max_dim=3),
**hu.gcs)
def test_add_padding_no_length(self, start_pad_width, end_pad_width, data, gc, dc):
op = core.CreateOperator(
'AddPadding',
['data'],
['output', 'output_lens'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
gc,
op,
[data],
partial(
_add_padding_ref, start_pad_width, end_pad_width, True,
lengths=np.array([data.shape[0]])))
# Uncomment the following seed to make this fail.
# @seed(302934307671667531413257853548643485645)
# See https://github.com/caffe2/caffe2/issues/1547
@unittest.skip("flaky test")
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=False, is_remove=True),
**hu.gcs)
def test_remove_padding(self, start_pad_width, end_pad_width, args, gc, dc):
lengths, data = args
op = core.CreateOperator(
'RemovePadding',
['data', 'lengths'],
['output', 'lengths_out'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=partial(_remove_padding_ref, start_pad_width, end_pad_width))
@given(start_pad_width=st.integers(min_value=0, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=True),
**hu.gcs)
@settings(deadline=10000)
def test_gather_padding(self, start_pad_width, end_pad_width, args, gc, dc):
lengths, data, start_padding, end_padding = args
padded_data, padded_lengths = _add_padding_ref(
start_pad_width, end_pad_width, True, data,
lengths, start_padding, end_padding)
op = core.CreateOperator(
'GatherPadding',
['data', 'lengths'],
['start_padding', 'end_padding'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[padded_data, padded_lengths],
reference=partial(_gather_padding_ref, start_pad_width, end_pad_width))
@given(data=hu.tensor(min_dim=3, max_dim=3, dtype=np.float32,
elements=hu.floats(min_value=-np.inf,
max_value=np.inf),
min_value=1, max_value=10),
**hu.gcs)
@settings(deadline=10000)
def test_reverse_packed_segs(self, data, gc, dc):
max_length = data.shape[0]
batch_size = data.shape[1]
lengths = np.random.randint(max_length + 1, size=batch_size)
op = core.CreateOperator(
"ReversePackedSegs",
["data", "lengths"],
["reversed_data"])
def op_ref(data, lengths):
rev_data = np.array(data, copy=True)
for i in range(batch_size):
seg_length = lengths[i]
for j in range(seg_length):
rev_data[j][i] = data[seg_length - 1 - j][i]
return (rev_data,)
def op_grad_ref(grad_out, outputs, inputs):
return op_ref(grad_out, inputs[1]) + (None,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=op_ref,
output_to_grad='reversed_data',
grad_reference=op_grad_ref)
@given(data=hu.tensor(min_dim=1, max_dim=3, dtype=np.float32,
elements=hu.floats(min_value=-np.inf,
max_value=np.inf),
min_value=10, max_value=10),
indices=st.lists(st.integers(min_value=0, max_value=9),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_remove_data_blocks(self, data, indices, gc, dc):
indices = np.array(indices)
op = core.CreateOperator(
"RemoveDataBlocks",
["data", "indices"],
["shrunk_data"])
def op_ref(data, indices):
unique_indices = np.unique(indices)
sorted_indices = np.sort(unique_indices)
shrunk_data = np.delete(data, sorted_indices, axis=0)
return (shrunk_data,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, indices],
reference=op_ref)
@given(elements=st.lists(st.integers(min_value=0, max_value=9),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_find_duplicate_elements(self, elements, gc, dc):
mapping = {
0: "a",
1: "b",
2: "c",
3: "d",
4: "e",
5: "f",
6: "g",
7: "h",
8: "i",
9: "j"}
data = np.array([mapping[e] for e in elements], dtype='|S')
op = core.CreateOperator(
"FindDuplicateElements",
["data"],
["indices"])
def op_ref(data):
unique_data = []
indices = []
for i, e in enumerate(data):
if e in unique_data:
indices.append(i)
else:
unique_data.append(e)
            return (np.array(indices, dtype=np.int64),)
import glob as glob
import matplotlib as mpl
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
import bz2
import corner
import json
import pathlib
import pickle
import warnings
from datetime import datetime, timedelta
from astropy import constants as const
from astropy import units as uni
from astropy.io import ascii, fits
from astropy.time import Time
from mpl_toolkits.axes_grid1 import ImageGrid
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
warnings.filterwarnings("ignore", r"Degrees of freedom <= 0 for slice")
def _bad_idxs(s):
if s == "[]":
return []
else:
# Merges indices/idxs specified in `s` into a single numpy array of
# indices to omit
s = s.strip("[]").split(",")
bad_idxs = list(map(_to_arr, s))
bad_idxs = np.concatenate(bad_idxs, axis=0)
return bad_idxs
def _to_arr(idx_or_slc):
# Converts str to 1d numpy array
# or slice to numpy array of ints.
# This format makes it easier for flattening multiple arrays in `_bad_idxs`
if ":" in idx_or_slc:
lower, upper = map(int, idx_or_slc.split(":"))
return np.arange(lower, upper + 1)
else:
return np.array([int(idx_or_slc)])
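def _example_bad_idxs():
    """Usage sketch (helper added for illustration): "[0,5:7,12]" expands to
    array([0, 5, 6, 7, 12]); note slices are inclusive on both ends here."""
    return _bad_idxs("[0,5:7,12]")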
def compress_pickle(fname_out, fpath_pickle):
data = load_pickle(fpath_pickle)
with bz2.BZ2File(f"{fname_out}.pbz2", "wb") as f:
pickle.dump(data, f)
def decompress_pickle(fname):
data = bz2.BZ2File(fname, "rb")
return pickle.load(data)
def get_evidences(base_dir, relative_to_spot_only=False):
fit_R0 = "fitR0" if "fit_R0" in base_dir else "NofitR0"
species = ["Na", "K", "TiO", "Na_K", "Na_TiO", "K_TiO", "Na_K_TiO"]
model_names_dict = {
"clear": f"NoHet_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+cloud": f"NoHet_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+haze": f"NoHet_FitP0_NoClouds_Haze_{fit_R0}",
"clear+cloud+haze": f"NoHet_FitP0_Clouds_Haze_{fit_R0}",
"clear+spot": f"Het_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+spot+cloud": f"Het_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+spot+haze": f"Het_FitP0_NoClouds_Haze_{fit_R0}",
"clear+spot+cloud+haze": f"Het_FitP0_Clouds_Haze_{fit_R0}",
}
data_dict = {
sp: {
model_name: load_pickle(f"{base_dir}/HATP23_E1_{model_id}_{sp}/retrieval.pkl")
for (model_name, model_id) in model_names_dict.items()
}
for sp in species
}
lnZ = {}
lnZ_err = {}
for species_name, species_data in data_dict.items():
lnZ[species_name] = {}
lnZ_err[species_name] = {}
for model_name, model_data in species_data.items():
lnZ[species_name][model_name] = model_data["lnZ"]
lnZ_err[species_name][model_name] = model_data["lnZerr"]
df_lnZ = pd.DataFrame(lnZ)
df_lnZ_err = pd.DataFrame(lnZ_err)
# Get log evidence for spot-only model and compute relative to this instead
if relative_to_spot_only:
model_id = f"Het_FitP0_NoClouds_NoHaze_{fit_R0}_no_features"
df_lnZ_min = load_pickle(f"{base_dir}/HATP23_E1_{model_id}/retrieval.pkl")
#print(f"spot only lnZ: {df_lnZ_min['lnZ']} +/- {df_lnZ_min['lnZerr']}")
species_min = "no_features"
model_min = "spot only"
else:
species_min = df_lnZ.min().idxmin()
model_min = df_lnZ[species_min].idxmin()
df_lnZ_min = data_dict[species_min][model_min]
df_Delta_lnZ = df_lnZ - df_lnZ_min["lnZ"]
df_Delta_lnZ_err = np.sqrt(df_lnZ_err ** 2 + df_lnZ_min["lnZerr"] ** 2)
return df_Delta_lnZ, df_Delta_lnZ_err, species_min, model_min, data_dict
def get_phases(t, P, t0):
"""
Given input times, a period (or posterior dist of periods)
and time of transit center (or posterior), returns the
phase at each time t. From juliet =]
"""
if type(t) is not float:
phase = ((t - np.median(t0)) / np.median(P)) % 1
ii = np.where(phase >= 0.5)[0]
phase[ii] = phase[ii] - 1.0
else:
phase = ((t - np.median(t0)) / np.median(P)) % 1
if phase >= 0.5:
phase = phase - 1.0
return phase
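def _example_get_phases():
    """Usage sketch (helper added for illustration): with P = 2.5 d and
    t0 = 2457000.0, a time half a period after transit wraps to phase -0.5."""
    t = np.array([2457000.0, 2457001.25])
    return get_phases(t, 2.5, 2457000.0)  # -> array([ 0. , -0.5])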
def get_result(fpath, key="t0", unc=True):
data = np.genfromtxt(fpath, encoding=None, dtype=None)
for line in data:
if key in line:
if unc:
return line
else:
return line[1]
print(f"{key} not found. Check results.dat file.")
def get_table_stats(df, ps=[0.16, 0.5, 0.84], columns=None):
ps_strs = [f"{p*100:.0f}%" for p in ps]
df_stats = df.describe(percentiles=ps).loc[ps_strs]
df_latex = pd.DataFrame(columns=df.columns)
df_latex.loc["p"] = df_stats.loc[ps_strs[1]]
df_latex.loc["p_u"] = df_stats.loc[ps_strs[2]] - df_stats.loc[ps_strs[1]]
df_latex.loc["p_d"] = df_stats.loc[ps_strs[1]] - df_stats.loc[ps_strs[0]]
latex_strs = df_latex.apply(write_latex_row2, axis=0)
return pd.DataFrame(latex_strs, columns=columns)
def load_pickle(fpath):
with open(fpath, "rb") as f:
data = pickle.load(f, encoding="latin") # Python 2 -> 3
return data
def myparser(s):
dt, day_frac = s.split(".")
dt = datetime.strptime(dt, "%Y-%m-%d")
ms = 86_400_000.0 * float(f".{day_frac}")
ms = timedelta(milliseconds=int(ms))
return dt + ms
def plot_binned(
ax,
idxs_used,
fluxes,
bins,
offset,
colors,
annotate=False,
utc=False,
species=None,
bold_species=True,
plot_kwargs=None,
annotate_kwargs=None,
annotate_rms_kwargs=None,
models=None,
):
"""
Plots binned light curves.
Parameters
----------
    ax : matplotlib.axes object
        Current axis to plot on
    idxs_used : array-like
        x-axis values (index, time, phase, etc.)
fluxes : ndarray
`time[idxs_used]` x `wbin` array of fluxes. Each column corresponds to a wavelength
binned LC, where `wbin` is the number of wavelength bins
bins : ndarray
`wbin` x 2 array of wavelength bins. The first column holds the lower
bound of each bin, and the second column holds the upper bound for each.
offset : int, float
How much space to put between each binned LC on `ax`
colors : ndarray
`wbin` x 3 array of RGB values to set color palette
    annotate : bool, optional
        Whether to annotate wavelength bins on plot. Default is False.
utc : bool, optional
Whether to convert `time` to UTC or not. Default is False.
    bold_species : bool, optional
        Whether to bold an annotated bin when it contains one of the
        wavelengths in `species`.
plot_kwargs : dict, optional
Optional keyword arguments to pass to plot function
annotate_kwargs : dict, optional
Optional keyword arguments to pass to annotate function
Returns
-------
    ax : matplotlib.axes object
        Current axis that was plotted on.
"""
if plot_kwargs is None:
plot_kwargs = {}
if annotate_kwargs is None:
annotate_kwargs = {}
if annotate_rms_kwargs is None:
annotate_rms_kwargs = {}
offs = 0
    if idxs_used is None:
        idxs_used = range(fluxes.shape[0])
        slc = slice(0, fluxes.shape[0] + 1)
else:
slc = idxs_used
# fluxes = fluxes[slc, :]
N = bins.shape[0] # number of wavelength bins
for i in range(N):
wav_bin = [round(bins[i][j], 3) for j in range(2)]
if utc:
            t_date = Time(idxs_used, format="jd")
ax.plot_date(
t_date.plot_date,
fluxes[:, i] + offs,
c=colors[i],
label=wav_bin,
**plot_kwargs,
)
else:
ax.plot(
idxs_used,
fluxes[:, i] + offs,
c=0.9 * colors[i],
label=wav_bin,
# mec=0.9*colors[i],
**plot_kwargs,
)
if models is not None:
ax.plot(idxs_used, models[:, i] + offs, c=0.6 * colors[i], lw=2)
if annotate:
# trans = transforms.blended_transform_factory(
# ax.transAxes, ax.transData
# )
trans = transforms.blended_transform_factory(ax.transData, ax.transData)
# Annotate wavelength bins
ann = ax.annotate(
wav_bin,
# xy=(0, 1.004*(1 + offs)),
xy=(idxs_used[-1], 1.002 * (1 + offs)),
xycoords=trans,
**annotate_kwargs,
)
rms = np.std(fluxes[:, i]) * 1e6
ann_rms = ax.annotate(
f"{int(rms)}",
xy=(idxs_used[0], 1.002 * (1 + offs)),
xycoords=trans,
**annotate_rms_kwargs,
)
# Make annotations bold if bin is a species bin
if bold_species:
if species is None:
species = dict()
for spec, spec_wav in species.items():
if wav_bin[0] <= spec_wav <= wav_bin[1]:
ann.set_text(f"{spec}\n{ann.get_text()}")
ann.set_weight("bold")
offs += offset
return ax
def plot_chips(dirpath, fpathame, target="", vmin=0, vmax=2_000, spec_ap=0, sky_ap=0):
# This plots the chips by numbers:
#
# 1 2 3 4
# 6 5 8 7
#
class CoordinateData:
"""
A simple class to hold coordinate data.
"""
def __init__(self, filename, skiplines=0):
self.fpathame = filename
            self.obj = np.array([])
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.FATAL)
import sys, os
sys.path.append('../')
figs_dir = 'figs8'
data_dir = os.path.join(figs_dir, 'data')
if not os.path.isdir(figs_dir):
os.mkdir(figs_dir)
os.mkdir(data_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
Ns = 200
Xs = np.linspace(-4, 4, Ns)
#!python -m unittest tests.test_processing
import numba
import numpy as np
import pandas as pd
import tqdm
import h5py
import random
import statsmodels.stats.multitest
import urllib.request, json
import os
import socket
import re
import Bio.PDB.MMCIF2Dict
from itertools import groupby
import unittest
from scipy.spatial.transform import Rotation as R
from Bio import PDB
from structuremap.processing import download_alphafold_cif, \
download_alphafold_pae, \
format_alphafold_data, \
get_3d_dist, \
rotate_vector_around_axis, \
get_angle, \
get_paired_error, \
get_neighbors, \
annotate_accessibility, \
smooth_score, \
get_smooth_score, \
get_avg_3d_dist, \
get_avg_1d_dist, \
find_idr_pattern, \
annotate_proteins_with_idr_pattern, \
extend_flexible_pattern, \
get_extended_flexible_pattern, \
get_mod_ptm_fraction
THIS_FOLDER = os.path.dirname(__file__)
TEST_FOLDER = os.path.join(
f"{os.path.dirname(THIS_FOLDER)}",
"data",
"test_files",
)
class TestProcessing(unittest.TestCase):
def test_download_alphafold_cif(self, ):
valid, invalid, existing = download_alphafold_cif(
proteins=['O15552','Q5VSL9','Q7Z6M3','O15552yy'],
out_folder=TEST_FOLDER)
np.testing.assert_equal(valid, np.array(['Q5VSL9']))
np.testing.assert_equal(invalid, np.array(['O15552yy']))
np.testing.assert_equal(existing, np.array(['O15552','Q7Z6M3']))
os.remove(
os.path.join(
TEST_FOLDER,
'Q5VSL9.cif'
)
)
def test_download_alphafold_pae(self, ):
valid, invalid, existing = download_alphafold_pae(
proteins=['O15552','Q5VSL9','Q7Z6M3','O15552yy'],
out_folder=TEST_FOLDER)
np.testing.assert_equal(valid, np.array(['Q5VSL9']))
np.testing.assert_equal(invalid, np.array(['O15552yy']))
np.testing.assert_equal(existing, np.array(['O15552','Q7Z6M3']))
os.remove(
os.path.join(
TEST_FOLDER,
'pae_Q5VSL9.hdf'
)
)
def test_format_alphafold_data(self, ):
alphafold_formatted = format_alphafold_data(
directory=TEST_FOLDER, protein_ids=["Q7Z6M3","O15552"])
alphafold_formatted_ini = pd.read_csv(
os.path.join(
TEST_FOLDER,
'test_alphafold_annotation.csv'
)
)
pd.testing.assert_frame_equal(alphafold_formatted, alphafold_formatted_ini, check_dtype=False)
def test_get_3d_dist(self, ):
x = np.array([1.1,1.1,1.1,1.1,5.1])
y = np.array([1.1,2.1,3.1,1.1,10.1])
z = np.array([1.1,3.1,5.1,1.1,4.1])
coordinate_array = np.vstack([x,y,z]).T
np.testing.assert_equal(2.236068, np.round(get_3d_dist(coordinate_array, coordinate_array, 0, 1), decimals=6))
np.testing.assert_equal(4.472136, np.round(get_3d_dist(coordinate_array, coordinate_array, 0, 2), decimals=6))
np.testing.assert_equal(4.472136, np.round(get_3d_dist(coordinate_array, coordinate_array, 2, 0), decimals=6))
def rotate_vector_around_axis_scipy(self, vector, axis, theta):
theta = np.radians(theta)
axis_norm = axis / np.linalg.norm(axis)
r = R.from_rotvec(theta * axis_norm)
return(r.apply(vector))
def test_rotate_vector_around_axis(self, ):
v = np.array([3.0, 5.0, 0.0])
a = np.array([4.0, 4.0, 1.0])
t = 90
res_real = rotate_vector_around_axis(v, a, t)
res_scipy = self.rotate_vector_around_axis_scipy(v, a, t)
np.testing.assert_almost_equal(res_real, res_scipy, decimal=10)
def test_get_angle(self, ):
x_a = np.array([1.1,1.1,1.1])
y_a = np.array([1.1,2.1,-3.1])
z_a = np.array([1.1,3.1,5.1])
x_b = np.array([1.5,np.nan,1.5])
y_b = np.array([1.5,2.5,3.5])
z_b = np.array([1.5,3.5,5.5])
x_c = np.array([1.5,1.5,10.6])
y_c = np.array([1.5,2.5,11.6])
z_c = np.array([1.5,3.5,5.6])
x_n = np.array([4.5,1.8,1.5])
y_n = np.array([40.5,7.8,3.5])
z_n = np.array([3.5,3.8,5.5])
coordinate_array_a = np.vstack([x_a,y_a,z_a]).T
coordinate_array_b = np.vstack([x_b,y_b,z_b]).T
coordinate_array_c = np.vstack([x_c,y_c,z_c]).T
coordinate_array_n = np.vstack([x_n,y_n,z_n]).T
np.testing.assert_equal(39.231520,
np.round(get_angle(coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
0, 1), decimals=6))
np.testing.assert_equal(91.140756,
np.round(get_angle(coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
0, 2), decimals=6))
np.testing.assert_equal(47.168228,
np.round(get_angle(coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
2, 0), decimals=6))
# test gly
np.testing.assert_equal(93.985035,
np.round(get_angle(coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
1, 2), decimals=6))
def test_get_paired_error(self, ):
pos = np.array([1,2,3])
error = np.array([[0,2,10],[1,0,5],[10,4,0]])
np.testing.assert_equal(2, get_paired_error(pos, error, 0,1))
np.testing.assert_equal(0, get_paired_error(pos, error, 2,2))
pos = np.array([1,3])
np.testing.assert_equal(10, get_paired_error(pos, error, 0,1))
def test_get_neighbors(self, ):
idxl = np.array([0,1,2])
x_a = np.array([1.1,1.1,1.1])
y_a = np.array([1.1,2.1,-3.1])
z_a = np.array([1.1,3.1,5.1])
x_b = np.array([1.5,np.nan,1.5])
y_b = np.array([1.5,2.5,3.5])
z_b = np.array([1.5,3.5,5.5])
x_c = np.array([1.5,1.5,10.6])
y_c = np.array([1.5,2.5,11.6])
z_c = np.array([1.5,3.5,5.6])
x_n = np.array([4.5,1.8,1.5])
y_n = np.array([40.5,7.8,3.5])
z_n = np.array([3.5,3.8,5.5])
coordinate_array_a = np.vstack([x_a,y_a,z_a]).T
coordinate_array_b = np.vstack([x_b,y_b,z_b]).T
coordinate_array_c = np.vstack([x_c,y_c,z_c]).T
coordinate_array_n = np.vstack([x_n,y_n,z_n]).T
pos=np.array([1,2,3])
error = np.array([[0,2,10],[1,0,5],[10,4,0]])
np.testing.assert_equal(np.array([1, 0, 0]),
get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
pos, error, 5, 40))
np.testing.assert_equal(np.array([1, 1, 0]),
get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
pos, error, 5, 150))
np.testing.assert_equal(np.array([2, 2, 2]),
get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
coordinate_array_c, coordinate_array_n,
pos, error, 50, 140))
def test_annotate_accessibility(self, ):
radius = 12.0
alphafold_annotation = pd.read_csv(
os.path.join(
TEST_FOLDER,
'test_alphafold_annotation.csv'
)
)
res_accessability = annotate_accessibility(
df=alphafold_annotation[alphafold_annotation.protein_id=="Q7Z6M3"],
max_dist=12,
max_angle=90,
error_dir=None)
# comparison to https://biopython.org/docs/dev/api/Bio.PDB.HSExposure.html#Bio.PDB.HSExposure.HSExposureCB
with open(
os.path.join(
TEST_FOLDER,
'Q7Z6M3.pdb'
)
) as pdbfile:
p=PDB.PDBParser()
s=p.get_structure('X', pdbfile)
m=s[0]
hse=PDB.HSExposureCB(m, radius)
residue_list=PDB.Selection.unfold_entities(m,'R')
res_hse = []
for r in residue_list:
res_hse.append(r.xtra['EXP_HSE_B_U'])
np.testing.assert_equal(np.array(res_hse), res_accessability.nAA_12_90_nopae.values)
# @ToDo: test with actual error_dir
def test_smooth_score(self, ):
np.testing.assert_equal(np.array([1.5, 2. , 3. , 4. , 4.5]),smooth_score(score=np.array([1,2,3,4,5]), half_window=1))
def test_get_smooth_score(self, ):
testdata = pd.DataFrame({'protein_id':[1,1,1,1,1,1,2,2,2,2,2,2],
'protein_number':[1,1,1,1,1,1,2,2,2,2,2,2],
'position':[1,2,3,4,5,6,1,2,3,4,5,6],
'score':[1,2,3,4,5,6,7,8,9,10,11,12],
'score_2':[10,20,30,40,50,60,70,80,90,100,110,120]})
test_res = get_smooth_score(testdata, np.array(['score','score_2']), [1])
np.testing.assert_equal([1.5,2,3,4,5,5.5,7.5,8,9,10,11,11.5], test_res.score_smooth1.values)
np.testing.assert_equal([15,20,30,40,50,55,75,80,90,100,110,115], test_res.score_2_smooth1.values)
def test_get_avg_3d_dist(self, ):
x = np.array([1.1,1.1,1.1,1.1,1.1,1.1])
y = np.array([1.1,2.1,3.1,1.1,10.1,20.1])
z = np.array([1.1,3.1,5.1,10.1,11.1,12.1])
pos = np.array([1,2,3,4,5,6])
error = np.array([[0,2,10,2,3,4],[1,0,5,3,2,9],[10,4,0,3,6,7],[10,4,5,0,6,7],[10,4,5,3,0,7],[10,4,0,3,6,0]])
coordinate_array = np.vstack([x,y,z]).T
np.testing.assert_equal(6.976812, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error), decimals=6))
np.testing.assert_equal(3.5, np.round(get_avg_3d_dist(np.array([0,2]), coordinate_array, pos, error), decimals=6))
np.testing.assert_equal(5.668168, np.round(get_avg_3d_dist(np.array([0,3,4]), coordinate_array, pos, error), decimals=6))
np.testing.assert_equal(4.666667, np.round(get_avg_3d_dist(np.array([0,3,4]), coordinate_array, pos, error, metric='min'), decimals=6))
np.testing.assert_equal(14, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error, error_operation='plus'), decimals=6))
error = 0.1*error
np.testing.assert_equal(13.876812, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error, error_operation='plus'), decimals=6))
        x = np.array([1.1,1.1,1.1,1.1])
#!/usr/bin/env python3
"""
Helper functions for Fourier transform algorithms
"""
# Standard libraries
import numpy as np
def twiddle_factor(k,N, type='exp'):
"""
Return twiddle factors.
"""
if type=='cos':
twiddle_factor = np.cos(2*np.pi*k/N)
elif type=='sin':
twiddle_factor = np.sin(2*np.pi*k/N)
    elif type=='exp':
        twiddle_factor = np.exp(2j*np.pi*k/N)
    else:
        raise ValueError("type must be 'cos', 'sin' or 'exp'")
    return twiddle_factor
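def _example_twiddle_factor():
    """Sanity sketch (helper added for illustration): the exp-type twiddle for
    k = 1, N = 4 is exp(2j*pi/4) = 1j. Note the positive-exponent convention
    used above; a textbook forward DFT uses exp(-2j*pi*k/N)."""
    return np.isclose(twiddle_factor(1, 4), 1j)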
def normalize(weights, platform):
"""
Normalize the weights for computing the 1-D FT
"""
    if platform == "numpy":
weights /= weights.size
if platform == "loihi":
correction_coef = 127 / weights.max()
weights = np.ceil(weights * 127 - 0.5)*2
if platform == "brian":
weights = weights.T
return weights
def dft_connection_matrix(nsamples, platform):
"""
Calculate network weights based on Fourier transform equation
Parameters:
nsamples (int): Number of samples in a chirp
        platform (str [numpy|loihi|brian]): If "loihi", values are normalized
        between the limits imposed by the chip; re-scale weights to the range
        admitted by Loihi (8-bit even values -257 to 254). If "numpy",
        each weight is divided by the total number of weights, as in a
        conventional Fourier transform; if "brian", the matrix is transposed.
Returns:
real_weight_norm: weights for the connections to the "real" compartments
imag_weight_norm: weights for the connections to the "imag" compartments
"""
c = 2 * np.pi/nsamples
n = np.arange(nsamples).reshape(nsamples, 1)
k = np.arange(nsamples).reshape(1, nsamples)
trig_factors = np.dot(n, k) * c
real_weights = np.cos(trig_factors)
imag_weights = -np.sin(trig_factors)
# Normalize the weights based on the used platform
real_weights_norm = normalize(real_weights, platform)
imag_weights_norm = normalize(imag_weights, platform)
return (real_weights_norm, imag_weights_norm)
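def _example_dft_connection_matrix():
    """Usage sketch (helper added for illustration) for the "numpy" platform:
    real + 1j*imag applied to a signal acts as a forward DFT, scaled by
    1/weights.size (i.e. 1/nsamples**2) by the normalization above."""
    real_w, imag_w = dft_connection_matrix(8, "numpy")
    x = np.sin(2 * np.pi * np.arange(8) / 8)
    return real_w @ x + 1j * (imag_w @ x)  # ~ np.fft.fft(x) / 64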
def fft_connection_matrix(layer, nsamples, platform):
"""
Connection matrix for a radix-4 fft
"""
radix = 4
    n_layers = int(np.log(nsamples)/np.log(radix))
import numpy as np
from scipy.stats import skew, kurtosis
__all__ = ['sky_noise_error', 'propagate_noise_error', 'mcnoise']
def sky_noise_error(nu_obs, nu_emit, nu_ch_bw, tint, a_eff, n_station, bmax):
"""Calculate instrument noise error of an interferometer.
This assume that Tsys is dominated by Tsky.
(see Furlanetto et al. (2006) section 9)
Parameters
----------
nu_obs : float or array-like
Observing frequency in [MHz].
Can be array-like to compute noise at multiple frequencies.
nu_emit : float
Emitted frequency of the observed spectral line in [MHz].
nu_ch_bw : float
Observed frequency channel bandwidth in [MHz].
    tint : float
        Integration time in [hours].
a_eff : float
Effective area of a station in [m**2].
n_station : integer
Number of antennas (or stations in case of a phase-array).
bmax : float
Maximum baseline length of the array in [wavelength].
Returns
-------
Noise error (standard deviation) in [mK] in the same format as nu_obs.
"""
nu_obs = np.asarray(nu_obs)
a_tot = a_eff * n_station
z = (nu_emit / nu_obs) - 1.
theta = 1.22 / bmax * 60 * 180 / np.pi
err = 2.9 * (1.e5 / a_tot) * (10. / theta) ** 2 * \
((1 + z) / 10.0) ** 4.6 * np.sqrt(100. / (nu_ch_bw * tint))
return err
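def _example_sky_noise_error():
    """Usage sketch (helper added for illustration; numbers are purely
    illustrative, not instrument specs): a 21-cm line (nu_emit = 1420.4 MHz)
    observed at 150 MHz with 0.1 MHz channels, 1000 h integration,
    a_eff = 500 m**2 per station, 256 stations, bmax = 1000 wavelengths."""
    return sky_noise_error(nu_obs=150.0, nu_emit=1420.4, nu_ch_bw=0.1,
                           tint=1000.0, a_eff=500.0, n_station=256,
                           bmax=1000.0)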
def propagate_noise_error(noise_err, m2, m3, m4, m6, npix):
"""Analytically propagate error to variance and skewness.
Based on error propagation described in the appendix of
Watkinson & Pritchard (2014)
Parameters
----------
noise_err : float or array-like
Noise error.
m2 : float
2nd moment of the data
m3 : float
3rd moment of the data
m4 : float
4th moment of the data
m6 : float
6th moment of the data
npix : int
Number of pixels in the data
Returns
-------
Error of 2nd moment, 3rd moment and skewness.
"""
noise_var = np.asarray(noise_err) ** 2
m2_var = (2. / npix) * (2 * m2 * noise_var + noise_var ** 2)
m3_var = (3. / npix) * (3 * noise_var * m4 + 12 * m2 * noise_var ** 2 +
5 * noise_var ** 3)
m4_var = (8. / npix) * (2 * m6 * noise_var + 21 * m4 * noise_var ** 2 +
48 * m2 * noise_var ** 3 + 12 * noise_var ** 4)
m2m3_cov = (6 / npix) * m3 * noise_var
m2m4_cov = (4. / npix) * (2 * m4 * noise_var + 9 * m2 * noise_var ** 2 +
3 * noise_var ** 3)
skew_var = (m3_var / (m2 ** 3)) + \
((9 * m3 ** 2 * m2_var) / (4 * m2 ** 5)) - \
(3 * m3 * m2m3_cov / (m2 ** 4))
kurt_var = (1. / m2 ** 4) * m4_var + 4 * (m4 ** 2 / m2 ** 6) * m2_var - \
4 * (m4 / m2 ** 5) * m2m4_cov
    return np.sqrt(m2_var), np.sqrt(m3_var), np.sqrt(skew_var)
import cv2
import math
import numpy as np
from ImageProcessor import ImageProcessor,ImageLines
import Settings
import logger
import time
import copy
from keras.models import load_model
from skimage import measure
from skimage import morphology
from skimage.color import rgb2grey
import imutils
from TrafficSignType import detect_obstacle
logger = logger.get_logger(__name__)
#lostLine_model = load_model('lostLine_v4.h5')
lostLine_model = load_model('lostLine_v5.h5')
def logit(msg):
pass
#if Settings.DEBUG:
# logger.info("%s" % msg)
# print(msg)
class GreyLines(object):
prev_obs_detect_time = -1
prev_obs_turn_start_pos = -1
@staticmethod
def drawline(greyimage):
cv2.line(greyimage, (0, 0), (100, 100), (255, 0, 0), 2)
@staticmethod
def PrintLines(lines, name, img):
if lines is not None:
for line in lines:
if line is not None:
for x1, y1, x2, y2 in line:
#if(y1 == y2 and (abs(x1-x2) > 5)):
# continue
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 1)
ImageProcessor.show_image(img, name)
@staticmethod
def DrawPoints(nparray, name):
img = np.zeros((132,320,1), np.uint8)
for xx in nparray:
x = xx[1]
y = xx[0]
cv2.circle(img,(x, y),1,(55,255,155),1)
ImageProcessor.show_image(img, name)
@staticmethod
def ArraySum(array):
arrsum = np.sum(array, 0)
#print "DstArray size: %d, arrsum: %s" % (array.size, arrsum)
if array.size >0 and array.shape[0] != 0:
lanepoint = arrsum/array.shape[0]
else:
lanepoint = [0, 0]
return [lanepoint[1], lanepoint[0]]
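    # Example: for points [[y, x]] = [[10, 20], [30, 40]] the column means
    # are [20, 30], and ArraySum returns them swapped as [x, y] = [30, 20].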
@staticmethod
def GetCoordinate(edge):
#np.set_printoptions(threshold='nan')
ans = np.zeros((edge.shape[1], edge.shape[0]), dtype=int)
for y in range(0, edge.shape[0]):
for x in range(0, edge.shape[1]):
if edge[y, x] != 0:
ans[x, y] = 1
#print "(%d, %d)" % (x, y)
print("numpy shape: %d %d"% (ans.shape[0], ans.shape[1]))
#print "ans: %s" % ans
return ans
@staticmethod
def GetZone(edgecoor, part = [0, 0, 0, 0]):
#np.set_printoptions(threshold='nan')
#print "part: %s, %s, %s, %s" % (part[0], part[1], part[2], part[3])
        # integer division keeps the slice bounds ints on Python 3
        ybeg = edgecoor.shape[0]//part[0] if part[0] > 0 else 0
        yend = edgecoor.shape[0]//part[1] if part[1] > 0 else edgecoor.shape[0]
        xbeg = edgecoor.shape[1]//part[2] if part[2] > 0 else 0
        xend = edgecoor.shape[1]//part[3] if part[3] > 0 else edgecoor.shape[1]
#print "ybeg: %d, yend: %d, xbeg: %d, xend: %d"% (ybeg, yend, xbeg, xend)
targetzone = edgecoor[ybeg:yend,xbeg:xend]
#print "targetzone: %s" % targetzone
return targetzone
@staticmethod
def EdgeMergeByZone(srcedge, objedge, zone = [0, 0, 0, 0]):
srcedgeslice = GreyLines.GetZone(srcedge, zone)
objectedgeslice = GreyLines.GetZone(objedge, zone)
rstedge = np.logical_xor(srcedgeslice, objectedgeslice)
#print "rstedge: %s " % rstedge
indexs = np.transpose(rstedge.nonzero())
#print "index of nonzero: %s" % indexs
return indexs
@staticmethod
def EdgeMergeByValidPixel(srcedge, objedge):
##np.set_printoptions(threshold='nan')
rstedge = np.logical_xor(srcedge, objedge)
#print "rstedge: %s " % rstedge
indexs = np.transpose(rstedge.nonzero())
#print "index of nonzero: %s" % indexs
return indexs[:indexs.shape[0], :]
@staticmethod
def Drawline(img, point):
image_height = img.shape[0]
image_width = img.shape[1]
#print "image: heigh: %d, width: %d" % (image_height, image_width)
        cv2.line(img, (image_width//2, image_height), (int(point[0]), int(point[1])), (255, 255, 0), 3)
@staticmethod
def GetAngle(carx, cary, expectx, expecty):
#print carx, cary, expectx, expecty
myradians = math.atan2(expectx-carx, cary - expecty)
return math.degrees(myradians)
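    # Example: GetAngle(160, 240, 260, 140) = degrees(atan2(100, 100)) = 45.0;
    # positive angles point to the right of the car's heading, negative
    # angles to the left.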
@staticmethod
def GetleftWallZoneCoor(rawarray, blackwallimg):
rows = np.where(rawarray[:, 0] > blackwallimg.shape[0] * Settings.eagle_wall_ylevel)
indexs1 = rawarray[rows]
rows = np.where(indexs1[:,1] < blackwallimg.shape[1] * Settings.eagle_wall_xlevel)
indexs = indexs1[rows]
firstclomn = indexs[:,1]
validcolumn = list(set(firstclomn))
yorglen, yselen = len(firstclomn), len(validcolumn)
wallpoint = [0, 0]
rstangle = 0
        if yorglen < Settings.eagle_wall_throttle:
            return wallpoint, rstangle
        #logit('GetleftWallZoneCoor xdensity (%s %s) ydensity(%s %s)' % (xorglen, xselen, yorglen, yselen))
        if yselen > 0 and yorglen/yselen > Settings.eagle_wall_density:
            # points too dense to be a wall edge: bail out
            return wallpoint, rstangle
        else:
if Settings.DEBUG:
logit('GetleftWallZoneCoor indexs size: %s' % (indexs.size))
logit('ydensity(%s %s)' % (yorglen, yselen))
#logit('GetleftWallZoneCoor indexs :%s' % (indexs))
wallpoint = GreyLines.ArraySum(indexs)
rstangle = GreyLines.GetAngle(blackwallimg.shape[1]/2, blackwallimg.shape[0], wallpoint[0], wallpoint[1])
return wallpoint, rstangle
@staticmethod
def GetRightWallZoneCoor(rawarray, blackwallimg):
rows = np.where(rawarray[:, 0] > blackwallimg.shape[0] * Settings.eagle_wall_ylevel)
indexs1 = rawarray[rows]
rows = np.where( indexs1[:,1] > blackwallimg.shape[1] * (1-Settings.eagle_wall_xlevel))
indexs = indexs1[rows]
firstclomn = indexs[:,1]
validcolumn = list(set(firstclomn))
yorglen, yselen = len(firstclomn), len(validcolumn)
wallpoint = [0, 0]
rstangle = 0
        if yorglen < Settings.eagle_wall_throttle:
            return wallpoint, rstangle
        if yselen > 0 and yorglen/yselen > Settings.eagle_wall_density:
            return wallpoint, rstangle
        else:
if Settings.DEBUG:
logit('GetRightWallZoneCoor indexs size:%s' % (indexs.size))
logit('ydensity(%s %s)' % (yorglen, yselen))
#logit('GetleftWallZoneCoor indexs :%s' % (indexs))
wallpoint = GreyLines.ArraySum(indexs)
rstangle = GreyLines.GetAngle(blackwallimg.shape[1]/2, blackwallimg.shape[0], wallpoint[0], wallpoint[1])
return wallpoint, rstangle
@staticmethod
def GetWallAngle(Walls, blackwallimg):
##np.set_printoptions(threshold='nan')
indices = np.where( Walls != [0])
        indexs = np.array(list(zip(indices[0], indices[1])))
leftlanepoint, leftangle = [0, 0], 0
rightlanepoint, rightangle = [0, 0], 0
if indexs.size == 0:
return leftlanepoint, leftangle, rightlanepoint, rightangle
leftlanepoint, leftangle = GreyLines.GetleftWallZoneCoor(indexs, blackwallimg)
#print "left lanepoint: %s, angle: %s" % (leftlanepoint, leftangle)
rightlanepoint, rightangle = GreyLines.GetRightWallZoneCoor(indexs, blackwallimg)
#print "right lanepoint: %s, angle: %s" % (rightlanepoint, rightangle)
#For to turn angle, wall on left, go to righ and vise versa
# if leftangle != 0 and rightangle != 0:
# angletotrun = lanesangle if lanesangle !=0 else last_lanes_angle
# wallpoint = [(rightlanepoint[0] + leftlanepoint[0])/2, (rightlanepoint[1] + leftlanepoint[1])/2]
# elif leftangle != 0 or rightangle != 0: # compare y coordinate
# if rightangle == 0 or (leftangle != 0 and leftlanepoint[1] < rightlanepoint[1]):
# angletotrun = 90 + leftangle
# wallpoint = leftlanepoint
# else:
# angletotrun = rightangle - 90
# wallpoint = rightlanepoint
if Settings.DEBUG_IMG:
if leftangle != 0:
GreyLines.Drawline(blackwallimg, leftlanepoint)
if rightangle != 0:
GreyLines.Drawline(blackwallimg, rightlanepoint)
#ImageProcessor.show_image(LanesCoor, "Lanes")
ImageProcessor.show_image(Walls, "Walls")
return leftlanepoint, leftangle, rightlanepoint, rightangle
#parts cooradinate ratio: [ybeg, yend, xbeg, xend]
@staticmethod
def GetLanesAngle(WallAndLanes, Walls, blackwallimg, direction = 0):
targetxy = GreyLines.EdgeMergeByValidPixel(WallAndLanes, Walls) # Remove wall edges
#targetzone = GreyLines.EdgeMergeByZone(WallAndLanes, Walls, Settings.birdviewpart)
rstangle, lanepoint = 0, [0, 0]
if targetxy.size == 0:
return rstangle, lanepoint
slicedarray = targetxy[:int(targetxy.shape[0]/3), :]
firstclomn = slicedarray[:,1]
validcolumn = list(set(firstclomn))
orglen, selen = len(firstclomn), len(validcolumn)
if orglen < Settings.eagle_lanes_throttle:
return rstangle, lanepoint
#print 'GetLanesAngle indexs size:%s,selen %s' % (orglen, selen)
validarray = slicedarray
sliceratio = 0
if direction == -1: #turn left
sliceratio = blackwallimg.shape[1] * (1-Settings.eagle_traffice_slice_ratio)
rows = np.where(slicedarray[:, 1] < sliceratio)
validarray = slicedarray[rows]
if direction == 1: #turn right
sliceratio = blackwallimg.shape[1] * Settings.eagle_traffice_slice_ratio
rows = np.where(slicedarray[:, 1] > sliceratio)
validarray = slicedarray[rows]
lanepoint = GreyLines.ArraySum(validarray) # Pickup specified zone to calc the center points
if Settings.DEBUG_IMG:
GreyLines.Drawline(blackwallimg,lanepoint)
#ImageProcessor.show_image(blackwallimg, "blackwallimg")
#ImageProcessor.show_image(LanesCoor, "Lanes")
#ImageProcessor.show_image(Walls, "Walls")
if lanepoint[0] == 0 and lanepoint[1] == 0:
if Settings.DEBUG:
logit("No lanes can be found")
logit("slice ratio: %s, turn %s" % (sliceratio, direction))
else:
rstangle = GreyLines.GetAngle(blackwallimg.shape[1]/2, blackwallimg.shape[0], lanepoint[0], lanepoint[1])
return rstangle, lanepoint,
@staticmethod
def GetEdgeImages(srcimg):
blackwallimg = ImageProcessor.preprocess(srcimg, 0.5) # Get cropped image and convert image wall to black
blackwallimg = cv2.medianBlur(blackwallimg, 3)
whilelanes = GreyLines.ChangeLaneToWhite(blackwallimg) # change all lanes to white
#ImageProcessor.show_image(whilelanes, "whilelanes")
Walls = GreyLines.TractGrey(whilelanes) # Get wall edges
WallAndLanes = GreyLines.TractGrey(blackwallimg) # Get lanes and wall edges
#cv2.imwrite("TestImage/WallAndLanes_"+str(time.time())+".jpg", WallAndLanes)
return WallAndLanes, Walls, blackwallimg
#GreyLines.DrawPoints(LanesCoor, "LanesCoorpoint")
#GreyLines.DrawPoints(WallAndLanesCoor, "WallAndLanesCoorpoint")
@staticmethod
def CalRoadPosition(srcimg):
road_image = GreyLines.GetRoadImages(srcimg)
road_image_temp = road_image.copy()
middle_position = road_image_temp.shape[1]/2
#print "middle_position", middle_position
row_position = []
has_check_road = False
for row in road_image_temp[::-1]:
if len(row[row>100]) > (len(row) -2):
row[row>100] = 50
else:
position = []
                if has_check_road and len(row[row<10]) == len(row) and len(row_position)>0:
if row_position[-1] > middle_position:
row_position.append(len(row))
else:
row_position.append(0)
continue
has_check_road = True
for col_n, pixel in enumerate(row):
if pixel > 100:
position.append(col_n)
if len(position) > 0:
row_position.append(np.array(position).mean())
ImageProcessor.show_image(road_image_temp, "road_image_temp")
        if len(row_position) == 0:
            return 160
        car_pos = np.array(row_position).mean()
        print("car_pos", car_pos)
        return car_pos
@staticmethod
def CalRoadNumber(srcimg):
blackwallimg = ImageProcessor.preprocess(srcimg, 0.5) # Get cropped image and convert image wall to black
blackwallimg = cv2.medianBlur(blackwallimg, 5)
ImageProcessor.show_image(blackwallimg, "blackwallimg")
line_number = 0
line_color = []
for row_pixel in blackwallimg[::-1]:
if row_pixel[0][0]<10 and row_pixel[0][1]<10 and row_pixel[0][2]<10 and\
row_pixel[1][0]<10 and row_pixel[1][1]<10 and row_pixel[1][2]<10 and\
row_pixel[-1][0]<10 and row_pixel[-1][1]<10 and row_pixel[-1][2]<10 and\
row_pixel[-2][0]<10 and row_pixel[-2][1]<10 and row_pixel[-2][2]<10:
                #Both sides are black
for pixel in row_pixel[2:(len(row_pixel)-2)]:
r = pixel[0]
g = pixel[1]
b = pixel[2]
if r< 10 and g<10 and b<10:
continue
if len(line_color) == 0:
if r > 10 and g<10 and b< 10:
line_color.append("red")
elif r < 10 and g>10 and b< 10:
line_color.append("green")
elif r < 10 and g<10 and b> 10:
line_color.append("blue")
else:
print("---------------Color error in road line")
elif r > 10 and g<10 and b< 10 and line_color[-1] != "red":
#red line
line_color.append("red")
elif r < 10 and g>10 and b< 10 and line_color[-1] != "green":
#green line
line_color.append("green")
elif r < 10 and g<10 and b>10 and line_color[-1] != "blue":
line_color.append("blue")
break
print(line_color)
if len(line_color) <2:
line_number = 0
elif len(line_color)<4:
line_number = 3
else:
line_number = 6
print("line number ", len(line_color))
return len(line_color)
@staticmethod
def has_two_line(line_data):
last_pos = line_data[0]
for line_pos in line_data:
if (line_pos - last_pos) >2:
return True
last_pos = line_pos
return False
@staticmethod
def middle_line_pos(line_data):
last_pos = line_data[0]
for line_pos in line_data:
if (line_pos - last_pos) >2:
return last_pos,(line_pos+last_pos)/2,line_pos
last_pos = line_pos
        return -1, -1, -1  # keep the 3-tuple arity of the success path above
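    # Worked example (illustrative, not from the original source): positions
    # come from np.where on a binary image row; a gap wider than 2 px splits
    # the run into two lines.
    #   has_two_line([50, 51, 52, 120, 121])    -> True  (gap of 68 px)
    #   has_two_line([50, 51, 52, 53])          -> False (one contiguous run)
    #   middle_line_pos([50, 51, 52, 120, 121]) -> (52, 86.0, 120)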
@staticmethod
def GetTwoLinePos(line_image, line_number):
row_line = line_image[line_number]
line_pos = np.where(row_line==255)
print("STart to get two lone pos",time.time())
double_line_pos = -1
line_list = []
if len(line_pos[0]) == 0:
#No line
left_point = -1
right_point = -1
for lower_number in range(100):
if line_number+lower_number+1 >= 119:
break
lower_row_line = line_image[line_number+lower_number+1]
if lower_row_line[0] == 255 and left_point == -1:
left_point = line_number+lower_number+1
if lower_row_line[319] == 255 and right_point == -1:
right_point = line_number+lower_number+1
if left_point != -1 and right_point!= -1:
print("left_point,right_point",left_point,right_point)
if left_point > right_point:
double_line_pos = 320
else:
double_line_pos = 0
break
if double_line_pos == -1:
for lower_number in range(50):
#print "line_number+lower_number",line_number+lower_number+1
if line_number+lower_number >= 119:
break
lower_row_line = line_image[line_number+lower_number+1]
lower_line_pos = np.where(lower_row_line==255)
if len(lower_line_pos[0]) > 2:
below_lower_row_line = line_image[line_number+lower_number+2]
below_lower_line_pos = np.where(below_lower_row_line==255)
if lower_line_pos[0].mean() > below_lower_line_pos[0].mean():
#turn Right
double_line_pos = line_image.shape[1]
else:
double_line_pos = 0
break
elif len(line_pos[0]) != 0 and GreyLines.has_two_line(line_pos[0]):
#Has two line
print("Has two line")
left_pos, double_line_pos, right_pos = GreyLines.middle_line_pos(line_pos[0])
else:
double_line_row = -1
need_to_check_upper = True
need_to_check_lower = True
for i in range(120):
upper_line = line_number-i-1
lower_line = line_number+i+1
if upper_line >= 0 and need_to_check_upper:
upper_row_line = line_image[upper_line]
upper_line_pos = np.where(upper_row_line==255)
if len(upper_line_pos[0]) == 0:
need_to_check_upper = False
elif GreyLines.has_two_line(upper_line_pos[0]):
print("Find two line row,upper_line",upper_line)
double_line_row = upper_line
last_left_pos, last_middle_pos, last_right_pos = GreyLines.middle_line_pos(upper_line_pos[0])
line_list.append(last_middle_pos)
for j in range(line_number - upper_line):
current_line = line_image[upper_line+j+1]
current_line_pos = np.where(current_line==255)
if len(current_line_pos[0]) == 0:
current_left_pos, current_middle_pos, current_right_pos = last_left_pos, last_middle_pos, last_right_pos
line_list.append(current_middle_pos)
elif GreyLines.has_two_line(current_line_pos[0]):
current_left_pos, current_middle_pos, current_right_pos = GreyLines.middle_line_pos(current_line_pos[0])
line_list.append(current_middle_pos)
else:
#Has one line, make offset by last middle line
if abs(current_line_pos[0][0] - last_left_pos) <5:
#The only line is left line
current_left_pos = current_line_pos[0][0]
current_middle_pos = last_middle_pos + (current_left_pos - last_left_pos)
line_list.append(current_middle_pos)
else:
#The only line is right line
                                    current_right_pos = current_line_pos[0][-1]  # right-most pixel of the run
current_middle_pos = last_middle_pos + (current_right_pos - last_right_pos)
line_list.append(current_middle_pos)
break
if lower_line < 120 and need_to_check_lower:
lower_row_line = line_image[lower_line]
lower_line_pos = np.where(lower_row_line==255)
if len(lower_line_pos[0]) == 0:
need_to_check_lower = False
elif GreyLines.has_two_line(lower_line_pos[0]):
print("Find two line row,lower_line",lower_line)
double_line_row = lower_line
last_left_pos, last_middle_pos, last_right_pos = GreyLines.middle_line_pos(lower_line_pos[0])
line_list.append(last_middle_pos)
for j in range(lower_line - line_number):
print("CurrentLineNumber",lower_line-j-1)
current_line = line_image[lower_line-j-1]
current_line_pos = np.where(current_line==255)
if len(current_line_pos[0]) == 0:
print("Not found any line")
current_left_pos, current_middle_pos, current_right_pos = last_left_pos, last_middle_pos, last_right_pos
line_list.append(current_middle_pos)
elif GreyLines.has_two_line(current_line_pos[0]):
print("Still has two line")
current_left_pos, current_middle_pos, current_right_pos = GreyLines.middle_line_pos(current_line_pos[0])
line_list.append(current_middle_pos)
else:
print("Has one line",current_line_pos[0][0],last_left_pos,last_right_pos)
#Has one line, make offset by last middle line
if abs(current_line_pos[0][0] - last_left_pos) - abs(current_line_pos[0][0] - last_right_pos) <0:
current_right_pos = line_image.shape[1]
current_left_pos = current_line_pos[0][-1]
current_middle_pos = last_middle_pos + (current_left_pos - last_left_pos)
line_list.append(current_middle_pos)
else:
current_left_pos = 0
                                    current_right_pos = current_line_pos[0][-1]  # right-most pixel of the run
current_middle_pos = last_middle_pos + (current_right_pos - last_right_pos)
line_list.append(current_middle_pos)
#print("current_left_pos, current_middle_pos, current_right_pos",current_left_pos, current_middle_pos, current_right_pos)
if current_middle_pos < 0:
current_middle_pos = 0
break
elif current_middle_pos >= line_image.shape[1]:
current_middle_pos = line_image.shape[1]-1
break
last_left_pos, last_middle_pos, last_right_pos = current_left_pos, current_middle_pos, current_right_pos
break
if double_line_row != -1:
double_line_pos = line_list[-1]
else:
#not find two line in whole image
#double_line_pos = -1
mean_pos = line_pos[0].mean()
lower_line = line_number+1
upper_line = line_number-1
lower_row_line = line_image[lower_line]
upper_row_line = line_image[upper_line]
lower_line_pos = np.where(lower_row_line==255)
upper_line_pos = np.where(upper_row_line==255)
if len(lower_line_pos[0])!=0:
lower_mean_pos = lower_line_pos[0].mean()
if lower_mean_pos>mean_pos:
#turn left
double_line_pos = line_pos[0][0] - 160
else:
double_line_pos = line_pos[0][0] + 160
elif len(upper_line_pos[0])!=0:
upper_mean_pos = upper_line_pos[0].mean()
if upper_mean_pos>mean_pos:
#turn right
double_line_pos = line_pos[0][0] + 160
else:
double_line_pos = line_pos[0][0] - 160
else:
double_line_pos = -1
return double_line_pos
@staticmethod
def SeeSixRoad(srcimg):
blackwallimg = ImageProcessor.preprocess(srcimg, 0.5) # Get cropped image and convert image wall to black
blackwallimg = cv2.medianBlur(blackwallimg, 5)
b_image, g_image, r_image = cv2.split(blackwallimg)
kernel = np.ones((3,3),np.uint8)
r_image_dilate = cv2.dilate(r_image, kernel, iterations =1)
g_image_dilate = cv2.dilate(g_image, kernel, iterations =1)
line_image = cv2.bitwise_and(r_image_dilate,g_image_dilate)
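        # the AND of the dilated red and green channels keeps only pixels where
        # both are lit, i.e. the yellow-ish lane markings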
row_line = line_image[15]
line_pos = np.where(row_line==255)
if len(line_pos[0])>10:
return True
else:
return False
@staticmethod
def CheckWrongDirectionByColor(srcimg, last_pos_list):
#return True means wrong direction
last_pos = np.array(last_pos_list).mean()
if abs(last_pos-160) < 10:
#Stright road, check if wrong way
blackwallimg = ImageProcessor.preprocess(srcimg, 0.5)
blackwallimg = cv2.medianBlur(blackwallimg, 5)
b_image, g_image, r_image = cv2.split(blackwallimg)
r_image_pos = np.where(r_image==255)
g_image_pos = np.where(g_image==255)
if len(r_image_pos[0]) > 50 and len(g_image_pos[0])>50:
if r_image_pos[1].mean() > g_image_pos[1].mean():
return True
else:
return False
return False
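    # As implemented above: on a near-straight heading (|last_pos - 160| < 10)
    # the red boundary pixels are expected to sit left of the green ones, so a
    # red mean-x greater than the green mean-x is flagged as the wrong way.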
@staticmethod
def GetPosByRowNumber(blackwallimg, row_number,last_pos):
b,g,r = cv2.split(blackwallimg)
kernel = np.ones((1,5),np.uint8)
r_dilated = cv2.dilate(r, kernel,iterations =1)
r_close = cv2.erode(r_dilated,kernel,iterations =1)
r_close[0:row_number] = np.zeros((row_number,320))
label_r = measure.label(r_close)
area_limit = 2
label_value_list = []
bigger_limit = 8000
if last_pos < 1:
last_pos = 180
elif last_pos > 319:
last_pos = 140
label_props = measure.regionprops(label_r)
for label_prop in label_props:
print("Label",label_prop.label,"Area",label_prop.area)
if label_prop.area < area_limit:
label_r[label_r == label_prop.label] = 0
elif label_prop.area > bigger_limit:
label_r[label_r == label_prop.label] = 0
else:
label_value_list.append(label_prop.label)
# for l in range(100):
# label_pos = np.where(label_r == l+1)
# print("l+1",l+1,"len",len(label_pos[0]))
# if len(label_pos[0]) == 0:
# break
# if len(label_pos[0]) < area_limit:
# label_r[label_r == l+1] = 0
# elif len(label_pos[0]) > bigger_limit:
# label_r[label_r == l+1] = 0
# else:
# label_value_list.append(l+1)
row_image = label_r[row_number]
        candidate_pos = []
        for l in label_value_list:
            l_pos = np.where(row_image == l)
            print(l, len(l_pos[0]), "max width", 20+row_number*3)
            if len(l_pos[0]) > 2 and len(l_pos[0]) < 20+row_number*3:
                if l_pos[0][0] == 0 or l_pos[0][-1] == 319:
                    continue
                label_pos = np.where( label_r == l )
                label_area = len(label_pos[0])
                if label_area > 100:
                    isMiddleLine = GreyLines.CheckLineWidth(label_r,l)
                    if isMiddleLine:
                        row_pos = l_pos[0].mean()
                        candidate_pos.append(row_pos)
                else:
                    row_pos = l_pos[0].mean()
                    candidate_pos.append(row_pos)
        print("candidate_pos", candidate_pos)
        if len(candidate_pos) == 0:
            return -1
        elif len(candidate_pos) == 1:
            return candidate_pos[0]
        else:
            # dist_last_pos = []
            # for c_pos in candidate_pos:
            #     dist_last_pos.append(abs(c_pos-last_pos))
            # candidate_idx = np.argmin(dist_last_pos)
            # return candidate_pos[candidate_idx]
            return np.array(candidate_pos).mean()
@staticmethod
def CheckLineWidth(label_image,label):
label_pos = np.where( label_image == label )
print("Cal label",label,time.time())
if len(label_pos[0]) == 0:
return 0
label_area = len(label_pos[0])
labelMask = np.zeros(label_image.shape, dtype="uint8")
labelMask[label_image == label] = 255
cnts = cv2.findContours(labelMask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
candidatecount = len(cnts[0])
width = label_area / candidatecount
label_mean_y = label_pos[0].mean()
print("label_width",width,label_mean_y,time.time())
        # (y limit, min width, max width) buckets: a label counts as the middle
        # line when its width falls strictly inside the bucket matching its
        # mean y position (lines look wider the closer they are to the camera)
        width_limits = [
            (10, 0, 2), (11, 0, 4), (13, 0, 7), (15, 0, 8),
            (20, 0, 11), (25, 0, 15), (30, 0, 19), (40, 0, 23),
            (50, 0, 30), (60, 0, 50), (70, 4, 80), (85, 5, 160),
        ]
        for y_limit, min_width, max_width in width_limits:
            if label_mean_y < y_limit:
                return min_width < width < max_width
        return False
@staticmethod
def findLineEnd(a, start_pos=0, noise_num=2):
for i in range(start_pos, len(a) - 1):
#print("{} delta:{}".format(i, a[i+1]-a[i]))
if (a[i+1] - a[i]) <= noise_num:
continue
else:
return i
return len(a) - 1
@staticmethod
def findLine(a, start_pos=0, min_line_pixels=3):
bFound = False
end_pos = GreyLines.findLineEnd(a, start_pos)
#print("findlineend:{}".format(end_pos))
while (end_pos - start_pos + 1 <= min_line_pixels and end_pos < len(a) - 1):
start_pos = end_pos + 1
end_pos = GreyLines.findLineEnd(a, start_pos)
if (end_pos - start_pos + 1 <= min_line_pixels):
continue
if end_pos - start_pos + 1 > min_line_pixels:
bFound = True
return (start_pos, end_pos, bFound)
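    # Worked example (illustrative): a = [3, 4, 5, 6, 40, 41, 42, 43, 44]
    #   findLineEnd(a, 0) -> 3   (the jump from 6 to 40 exceeds noise_num=2)
    #   findLine(a, 0)    -> (0, 3, True): a run of 4 pixels > min_line_pixels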
@staticmethod
def checkObstacle(raw_img, start_col=5, end_col=315, obs_min_width=10, obs_max_width=150):
gray_image = rgb2grey(raw_img)
#print(gray_image)
#ShowGrayImage(gray_image)
#print(gray_image)
black_img = gray_image < 0.05
#print(b)
#ShowGrayImage(black_img)
#label_b = measure.label(black_img)
revised_black_img = morphology.remove_small_holes(black_img, 16)
#ShowGrayImage(revised_black_img)
#skeleton = morphology.skeletonize(black_img)
#ShowGrayImage(skeleton)
check_img = revised_black_img
#revised_label_b = morphology.remove_small_holes(label_b, 9)
#ShowGrayImage(revised_label_b)
check_start_row = 100
check_end_row = 150
obs_lefts = []
if (raw_img.shape[0] < 150):
check_start_row = 10
check_end_row = 50
for x in range(check_start_row, check_end_row):
black_arrays = np.where(check_img[x,start_col:end_col] == 0)
curr_gray_row = np.asarray(gray_image[x,start_col:end_col])
# red pixels: 0.2 ~ 0.3
# blue pixels: 0.5 ~ 0.8
# black pixels: < 0.05
blue_pixels = ( (curr_gray_row > 0.5) & (curr_gray_row < 0.8) ).sum()
#print("col:{}, black_arrays:{}, blue_arrays:{}".format(x, len(black_arrays), blue_pixels ) )
if len(black_arrays[0]) > 10 and blue_pixels < 5:
check_lines = black_arrays[0]
obs_left = -1
obs_right = -1
# print("rows:{}".format(x))
# print(check_lines)
start_pos = 0
first_start_pos, first_end_pos, bFound = GreyLines.findLine(check_lines, start_pos)
#print("first_start:{}, first_end:{}".format(first_start_pos, first_end_pos))
if bFound:
second_start_pos, second_end_pos, bFound = GreyLines.findLine(check_lines, first_end_pos + 1)
#print("second_start:{}, second_end:{}".format(second_start_pos, second_end_pos))
if bFound:
third_start_pos, third_end_pos, bFound = GreyLines.findLine(check_lines, second_end_pos + 1)
#print("third_start:{}, third_end:{}".format(third_start_pos, third_end_pos))
if bFound:
obs_left = check_lines[second_start_pos]
obs_right = check_lines[second_end_pos]
if (obs_right - obs_left >= obs_min_width and obs_right - obs_left <= obs_max_width):
#print("col: {}, left:{}, right:{}".format(x, obs_left, obs_right))
obs_lefts.append(obs_left)
#print(gray_image[x,obs_left:obs_right])
#print(check_lines)
#print(gray_image[x,:])
#print("obs:{}".format(len(obs_lefts)))
if len(obs_lefts) >= 3:
return np.median(obs_lefts), True
else:
return -1, False
@staticmethod
def black_rgb(img):
b,g,r = cv2.split(img)
        black_filter = ((r < 60) & (g < 60) & (b < 60))
        r[black_filter], g[black_filter], b[black_filter] = 255, 255, 255
        r[~black_filter], g[~black_filter], b[~black_filter] = 0, 0, 0
flattened = cv2.merge((r, g, b))
flattened_gray = cv2.cvtColor(flattened,cv2.COLOR_BGR2GRAY)
return flattened_gray
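    # e.g. a pixel (30, 20, 10) (all channels < 60) becomes 255 in the returned
    # mask, while (200, 20, 10) becomes 0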
@staticmethod
def CheckSide(black_gray):
black_sum = black_gray.sum(axis=0).astype(int)
black_sum_diff = abs(np.diff(black_sum))
black_sum_diff[black_sum_diff<1000] = 0
black_sum_pos = np.where(black_sum_diff > 0)
#print(len(black_sum_diff),black_sum_diff)
# middle_pos = -1
# if len(black_sum_pos[0]) == 0:
# return middle_pos
left_sum = black_sum[0:160].sum()
right_sum = black_sum[160:320].sum()
diff = abs(left_sum - right_sum)
print("diff:".format(diff))
if left_sum > right_sum:
return 300, diff
else:
return 40, diff
@staticmethod
def GetTwoLinePosEx(lineImage,traceRow):
lineRow = lineImage[traceRow]
lineRowPos = np.where(lineRow == 255)
if len(lineRowPos[0]) == 0:
return -1
else:
return lineRowPos[0].mean()
@staticmethod
def GetOptimizedPos(srcimg, pre_pos,middle_number,trace_road):
black_src = GreyLines.black_rgb(srcimg)
print("Optimize pre_pos",pre_pos)
op_pos = pre_pos
# black_temp = None
# if pre_pos < 160:
# black_temp = black_src[int(2*middle_number):,:160]
# else:
# black_temp = black_src[int(2*middle_number):,160:]
        offset_flag = 2
        black_temp = black_src[int(2*middle_number):]
black_temp_sum = black_temp.sum(axis=0).astype(int)
#print("black_temp_sum",black_temp_sum)
black_sum = black_temp_sum[black_temp_sum>500]
#cv2.imwrite("../TestImage5/srcimg_"+str(time.time())+"_black_src_"+".bmp", black_src)
#cv2.imwrite("../TestImage5/srcimg_"+str(time.time())+"_black_tmp_"+".bmp", black_temp)
# if trace_road == 5:
# offset_flag = 2
# else:
# offset_flag = 1
if len(black_sum) > 0:
black_temp_sum_pos = np.where(black_temp_sum>500)
black_mean = black_temp_sum_pos[0].mean()#2*black_sum.mean()/255
print("black_mean",black_mean)
if black_mean < 160:
op_pos = pre_pos + offset_flag*black_mean
if op_pos < (320 + offset_flag*black_mean)/2:
op_pos = (320 + offset_flag*black_mean)/2
else:
op_pos = pre_pos - offset_flag*(320-black_mean)
if op_pos > (320 - offset_flag*(320-black_mean))/2:
op_pos = (320 - offset_flag*(320-black_mean))/2
if op_pos < 1 :
op_pos = 1
elif op_pos > 319:
op_pos = 319
return op_pos
@staticmethod
def GetWhiteRoadPos(srcimg):
blackimg = GreyLines.black_rgb(srcimg)
# for row in blackimg[:10]:
# black_len = len(row[row==255])
# if len(row) == black_len:
# return -1
blackSum = blackimg.sum(axis=0).astype(int)
#print(len(blackSum),blackSum)
blackSum[blackSum > 600] = 0
blackSum[blackSum>0] = 255
#blackSum = 255 - blackSum
#print("====",len(blackSum),blackSum)
#blackimg = blackimg[0]
kernel = np.ones((1,5),np.uint8)
#blackimg_dilated = cv2.dilate(blackSum, kernel,iterations =1)
blackimg_close = blackSum#cv2.erode(blackimg_dilated,kernel,iterations =1)
# blackimg_close[blackimg_close>50] = 255
# blackimg_close[blackimg_close<=50] = 0
# blackimg_close = 255 - blackimg_close
#
blackimg_label = measure.label(blackimg_close)
label_list = []
label_area_list = []
label_pos_list = []
for l in range(100):
l_pos = np.where(blackimg_label == l+1)
if len(l_pos[0]) == 0:
break
if l_pos[0][0] == 0 or l_pos[0][-1] == 319:
continue
label_list.append(l+1)
label_area_list.append(len(l_pos[0]))
label_pos_list.append(l_pos[0].mean())
if len(label_list) == 0:
return -1
elif len(label_list) == 1:
return label_pos_list[0]
else:
l_idx = np.argmax(label_area_list)
return label_pos_list[l_idx]
# label_props = measure.regionprops(blackimg_label)
# if len(label_props) == 0:
# return -1
# elif len(label_props) == 1:
# return label_props[0].centroid[1]
# else:
# max_area = 0
# max_area_label = -1
# for label_prop in label_props:
# if label_prop.area > max_area:
# max_area = label_prop.area
# max_area_label = label_prop.label
# return label_props[max_area_label].centroid[1]
@staticmethod
def GetCarPos(srcimg, last_pos_list, trace_road, current_speed, last_idx, isChangingRoad,changelane_direction):
#cv2.imwrite("TestImage/srcimg_"+str(time.time())+".bmp", srcimg[120:])
blackwallimg = ImageProcessor._flatten_rgb_old(srcimg[120:])
#cv2.imwrite("TestImage/blackwallimg_"+str(time.time())+".bmp", blackwallimg)
        middle_position = 160
last_pos = np.array(last_pos_list).mean()
if trace_road == 5:
car_left_pos, car_right_pos, car_top_pos, car_bottom_pos, bFoundCar = GreyLines.checkObstacle_v2(srcimg)
if bFoundCar:
#print("last_pos:{}".format(last_pos_list[last_idx]))
prev_turn_pos = last_pos_list[last_idx]
car_mid_pos = car_left_pos + (car_right_pos - car_left_pos)/2
flattened_gray = GreyLines.black_rgb(srcimg[120:])
pos, diff = GreyLines.CheckSide(flattened_gray)
curr_time = time.time()
time_str = str(curr_time)
if GreyLines.prev_obs_detect_time > 0:
time_diff = (curr_time - GreyLines.prev_obs_detect_time) * 1000
turn_time = 400
if time_diff < turn_time: #500 ms to turn around the car
curr_pos = round( GreyLines.prev_obs_turn_start_pos + (160 - GreyLines.prev_obs_turn_start_pos) * time_diff / turn_time, 1)
#print("car_pos:{}-{}, prev_turn:{}, curr_turn_pos:{}, diff:{}".format(car_left_pos, car_right_pos, last_pos_list[last_idx], curr_pos, diff))
logger.info("car_pos:{}-{}, prev_turn:{}, curr_turn_pos:{}, diff:{}".format(car_left_pos, car_right_pos, prev_turn_pos, curr_pos, diff))
cv2.imwrite("/log/TestImage/Car/srcimg_"+time_str+"_"+str(car_left_pos)+"-"+str(car_right_pos) +"_"+ str(last_pos_list[last_idx]) + "_" + str(curr_pos)+"_"+ str(diff) + ".bmp", srcimg)
return curr_pos, bFoundCar,False,0
GreyLines.prev_obs_detect_time = curr_time
obs_width = car_right_pos - car_left_pos
                if 100 <= obs_width < 150:
gear_gap = 40
new_turn_pos = prev_turn_pos
if prev_turn_pos < car_mid_pos:
pos_diff = abs(car_mid_pos - prev_turn_pos)
if pos_diff >= 120:
new_turn_pos = prev_turn_pos # no change
else:
if car_mid_pos < 120 + 40:
new_turn_pos = 320
else:
if pos_diff < 40:
new_turn_pos = prev_turn_pos - 100
else:
new_turn_pos = prev_turn_pos - 40
if new_turn_pos < 0:
new_turn_pos = 0
else:
pos_diff = abs(prev_turn_pos - car_mid_pos)
if pos_diff >= 120:
new_turn_pos = prev_turn_pos # no change
else:
if car_mid_pos > 200 + 40:
new_turn_pos = 0
else:
if pos_diff < 40:
new_turn_pos = prev_turn_pos + 100
else:
new_turn_pos = prev_turn_pos + 40
if new_turn_pos > 320:
new_turn_pos = 320
cv2.imwrite("/log/TestImage/Car/srcimg_"+time_str+"_"+str(car_left_pos)+"-"+str(car_right_pos) +"_"+ str(prev_turn_pos) + "_" + str(new_turn_pos)+"_"+ str(diff) + ".bmp", srcimg)
#print("car_pos:{}-{}, prev_turn:{}, turn_pos:{}, diff:{}".format(car_left_pos, car_right_pos, prev_turn_pos, new_turn_pos, diff))
logger.info("=> car_pos:{}-{}, prev_turn:{} turn_pos:{}, diff:{}".format(car_left_pos, car_right_pos,prev_turn_pos , new_turn_pos, diff))
GreyLines.prev_obs_turn_start_pos = new_turn_pos
return new_turn_pos, bFoundCar,False,0
        middle_trace_number = 9
        if current_speed < 0.5:
            middle_trace_number += 10
        elif current_speed < 1.0:
            middle_trace_number += 5
        elif current_speed < 1.4:
            middle_trace_number += 4
        elif current_speed < 1.5:
            middle_trace_number += 2
        elif current_speed < 1.6:
            middle_trace_number += 1
pos = GreyLines.GetPosByRowNumber(blackwallimg,middle_trace_number,last_pos_list[last_idx])
if pos != -1:
pos = GreyLines.GetOptimizedPos(srcimg[120:],pos,middle_trace_number,trace_road)
print("Optimized pos",pos)
if pos != -1:
#srcimg[120:][middle_trace_number] = srcimg[120:][middle_trace_number] + 20
#cv2.imwrite("TestImage1/srcimg_"+str(time.time())+"_"+str(pos)+".bmp", srcimg[120:])
# if pos < 60:
# pos = GreyLines.GetPosByRowNumber(blackwallimg,middle_trace_number+8,last_pos_list[last_idx])
#if current_speed > 0.1:
#cv2.imwrite("/Volumes/jeffrey/TestImage5/srcimg_"+str(time.time())+"_"+str(int(pos))+".bmp", srcimg)
#print time.time(), "Store Image"
#cv2.line(srcimg,(0,130),(319,130),(255,0,0))
#cv2.imwrite("../TestImage5/srcimg_"+str(time.time())+"_"+str(int(pos))+".bmp", srcimg)
return pos, False,False,0
else:
# if last_pos > 160:
# cv2.imwrite("TestImage2/2/srcimg_"+str(time.time())+"_"+str(320)+".bmp", srcimg[120:])
# return 320, False
# else:
# cv2.imwrite("TestImage2/1/srcimg_"+str(time.time())+"_"+str(0)+".bmp", srcimg[120:])
# return 0, False
# whitePos = GreyLines.GetWhiteRoadPos(srcimg[120:])
# if whitePos != -1:
# cv2.imwrite("../TestImage6/srcimg_"+str(time.time())+"_"+str(whitePos)+".bmp", srcimg[120:])
# whitePos = GreyLines.GetOptimizedPos(srcimg[120:],whitePos,9,trace_road)
# return whitePos, False,False,0
input_image = cv2.resize(srcimg[120:],(80,30),interpolation=cv2.INTER_CUBIC)
input_data = input_image[np.newaxis,:,:,:]
direction_prob = lostLine_model.predict(input_data)
direction = np.argmax(direction_prob)
logit("direction_prob"+str(direction_prob))
if direction == 0:
#cv2.imwrite("lostLine/1/blackwallimg_"+str(time.time())+"_"+str(-1)+"_"+str(0)+".bmp", blackwallimg)
# if current_speed >0.5:
# cv2.imwrite("../TestImage4/1/srcimg_"+str(time.time())+".bmp", srcimg[120:])
# pos = GreyLines.GetPosByRowNumber(blackwallimg,middle_trace_number+8,last_pos_list[last_idx])
# if pos == -1:
# return 0,False
# else:
# return pos, False
if current_speed > 0.5:
# cv2.imwrite("/Volumes/jeffrey/TestImage5/srcimg_"+str(time.time())+"_"+str(int(pos))+".bmp", srcimg)
# print time.time(), "Store Image model 0"
# cv2.line(srcimg,(0,130),(319,130),(255,0,0))
cv2.imwrite("../TestImage44/1/srcimg_"+str(time.time())+"_"+str(int(0))+".bmp", srcimg[120:])
return 0,False,False,0
else:
#cv2.imwrite("lostLine/2/blackwallimg_"+str(time.time())+"_"+str(-1)+"_"+str(320)+".bmp", blackwallimg)
# if current_speed >0.5:
# cv2.imwrite("../TestImage4/2/srcimg_"+str(time.time())+".bmp", srcimg[120:])
if current_speed > 0.5:
# cv2.imwrite("/Volumes/jeffrey/TestImage5/srcimg_"+str(time.time())+"_"+str(int(pos))+".bmp", srcimg)
# print time.time(), "Store Image model 320"
# cv2.line(srcimg,(0,130),(319,130),(255,0,0))
# cv2.imwrite("../TestImage5/srcimg_"+str(time.time())+"_"+str(int(320))+".bmp", srcimg)
cv2.imwrite("../TestImage44/2/srcimg_"+str(time.time())+"_"+str(int(0))+".bmp", srcimg[120:])
return 320,False,False,0
else:
trace_row = 14
black_row = 18
            if current_speed < 0.5:
                trace_row = 60
            elif current_speed < 1.0:
                trace_row += 15
            elif current_speed < 1.4:
                trace_row += 5
            elif current_speed < 1.5:
                trace_row += 4
            elif current_speed < 1.6:
                trace_row += 3
            elif current_speed < 1.8:
                trace_row += 2
            elif current_speed < 1.9:
                trace_row += 1
# if current_speed <0.5:
# trace_row = 60
# elif current_speed <1.0:
# trace_row = 30
# elif current_speed <1.2:
# trace_row = 15
# elif current_speed <1.4:
# trace_row = 14
# elif current_speed <1.6:
# trace_row = 14
# elif current_speed <1.8:
# trace_row = 14
# elif current_speed <1.9:
# trace_row = 15
# elif current_speed <1.98:
# trace_row = 14
blackwallimg = ImageProcessor.preprocess(srcimg[120:], 0.5) # Get cropped image and convert image wall to black
#blackwallimg = cv2.medianBlur(blackwallimg, 5)
b_image, g_image, r_image = cv2.split(blackwallimg)
line_image,isSeeSixRoad,label_number = GreyLines.GetTwoLineImage(srcimg[120:],isChangingRoad,changelane_direction)
row_line = line_image[trace_row]
            row_line_pos = np.where(row_line == 255)
import numpy as np
import geometry as geom
from numpy import linalg as la
def calculate_form_value(M, b, vec):
    # Compute the value of the form M(x,x) - b(x) at the vector vec
res = np.dot(np.transpose(vec),
np.dot(M, vec)) + np.dot(np.transpose(b), vec)
    try:
return res[0, 0]
except IndexError:
return res
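# Worked example (illustrative): for M = [[2, 0], [0, 2]], b = [1, 0] and
# vec = [1, 1], the value is vec^T M vec + b^T vec = 4 + 1 = 5.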
def solve_1dim(M, b, cond, cond_const, cond_eq, cond_eq_const):
eq_index = 0
free_index = 1
M_diff = 2 * M
b_diff = -b
if cond_eq[0] == 0:
eq_index = 1
free_index = 0
M_diff[eq_index] = cond_eq
b[eq_index] = cond_eq_const
force_vert_check = False
    if la.det(M_diff) == 0: # Along the line, the distance to the axis may stay constant
        # In that case the minimum is the form's value at some point of the segment
        force_vert_check = True # e.g. at a vertex
else:
linear_global_min = la.solve(M_diff, b)
        # Look for the minimum on the line containing the edge
conditions_check = np.matmul(cond, linear_global_min)
        inside = np.all(np.less_equal(conditions_check, cond_const))
import numpy as np
import matplotlib.pyplot as plt
import pydrake.solvers.mathematicalprogram as mp
from pydrake.solvers.gurobi import GurobiSolver
solver = GurobiSolver()
#%%
'''
1D point mass collides with a spring-damper system. No gravity.
Even without damping (d=0), numerical damping arises as a side effect of
implicit-Euler integration and grows with the time step h. This damping
seems substantial when h=0.1.
'''
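# A minimal sketch (not part of the original script) of the numerical damping
# described above: integrate an undamped spring with implicit Euler and watch
# the total energy decay. All names and values below are local to this demo.
def _implicit_euler_damping_demo(m=1.0, k=100.0, h=0.1, steps=100):
    q, v = 0.0, -0.1
    e0 = 0.5 * m * v ** 2 + 0.5 * k * q ** 2
    for _ in range(steps):
        # implicit Euler: m * (v_next - v) = -k * (q + h * v_next) * h
        v = (m * v - k * h * q) / (m + k * h ** 2)
        q = q + h * v
    e = 0.5 * m * v ** 2 + 0.5 * k * q ** 2
    return e / e0  # < 1: energy lost purely to the integrator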
m = 1.0 # kg
v0 = -0.1 # m/s
k = 100. # N/m
h = 0.1 # s, time step.
d = 1 # damping
q_a_cmd_next = 0. # holding spring at origin.
def dynamics(q_u, q_a, v_u):
prog = mp.MathematicalProgram()
v_next = prog.NewContinuousVariables(2, "v")
# v_u_next = v_next[0]
# v_a_next = v_next[1]
dq_a_cmd = q_a_cmd_next - q_a
tau_h = np.array([m * v_u, k * dq_a_cmd * h])
    Q = np.diag([m, k * h**2 + d * h])
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from .quantizer import Quantizer
class BinaryQuantizer(Quantizer):
def __init__(self) -> None:
super().__init__(1)
class binary_mean_scaling_quantizer(BinaryQuantizer):
def __init__(self):
super().__init__()
def post(self, x: np.ndarray) -> np.ndarray:
return np.mean(np.absolute(x)).astype(np.float32)
def pre(self, x: np.ndarray) -> np.ndarray:
y = np.sign(x)
y[y < 0] = 0
return y.astype(np.int32)
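# Illustrative usage (assumed, not from the original file):
#   q = binary_mean_scaling_quantizer()
#   w = np.array([[-0.5, 0.25], [0.75, -1.0]], dtype=np.float32)
#   q.pre(w)  -> [[0, 1], [1, 0]] (int32 sign mask, negatives clamped to 0)
#   q.post(w) -> 0.625 (mean absolute value, used as the scaling factor)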
class binary_channel_wise_mean_scaling_quantizer(BinaryQuantizer):
def __init__(self):
super().__init__()
def post(self, x: np.ndarray) -> np.ndarray:
return np.mean(np.absolute(x), axis=(1, 2, 3)).astype(np.float32)
def pre(self, x: np.ndarray) -> np.ndarray:
        y = np.sign(x)
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from CGMSData import CGMSData
from mixup import MixUp
class CGMSDataSeg(CGMSData):
"""Data set"""
def __init__(self, fmt, filepath, sampling_interval):
super().__init__(fmt, filepath, sampling_interval)
self._feature = None
self._hypo_th = 80
self._border_th = 10
self._hypo_train_x = None
self._hypo_train_y = None
self.border_train_x = None
self.border_train_y = None
self._nonhypo_train_x = None
self._nonhypo_train_y = None
self._original_train_x = None
self._original_train_y = None
self.gan_data = None
self.alpha = 0.4
def _build_dataset(self, beg, end, padding):
print(f"Building dataset, requesting data from {beg} to {end}")
x, y = [], []
l = self.sampling_horizon + self.prediction_horizon
for j in range(beg, end):
d = np.array(self.data[j])
for i in range(
d.size - self.sampling_horizon - self.prediction_horizon + 1
):
if self.feature is not None:
x.append(
np.hstack((self.feature[j], d[i : (i + self.sampling_horizon)]))
)
else:
x.append(d[i : (i + self.sampling_horizon)])
if padding == "History":
y.append(d[(i + self.sampling_horizon) : (i + l)])
else:
y.append(d[i + l - 1])
if padding == "None" or padding == "History":
return np.array(x), np.array(y)
if padding == "Same":
return np.array(x), np.tile(y, [self.sampling_horizon, 1]).T
raise ValueError("Unsupported padding " + padding)
@property
def feature(self):
return self._feature
@feature.setter
def feature(self, value):
if len(value) != len(self.data):
print("Feature and data length mismatch, set to None")
self._feature = None
else:
self._feature = value
def reset(
self,
sampling_horizon,
prediction_horizon,
scale,
train_test_ratio,
smooth,
padding,
target_weight,
standardize=False,
):
self.sampling_horizon = sampling_horizon
self.prediction_horizon = prediction_horizon
self.scale = scale
self.train_test_ratio = train_test_ratio
if smooth:
window_length = sampling_horizon
if window_length % 2 == 0:
window_length += 1
self._smooth(window_length, window_length - 4)
x, y = self._build_dataset(0, len(self.data), padding)
train_ratio = self.train_test_ratio / (1 + self.train_test_ratio)
c = int(x.shape[0] * train_ratio)
if self.set_cutpoint:
if self.set_cutpoint < x.shape[0] and self.set_cutpoint > 0:
c = self.set_cutpoint
elif self.set_cutpoint < 0:
print("Train data requested beyond limit, using all but last one")
c = -2
self._original_train_x = x[0:c]
self._original_train_y = y[0:c]
# detect hypo in training data
if padding == "Same":
hypo_loc = np.where(self._original_train_y[:, 0] < self._hypo_th)
            border_loc = np.where(
                np.abs(self._original_train_y[:, 0] - self._hypo_th)
                < self._border_th)  # assumed closing condition: within _border_th of the threshold
from __future__ import division
from __future__ import print_function
import os, time, scipy.io, shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import glob
import re
import cv2
from utils import *
from model import *
import torchvision
import pdb
# DoRF--database of real-world camera response functions
# http://www.cs.columbia.edu/CAVE
def load_CRF():
CRF = scipy.io.loadmat('matdata/201_CRF_data.mat')
B_gl = CRF['B']
I_gl = CRF['I']
CRF_para = np.array(CRF_function_transfer(I_gl, B_gl))
iCRF_para = 1. / CRF_para
return CRF_para, iCRF_para
def load_checkpoint(checkpoint_dir):
if os.path.exists(checkpoint_dir + 'checkpoint.pth.tar'):
# load existing model
model_info = torch.load(checkpoint_dir + 'checkpoint.pth.tar')
print('==> loading existing model:', checkpoint_dir + 'checkpoint.pth.tar')
model = CBDNet()
model.cuda()
model.load_state_dict(model_info['state_dict'])
optimizer = torch.optim.Adam(model.parameters())
optimizer.load_state_dict(model_info['optimizer'])
cur_epoch = model_info['epoch']
else:
# create model
model = CBDNet()
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
cur_epoch = 0
return model, optimizer, cur_epoch
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, checkpoint_dir + filename)
    if is_best:
        shutil.copyfile(checkpoint_dir + filename, checkpoint_dir + 'model_best.pth.tar')
def adjust_learning_rate(optimizer, epoch, lr_update_freq):
if not epoch % lr_update_freq and epoch:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.1
return optimizer
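# e.g. with lr_update_freq=100 the learning rate is multiplied by 0.1 at
# epochs 100, 200, ... (epoch 0 is skipped by the `and epoch` guard above)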
if __name__ == '__main__':
input_dir = './dataset/synthetic/'
checkpoint_dir = './checkpoint/synthetic/'
result_dir = './result/synthetic/'
save_freq = 100
lr_update_freq = 100
CRF_para, iCRF_para = load_CRF()
train_fns = glob.glob(input_dir + '*.bmp')
origin_imgs = [None] * len(train_fns)
noise_imgs = [None] * len(train_fns)
noise_levels = [None] * len(train_fns)
for i in range(len(train_fns)):
origin_imgs[i] = []
noise_imgs[i] = []
noise_levels[i] = []
model, optimizer, cur_epoch = load_checkpoint(checkpoint_dir)
# load pretrained model
model_info = torch.load("checkpoint/CBDNet.pth")
model.load_state_dict(model_info)
criterion = fixed_loss()
criterion = criterion.cuda()
for epoch in range(cur_epoch, 201):
cnt=0
losses = AverageMeter()
optimizer = adjust_learning_rate(optimizer, epoch, lr_update_freq)
model.train()
for ind in np.random.permutation(len(train_fns)):
train_fn = train_fns[ind]
if not len(origin_imgs[ind]):
origin_img = cv2.imread(train_fn)
                h, w, c = origin_img.shape
                h = (h // 4) * 4
                w = (w // 4) * 4
                origin_img = cv2.resize(origin_img, (w, h))  # cv2.resize takes (width, height)
origin_img = origin_img[:,:,::-1] / 255.0
origin_imgs[ind] = np.array(origin_img).astype('float32')
# re-add noise
if epoch % save_freq == 0:
noise_imgs[ind] = []
noise_levels[ind] = []
if len(noise_imgs[ind]) < 1:
noise_img, noise_level = AddRealNoise(origin_imgs[ind][:, :, :], CRF_para, iCRF_para)
noise_imgs[ind].append(noise_img)
noise_levels[ind].append(noise_level)
st = time.time()
for nind in np.random.permutation(len(noise_imgs[ind])):
temp_origin_img = origin_imgs[ind]
temp_noise_img = noise_imgs[ind][nind]
temp_noise_level = noise_levels[ind][nind]
if np.random.randint(2, size=1)[0] == 1:
temp_origin_img = np.flip(temp_origin_img, axis=1)
temp_noise_img = np.flip(temp_noise_img, axis=1)
temp_noise_level = np.flip(temp_noise_level, axis=1)
if np.random.randint(2, size=1)[0] == 1:
temp_origin_img = np.flip(temp_origin_img, axis=0)
                temp_noise_img = np.flip(temp_noise_img, axis=0)
                temp_noise_level = np.flip(temp_noise_level, axis=0)
|
import os
#import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray, gray2rgb
import time
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import warp, downscale_local_mean, resize, SimilarityTransform
from skimage.io import imread, imsave
import time
########################################################################
def focusmeasure(img):
from scipy.ndimage import convolve
from scipy.ndimage import correlate1d
from scipy.ndimage.filters import uniform_filter
# M = [-1 2 - 1];
# Lx = imfilter(Image, M, 'replicate', 'conv');
# Ly = imfilter(Image, M', 'replicate', 'conv');
# FM = abs(Lx) + abs(Ly);
# FM = mean2(FM);
img = rgb2gray(img)
M = np.array([-1, 2, -1])
    img1 = correlate1d(img, M, axis=-1, mode='constant', cval=0.0)  # horizontal response, Lx
    img2 = correlate1d(img, M, axis=0, mode='constant', cval=0.0)   # vertical response, Ly
    img = np.abs(img1) + np.abs(img2)
    return np.mean(img)  # FM = mean2(FM) in the MATLAB reference above
|
"""
The :mod:`fatf.transparency.predictions.surrogate_image_explainers` module
implements a bLIMEy version of the LIME surrogate image explainer.
.. versionadded:: 0.1.1
The ``scikit-learn``, ``scikit-image`` and ``Pillow`` packages are required for
the surrogate image explainer to work.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from numbers import Number
from typing import Callable, Dict, List, Optional, Tuple, Union
import logging
import warnings
import scipy.spatial
import numpy as np
from fatf.exceptions import IncompatibleModelError
import fatf.utils.data.instance_augmentation as fatf_augmentation
import fatf.utils.kernels as fatf_kernels
import fatf.utils.models.models as fatf_models
import fatf.utils.models.processing as fatf_processing
import fatf.utils.models.validation as fatf_validation
__all__ = ['ImageBlimeyLime']
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
# pylint: disable=ungrouped-imports
import sklearn.linear_model
import fatf.transparency.sklearn.linear_model as fatf_linear_explainer
import fatf.utils.data.occlusion as fatf_occlusion
import fatf.utils.data.segmentation as fatf_segmentation
except ImportError as _exc:
_err = ( # pylint: disable=invalid-name
'The ImageBlimeyLime surrogate image explainer requires scikit-learn, '
'scikit-image and Pillow to be installed.\n\n{}')
raise ImportError(_err.format(str(_exc)))
Explanation = Dict[str, Number]
ExplanationTuple = Union[Explanation, Tuple[Explanation, fatf_models.Model]]
RGBcolour = Tuple[int, int, int]
ColourFn = Callable[[np.ndarray], np.ndarray]
class ImageBlimeyLime(object): # pylint: disable=useless-object-inheritance
"""
Implements a surrogate image explainer equivalent to LIME.
.. versionadded:: 0.1.1
By default this explainer uses *quickshift* segmentation
(the :class:`fatf.utils.data.segmentation.QuickShift` class) and
mean-colour occlusion
(the :class:`fatf.utils.data.occlusion.Occlusion` class).
    It uses the cosine distance transformed through the exponential kernel
to generate similarity scores between the binary representation of the
explained instance and the data sample.
It works with both crisp and probabilistic classifiers;
it assumes the latter by default (``as_probabilistic=True``).
Parameters
----------
image : numpy.ndarray
A numpy array representing an image to be explained.
predictive_model : object
A pre-trained (black-box) predictive model to be explained. If
``as_probabilistic`` (see below) is set to ``True``, it must have a
``predict_proba`` method that takes a data set as the only required
input parameter and returns a 2-dimensional numpy array with
probabilities of belonging to each class. Otherwise, if
``as_probabilistic`` is set to ``False``, the ``predictive_model`` must
have a ``predict`` method that outputs a 1-dimensional array with
(class) predictions.
as_probabilistic : boolean, optional (default=True)
A boolean indicating whether the global model is probabilistic. If
``True``, the ``predictive_model`` must have a ``predict_proba``
method. If ``False``, the ``predictive_model`` must have a ``predict``
method.
class_names : List[string, integer], optional (default=None)
        A list of strings or integers corresponding to the names of classes.
If the predictive model is probabilistic, the order of the class names
should correspond to the order of columns output by the model.
For crisp models the order is irrelevant.
segmentation_mask : numpy.ndarray, optional (default=None)
A numpy array representing an image to be used for generating the
segmentation. If this parameter is not provided, the ``image`` will
be used to generate the segmentation.
segments_merge_list : list(integer) or list(list(integer)), \
optional (default=None)
A collection or a set of collections of segment ids to be merged.
See the documentation of the
:func:`fatf.utils.data.segmentation.Segmentation.merge_segments` method
for more details.
ratio : number, optional (default=0.2)
Balances color-space proximity and image-space proximity for
the **quickshift** segmenter.
Higher values give more weight to color-space.
Between 0 and 1.
kernel_size : number, optional (default=4)
Width of Gaussian kernel used in smoothing the sample density for
the **quickshift** segmenter.
Higher means fewer clusters.
max_dist : number, optional (default=200)
Cut-off point for data distances for the **quickshift** segmenter.
Higher means fewer clusters.
colour : string, integer, tuple(integer, integer, integer), \
optional (default=None)
An occlusion colour specifier.
By default (``colour=None``) the mean colouring strategy is used.
See the documentation of the
:func:`fatf.utils.data.occlusion.Occlusion.set_colouring_strategy`
method for more details.
Raises
------
IncompatibleModelError
The ``predictive_model`` does not have the required functionality:
        a ``predict_proba`` method for probabilistic models and a ``predict``
        method for crisp classifiers.
RuntimeError
The number of class names provided via the ``class_names`` parameter
does not agree with the number of classes output by the probabilistic
model.
TypeError
The ``as_probabilistic`` parameter is not a boolean.
The ``class_names`` parameter is neither a list nor ``None``.
Some of the elements in the ``class_names`` list are neither a string
nor an integer.
ValueError
The ``class_names`` list is empty or it contains duplicates.
Attributes
----------
image : numpy.ndarray
A numpy array representing an image to be explained.
segmentation_mask : numpy.ndarray
A numpy array representing an image used to perform segmentation.
segmenter : fatf.utils.data.segmentation.Segmentation
A *quickshift* image segmenter
(:class:`fatf.utils.data.segmentation.QuickShift`).
occluder : fatf.utils.data.occlusion.Occlusion
An image occluder (:class:`fatf.utils.data.occlusion.Occlusion`).
as_probabilistic : boolean
``True`` if the ``predictive_model`` should be treated as
probabilistic and ``False`` if it should be treated as a classifier.
predictive_model : object
A pre-trained (black-box) predictive model to be explained.
predictive_function : Callable[[numpy.ndarray], numpy.ndarray]
A function that will be used to get predictions from the explained
predictive model. It references the ``predictive_model.predict_proba``
        method for probabilistic models (``as_probabilistic=True``) and the
``predictive_model.predict`` method for crisp classifiers.
image_prediction : Union[string, integer]
The prediction of the explained image. For probabilistic models it is
the index of the class assigned to this instance by the explained
model; for crisp classifier it is the predicted class.
classes_number : integer or None
The number of modelled classes for probabilistic models;
``None`` for crisp classifiers unless ``class_names`` was provided.
class_names : List[string] or None
A list of class names that can be predicted by the explained model.
        For probabilistic models these are in the order they appear in the
probability vector output by the model.
There is no particular order for crisp predictors.
surrogate_data_sample : numpy.ndarray or None
A binary data sample generated during the last call of the
``explain_instance`` method.
surrogate_data_predictions : numpy.ndarray or None
Predictions of the explained model for the binary data sample (reversed
to the image representation) generated during the last call of the
``explain_instance`` method.
similarities : numpy.ndarray or None
Similarities between the explained instance and the sampled data
computed in the binary domain using the cosine distance transformed
thorough the exponential kernel and generated during the last call of
the ``explain_instance`` method.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self,
image: np.ndarray,
predictive_model: object,
as_probabilistic: bool = True,
class_names: Optional[Union[List[str], List[int]]] = None,
segmentation_mask: Optional[np.ndarray] = None,
segments_merge_list: Union[None, List[int], List[
List[int]]] = None,
ratio: float = 0.2,
kernel_size: float = 4,
max_dist: float = 200,
colour: Optional[Union[str, int, RGBcolour]] = None):
"""Constructs a bLIMEy LIME image explainer."""
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
# pylint: disable=too-many-statements
# The image and the segmentation mask in numpy representation
self.image = image.copy()
if segmentation_mask is None:
self.segmentation_mask = self.image.copy()
else:
self.segmentation_mask = segmentation_mask.copy()
if not isinstance(as_probabilistic, bool):
raise TypeError(
'The as_probabilistic parameter must be a boolean.')
self.as_probabilistic = as_probabilistic
if self.as_probabilistic:
is_functional = fatf_validation.check_model_functionality(
predictive_model, True, False)
if not is_functional:
raise IncompatibleModelError(
'With as_probabilistic set to True the predictive model '
'needs to be capable of outputting probabilities via '
'a *predict_proba* method, which takes exactly one '
'required parameter -- data to be predicted -- and '
'outputs a 2-dimensional array with probabilities.')
else:
is_functional = fatf_validation.check_model_functionality(
predictive_model, False, False)
if not is_functional:
raise IncompatibleModelError(
'With as_probabilistic set to False the predictive model '
'needs to be capable of outputting (class) predictions '
'via a *predict* method, which takes exactly one required '
'parameter -- data to be predicted -- and outputs a '
'1-dimensional array with (class) predictions.')
self.predictive_model = predictive_model
if self.as_probabilistic:
predictive_function = \
self.predictive_model.predict_proba # type: ignore
image_prediction = predictive_function([self.image])[0]
classes_number = image_prediction.shape[0]
image_prediction = int(np.argmax(image_prediction))
else:
predictive_function = self.predictive_model.predict # type: ignore
classes_number = None
image_prediction = predictive_function([self.image])[0]
self.predictive_function = predictive_function
self.image_prediction = image_prediction
self.classes_number = classes_number
if class_names is not None:
if isinstance(class_names, list):
if not class_names:
raise ValueError('The class_names list cannot be empty.')
if len(class_names) != len(set(class_names)):
raise ValueError('The class_names list contains '
'duplicated entries.')
_chosen_type = type(class_names[0])
if _chosen_type is int or _chosen_type is str:
_chosen_error = False
for class_name in class_names:
if not isinstance(class_name, _chosen_type):
_chosen_error = True
break
else:
_chosen_error = True
class_name = class_names[0]
if _chosen_error:
raise TypeError('All elements of the class_names '
'list must be strings or integers; '
'*{}* is not.'.format(class_name))
if self.classes_number is None:
self.classes_number = len(class_names)
else:
if self.classes_number != len(class_names):
raise RuntimeError('The number of class names does '
'not correspond to the shape of '
'the model predictions.')
else:
raise TypeError('The class_names parameter must be a Python '
'list or None.')
self.class_names = class_names
logger.debug('Building segmentation.')
self.segmenter = fatf_segmentation.QuickShift(
self.image,
segmentation_mask=self.segmentation_mask,
ratio=ratio,
kernel_size=kernel_size,
max_dist=max_dist)
if segments_merge_list is not None:
self.segmenter.merge_segments(segments_merge_list, inplace=True)
logger.debug('Building occlusion.')
self.occluder = fatf_occlusion.Occlusion(
self.image, self.segmenter.segments, colour=colour)
# Placeholder to memorise the last data sample for training surrogates
self.surrogate_data_sample = None # type: Union[None, np.ndarray]
self.surrogate_data_predictions = None # type: Union[None, np.ndarray]
self.similarities = None # type: Union[None, np.ndarray]
def set_occlusion_colour(self, colour):
"""
Sets the occlusion colour.
See the documentation of the
:func:`fatf.utils.data.occlusion.Occlusion.set_colouring_strategy`
method for more details.
"""
self.occluder.set_colouring_strategy(colour)
def explain_instance(self,
explained_class: Optional[Union[int, str]] = None,
samples_number: int = 50,
batch_size: int = 50,
kernel_width: float = .25,
colour: Optional[Union[str, int, RGBcolour]] = None,
reuse_sample: bool = False,
return_model: bool = False) -> ExplanationTuple:
"""
Explains the image used to initialise this class.
Parameters
----------
explained_class : Union[integer, string], optional (default=None)
The class to be explained. By default (``explained_class=None``)
the class predicted by the explained model for the explained image
will be used.
For probabilistic models this can be the index of the class in the
probability vector output by the explained model or the name of the
class if ``class_names`` parameter was provided while initialising
this class.
For crisp classifiers this has to be one of the values predicted by
the explained model.
samples_number : integer, optional (default=50)
The number of data points sampled from the random binary generator
to be used for fitting the local surrogate model.
batch_size : integer, optional (default=50)
The number of images to be processed in one iteration. Since this
step is computationally expensive -- images need to be generated
and occluded according to the binary data sample, and then
predicted by the explained model -- the data points can be
processed in fixed-size batches.
kernel_width : float, optional (default=0.25)
The width of the exponential kernel used when computing weights of
the binary sampled data based on the cosine distances between them
and the explained image.
colour : string, integer, tuple(integer, integer, integer), \
optional (default=None)
An occlusion colour specifier.
By default (``colour=None``) the colour specified when initialising
this class is used.
See the documentation of the
:func:`fatf.utils.data.occlusion.Occlusion.set_colouring_strategy`
method for more details.
reuse_sample : boolean, optional (default=False)
Whether to generate a new binary data sample or reuse the one
generated with the last call of this method.
        return_model : boolean, optional (default=False)
If ``True``, this method will return both the feature importance
explanation and the local surrogate model.
Otherwise, only the explanation is returned.
Warns
-----
UserWarning
Informs the user if none of the sampled data were predicted with
the explained class when explaining a crisp model -- such a
situation will most probably result in unreliable explanations.
Raises
------
IndexError
The name of the class chosen to be explained could not be located
among the class names provided upon initialising this object.
The index of the explained class -- when explaining a probabilistic
model -- is invalid.
RuntimeError
Some of the cosine distances could not be computed due to a
numerical error.
The data sample cannot be reused without calling this method at
least once beforehand.
A class name cannot be used when explaining a probabilistic model
without initialising this object with class names.
TypeError
The ``return_model`` or ``reuse_sample`` parameter is not a
boolean.
The ``explained_class`` parameter is neither of ``None``, a string
or an integer.
Returns
-------
explanations : Dictionary[string, float]
A dictionary containing image segment importance (extracted
from the local linear surrogate).
models : sklearn.linear_model.base.LinearModel, optional
A locally fitted surrogate linear model.
This model is only returned when ``return_model=True``.
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
# pylint: disable=too-many-statements
if not isinstance(return_model, bool):
raise TypeError('The return_model parameter should be a boolean.')
if not isinstance(reuse_sample, bool):
raise TypeError('The reuse_sample parameter should be a boolean.')
if not (explained_class is None or isinstance(explained_class,
(int, str))):
raise TypeError('The explained_class parameter must be either of '
'None, a string or an integer.')
if explained_class is None:
explained_class = self.image_prediction
assert explained_class is not None, 'Filtered out before.'
if self.as_probabilistic:
            assert self.classes_number is not None, 'Inferred from the model.'
if isinstance(explained_class, int):
if (explained_class < 0
or explained_class >= self.classes_number):
raise IndexError('The explained class index is invalid.')
else:
if self.class_names is None:
raise RuntimeError(
'It is not possible to use a name for the explained '
'class without initialising this explainer with a '
'list of class names (the *class_names* parameter).')
if explained_class not in self.class_names:
raise IndexError(
'The name of the explained class could not be '
'found in the list of class names used to '
'initialise this explainer (the *class_names* '
'parameter).')
explained_class = self.class_names.index(
explained_class) # type: ignore
else:
if self.class_names is not None:
if explained_class not in self.class_names:
raise IndexError(
'The name of the explained class could not be found '
'in the list of class names used to initialise this '
'explainer (the *class_names* parameter).')
if reuse_sample:
logger.debug('Reusing the sample.')
if (self.surrogate_data_sample is None
or self.surrogate_data_predictions is None
or self.similarities is None):
raise RuntimeError('You need to explain an instance before '
'being able to reuse its (random) sample.')
else:
# Generate binary samples in the interpretable domain
logger.debug('Generating a sample.')
self.surrogate_data_sample = \
fatf_augmentation.random_binary_sampler(
self.segmenter.segments_number, samples_number)
# Build interpretable representation of the explained instance
explained_instance_ir = np.ones(
shape=(1, self.surrogate_data_sample.shape[1]), dtype=np.int8)
# Get distances to the sampled data
logger.debug('Computing distances.')
distances = scipy.spatial.distance.cdist(
explained_instance_ir,
self.surrogate_data_sample,
'cosine').flatten() # yapf: disable
# all-0 vectors nan-out cosine similarity
_all_zero = self.surrogate_data_sample.sum(axis=1)
_all_zero_mask = (_all_zero == 0)
if _all_zero_mask.any():
assert np.isnan(distances[_all_zero_mask]).all(), 'nans.'
logger.debug('Setting the distance to all-0 vectors to 1.')
distances[_all_zero_mask] = 1 # similarity is 0
assert not np.isnan(distances).any(), 'Do not expect any nans.'
# Kernelise the distance
logger.debug('Transforming distances into similarities.')
self.similarities = fatf_kernels.exponential_kernel(
distances, width=kernel_width)
if colour is None:
transformation_fn = \
self.occluder.occlude_segments_vectorised # type: ColourFn
else:
transformation_fn = lambda data: \
self.occluder.occlude_segments_vectorised( # noqa: E731
data, colour=colour)
# Transform to images and predict the sampled data
iter_ = fatf_processing.batch_data(
self.surrogate_data_sample,
batch_size=batch_size,
transformation_fn=transformation_fn)
sample_predictions = []
logger.debug('Reconstructing and predicting images.')
for batch in iter_:
sample_predictions.append(self.predictive_function(batch))
if self.as_probabilistic:
self.surrogate_data_predictions =
|
np.vstack(sample_predictions)
|
numpy.vstack
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
import pytest
from adnc.model.memory_units.base_cell import BaseMemoryUnitCell
@pytest.fixture(
params=[{"seed": 123, "input_size": 13, "batch_size": 3, "memory_length": 4, "memory_width": 4, "read_heads": 3,
"dnc_norm": True, "bypass_dropout": False},
{"seed": 124, "input_size": 11, "batch_size": 3, "memory_length": 256, "memory_width": 23, "read_heads": 2,
"dnc_norm": False, "bypass_dropout": False},
{"seed": 125, "input_size": 5, "batch_size": 3, "memory_length": 4, "memory_width": 11, "read_heads": 8,
"dnc_norm": True, "bypass_dropout": True},
{"seed": 126, "input_size": 2, "batch_size": 3, "memory_length": 56, "memory_width": 9, "read_heads": 11,
"dnc_norm": False, "bypass_dropout": True}
])
def memory_config(request):
config = request.param
return BaseMemoryUnitCell(input_size=config['input_size'], memory_length=config["memory_length"],
memory_width=config["memory_width"],
read_heads=config["read_heads"], seed=config["seed"],
reuse=False, name='test_mu'), config
@pytest.fixture()
def session():
with tf.Session() as sess:
yield sess
tf.reset_default_graph()
@pytest.fixture()
def np_rng():
seed = np.random.randint(1, 999)
return np.random.RandomState(seed)
class TestDNCMemoryUnit():
def test_init(self, memory_config):
memory_unit, config = memory_config
assert isinstance(memory_unit, object)
assert isinstance(memory_unit.rng, np.random.RandomState)
assert memory_unit.h_N == config["memory_length"]
assert memory_unit.h_W == config["memory_width"]
assert memory_unit.h_RH == config["read_heads"]
def test_property_output_size(self, memory_config, session):
memory_unit, config = memory_config
output_size = memory_unit.output_size
assert output_size == config['memory_width'] * config["read_heads"] + config['input_size']
def test_calculate_content_weightings(self, memory_config, session, np_rng):
memory_unit, config = memory_config
np_memory = np_rng.normal(0, 1, (config['batch_size'], config['memory_length'], config['memory_width']))
np_keys = np_rng.normal(0, 2, (config['batch_size'], 1, config['memory_width']))
np_strengths = np_rng.uniform(1, 10, (config['batch_size'], 1))
memory = tf.constant(np_memory, dtype=tf.float32)
keys = tf.constant(np_keys, dtype=tf.float32)
strengths = tf.constant(np_strengths, dtype=tf.float32)
content_weightings = memory_unit._calculate_content_weightings(memory, keys, strengths)
weightings = content_weightings.eval()
np_similarity =
|
np.empty([config['batch_size'], config['memory_length']])
|
numpy.empty
|
"""
Plotting
"""
# from matplotlib.colors import LogNorm
# from matplotlib.ticker import ScalarFormatter
import matplotlib.pyplot as plt
# from matplotlib.cm import ScalarMappable
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os
from scipy import fftpack
from scipy.fftpack import fft, ifft, rfft, fftfreq
from xseis import xutil
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 11, 8
def sigs(d, shifts=None, labels=None, **kwargs):
if shifts is None:
shifts = np.arange(0, d.shape[0], 1) * 1.0
for i, sig in enumerate(d):
tmp = sig / np.max(np.abs(sig)) + shifts[i]
plt.plot(tmp, **kwargs)
if labels is not None:
for i, lbl in enumerate(labels):
plt.text(0, shifts[i] + 0.1, lbl, fontsize=15)
def v2color(vals):
cnorm = plt.Normalize(vmin=np.nanmin(vals), vmax=np.nanmax(vals))
cmap = plt.cm.ScalarMappable(norm=cnorm, cmap=plt.get_cmap('viridis'))
clrs = [cmap.to_rgba(v) for v in vals]
return clrs
def stations(locs, ckeys=None, vals=None, alpha=0.3, lstep=100, pkeys=None, plocs=None):
locs = locs[:, :2]
x, y = locs.T
plt.scatter(x, y, alpha=alpha, s=6, zorder=0)
# x, y, z = locs[2900:3100].T
if lstep != 0:
for i in range(0, locs.shape[0], lstep):
plt.text(x[i], y[i], i)
if ckeys is not None:
if vals is not None:
clrs = v2color(vals)
for i, ck in enumerate(ckeys):
x, y = locs[ck].T
plt.plot(x, y, alpha=alpha, color=clrs[i], linewidth=2)
else:
for ck in ckeys:
x, y = locs[ck].T
plt.plot(x, y, alpha=alpha, color='black', zorder=1)
if pkeys is not None:
x, y = locs[pkeys].T
plt.scatter(x, y, s=60, color='red', zorder=2)
for i in range(x.size):
plt.text(x[i], y[i], i, color='green')
if plocs is not None:
x, y = plocs[:, :2].T
plt.scatter(x, y, s=60, color='red', marker='x', zorder=2)
for i in range(x.size):
plt.text(x[i], y[i], i, color='green')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.axis('equal')
plt.show()
def im_freq(d, sr, norm=False, xlims=None):
fd = fftpack.rfft(d, axis=1)
fd = np.abs(fd)
if norm is True:
fd /= np.max(fd, axis=1)[:, np.newaxis]
n = fd.shape[1]
freq = fftpack.rfftfreq(n, d=1. / sr)
plt.imshow(fd, aspect='auto', extent=[freq[0], freq[-1], 0, fd.shape[0]], origin='lower', interpolation='none')
if xlims is not None:
plt.xlim(xlims)
plt.show()
def im(d, norm=True, savedir=None, tkey='im_raw', cmap='viridis', aspect='auto', extent=None, locs=None, labels=None, title=None):
fig = plt.figure(figsize=(10, 7), facecolor='white')
# if times is not None:
# extent = [times[0], times[-1], 0, d.shape[0]]
if norm is True:
dtmp = d / np.max(np.abs(d), axis=1)[:, np.newaxis]
else:
dtmp = d
im = plt.imshow(dtmp, origin='lower', aspect=aspect, extent=extent,
cmap=cmap, interpolation='none')
if extent is not None:
plt.xlim(extent[:2])
plt.ylim(extent[2:])
if locs is not None:
plt.scatter(locs[:, 0], locs[:, 1])
plt.colorbar(im)
if labels is not None:
plt.xlabel(labels[0])
plt.ylabel(labels[1])
if title is not None:
plt.title(title)
# manager = plt.get_current_fig_manager()
# manager.resize(*manager.window.maxsize())
# plt.tight_layout()
savefig(fig, savedir, tkey)
def im_ax(d, ax, norm=True, cmap='viridis', aspect='auto', extent=None):
if norm is True:
dtmp = d / np.max(np.abs(d), axis=1)[:, np.newaxis]
else:
dtmp = d
im = ax.imshow(dtmp, origin='lower', aspect=aspect, extent=extent,
cmap=cmap, interpolation='none')
if extent is not None:
ax.set_xlim(extent[:2])
ax.set_ylim(extent[2:])
def freq_compare(sigs, sr, xlim=None):
plt.subplot(211)
for sig in sigs:
plt.plot(sig)
plt.xlabel('Time')
plt.subplot(212)
for sig in sigs:
f = fftpack.fft(sig)
freq = fftpack.fftfreq(len(f), d=1. / sr)
# freq = np.fft.fftshift(freq)
plt.plot(np.fft.fftshift(freq), np.fft.fftshift(np.abs(f)))
if xlim is not None:
plt.xlim(xlim)
else:
plt.xlim([0, sr / 2.])
plt.xlabel('Freq (Hz)')
plt.show()
def freq(sig, sr, xlim=None):
plt.subplot(211)
plt.plot(sig, marker='o', alpha=1, markersize=3)
plt.xlabel('Time')
plt.subplot(212)
f = fftpack.fft(sig)
freq = fftpack.fftfreq(len(f), d=1. / sr)
# freq = np.fft.fftshift(freq)
plt.plot(np.fft.fftshift(freq), np.fft.fftshift(np.abs(f)), marker='o', alpha=1, markersize=3)
if xlim is not None:
plt.xlim(xlim)
else:
plt.xlim([0, sr / 2.])
plt.xlabel('Freq (Hz)')
plt.show()
def angle(sig, sr, xlim=None):
plt.subplot(211)
plt.plot(sig)
plt.xlabel('Time')
plt.subplot(212)
size = len(sig)
hl = size // 2
freq = fftpack.fftfreq(size, d=1. / sr)[:hl]
f = fftpack.fft(sig)[:hl]
plt.plot(freq, np.abs(f))
ang = np.angle(f)
plt.plot(freq, ang)
if xlim is not None:
plt.xlim(xlim)
else:
plt.xlim([0, sr / 2.])
plt.xlabel('Freq (Hz)')
plt.show()
def sigs_old(d, spacing=10, labels=None, vlines=None):
if vlines is not None:
for v in vlines:
plt.axvline(v, linestyle='--', color='red')
std = np.std(d)
shifts = np.arange(0, d.shape[0], 1) * spacing * std
for i, sig in enumerate(d):
plt.plot(sig + shifts[i])
if labels is not None:
for i, lbl in enumerate(labels):
plt.text(0, shifts[i] + 2 * std, lbl, fontsize=15)
plt.show()
def sigsNorm(d, spacing=1, labels=None, vlines=None):
if vlines is not None:
for v in vlines:
plt.axvline(v, linestyle='--', color='red')
shifts = np.arange(0, d.shape[0], 1) * spacing
for i, sig in enumerate(d):
plt.plot(sig / np.max(
|
np.abs(sig)
|
numpy.abs
|
import numpy as np
from hypothesis import given
from stylo.testing.strategies import shape_mask
class BaseColorMapTest:
"""A base class for testing :code:`ColorMap` implementations.
    When writing your test case for a new :code:`ColorMap` implementation you need to
declare it as follows.
.. code-block:: python
from unittest import TestCase
from stylo.testing.color import BaseColorMapTest
class TestMyColorMap(TestCase, BaseColorMapTest):
...
.. note::
The order in which you write the classes is *very* important.
You also need to define a :code:`setUp` method to set the :code:`colormap` attribute
    equal to an instance of your colormap implementation.
.. code-block:: python
def setUp(self):
self.colormap = MyColorMap()
    Then, in addition to any tests you write, your :code:`ColorMap` implementation will
be automatically tested to see if it passes the checks defined below.
"""
@given(mask=shape_mask)
def test_paint(self, mask):
"""Ensure that a colormap can be called with a mask produced by some shape and
that the result is:
- A numpy array with the same shape :code:`(height, width)` as the given mask.
.. note::
Since :code:`ColorMaps` need to be independent of :code:`ColorSpace` we
cannot enforce anything about the contents of the array
"""
colormap = self.colormap
colorspace = colormap.colorspace
background = colorspace.parse("ffffff")
height, width = mask.shape
dimensions = (height, width, len(background))
color =
|
np.full(dimensions, background)
|
numpy.full
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib import cm
from mplEasyAnimate import animation
from tqdm import tqdm
def RK4(diffeq, y0, t, h):
""" RK4 method for ODEs:
Given y0 at t, returns y1 at t+h """
k1 = h*diffeq(y0, t) # dy/dt at t
k2 = h*diffeq(y0+0.5*k1, t + h/2.) # dy/dt at t+h/2
k3 = h*diffeq(y0+0.5*k2, t + h/2.) # dy/dt at t+h/2
k4 = h*diffeq(y0+k3, t + h) # dy/dt at t+h
return y0 + (k1+k4)/6.0 + (k2+k3)/3.0
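# Illustrative use of RK4 (a sketch, not part of the original module):
# integrate the test equation dy/dt = -y, whose exact solution is exp(-t).
# def decay(y, t):
#     return -y
# y, t, h = 1.0, 0.0, 0.1
# for _ in range(10):
#     y = RK4(decay, y, t, h)
#     t += h
# # y now matches np.exp(-1.0) to roughly 7 decimal places (global error O(h^4))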
def wavemotion2d(u0, u1):
u2 = 2*(1-2*b)*u1 - u0 # unshifted terms
u2[1:-1,1:-1] += b*( u1[1:-1,0:-2] + u1[1:-1,2:] # left, right
+ u1[0:-2,1:-1] + u1[2:,1:-1] ) #top, bottom
return u2
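# wavemotion2d performs one explicit leapfrog step of the 2D wave equation,
# u2 = 2*(1 - 2*b)*u1 - u0 + b*(left + right + top + bottom),
# where b = (c*dt/dx)**2 is the squared Courant number. Note that b is
# assumed to be defined at module level; it is not set anywhere in this snippet.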
def gaussian(x):
return np.exp(-(x-5)**2)
def force(r, t): # force of particle pair, with relative pos r
s = np.sqrt(np.sum(r*r, axis=-1)) # distance
s3 = np.dstack((s, s, s)) # make (m,n,3) array
return -spring_k*(1.0 - spring_l/s3)*r # Hooke's law
def cloth(Y, t): # tablecloth
r, v, f = Y[0], Y[1], np.zeros((N,M,3))
rtop = r[0:-1, :] - r[1:, :] # rel pos to top neighbor
    rright = r[:, 0:-1] - r[:, 1:]              # rel pos to right neighbor
ftop, fright = force(rtop, t), force(rright, t) # forces from top, right
f[0:-1, :] = ftop # force from top
    f[:, 0:-1] += fright                        # force from right
f[1:, :] -= ftop # below, left: use 3rd law
f[:, 1:] -= fright
a = (f - damp*v)/mass + gvec
    if t < 1:
        v[0,0], v[0,-1], v[-1,0], v[-1,-1] = 0, 0, 0, 0   # fixed corners
    else:
        v[0,0], v[0,-1], v[-1,0] = 0, 0, 0    # release the last corner after t=1
return np.array([v,a])
L, M, N = 2.0, 15, 15 # size, (M,N) particle array
h, mass, damp = 0.01, 0.004, 0.01 # keep damp between [.01,.1]
x, y = np.linspace(0,L,M),
|
np.linspace(0,L,N)
|
numpy.linspace
|
import gym
import tensorflow as tf
import random
import numpy as np
from statistics import mean, median
env = gym.make('CartPole-v0').env
env.reset()
goal_steps = 700
score_requirement = 60
initial_games = 10000
def model_data_preparation():
training_data = []
scores = []
accepted_scores = []
for game_index in range(initial_games):
score = 0
game_memory = []
previous_observation = []
for step_index in range(goal_steps):
#env.render()
if len(previous_observation) == 0:
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
else:
action = random.randrange(0, 2)
#action = np.argmax(loaded_model.predict(observation.reshape(-1, len(observation)))[0])
observation, reward, done, info = env.step(action)
game_memory.append([previous_observation, action])
previous_observation = observation
score += reward
if done:
                env.reset()
break
if score >= score_requirement:
accepted_scores.append(score)
for data in game_memory:
if data[1] == 1:
output = [0, 1]
elif data[1] == 0:
output = [1, 0]
training_data.append([data[0], output])
env.reset()
scores.append(score)
print(accepted_scores)
print(scores)
return training_data
def build_model(input_size, output_size):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(128, input_dim = input_size, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(256, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(256, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(output_size, activation=tf.nn.softmax))
model.compile(loss= tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5))
return model
def train_model(training_data):
x = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))
y =
|
np.array([i[1] for i in training_data])
|
numpy.array
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os,sys
import pandas as pd
import numpy as np
import math
import itertools
from functools import reduce
import h5py
from progressbar import *
from sklearn.metrics import roc_curve, auc
from sklearn.utils.extmath import stable_cumsum
# Assumed import (not present in the original snippet): prep_roc_data below
# calls binary_clf_curve, which in recent scikit-learn versions is the
# private helper sklearn.metrics._ranking._binary_clf_curve.
from sklearn.metrics._ranking import _binary_clf_curve as binary_clf_curve
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
from seaborn import heatmap
from textwrap import wrap
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
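# Worked example of moving_average: for a = [1, 2, 3, 4, 5] and n = 3 the
# result is [2., 3., 4.] -- each output element is the mean of a length-n
# window, so the output is n - 1 samples shorter than the input.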
def disp_learn_hist_smoothed(location, losslim=None, window_train=400,window_val=40,show=True, leg_size=24):
train_log=location+'/log_train.csv'
val_log=location+'/log_val.csv'
train_log_csv = pd.read_csv(train_log)
val_log_csv = pd.read_csv(val_log)
epoch_train = moving_average(np.array(train_log_csv.epoch),window_train)
accuracy_train = moving_average(np.array(train_log_csv.accuracy),window_train)
loss_train = moving_average(np.array(train_log_csv.loss),window_train)
epoch_val = moving_average(np.array(val_log_csv.epoch),window_val)
accuracy_val = moving_average(np.array(val_log_csv.accuracy),window_val)
loss_val = moving_average(np.array(val_log_csv.loss),window_val)
epoch_val_uns = np.array(val_log_csv.epoch)
accuracy_val_uns = np.array(val_log_csv.accuracy)
loss_val_uns = np.array(val_log_csv.loss)
fig, ax1 = plt.subplots(figsize=(12,8),facecolor='w')
line11 = ax1.plot(epoch_train, loss_train, linewidth=2, label='Average training loss', color='b', alpha=0.3)
line12 = ax1.plot(epoch_val, loss_val, label='Average validation loss', color='blue')
ax1.set_xlabel('Epoch',fontweight='bold',fontsize=24,color='black')
ax1.tick_params('x',colors='black',labelsize=18)
ax1.set_ylabel('Loss', fontsize=24, fontweight='bold',color='b')
ax1.tick_params('y',colors='b',labelsize=18)
if losslim is not None:
ax1.set_ylim(0.,losslim)
ax2 = ax1.twinx()
line21 = ax2.plot(epoch_train, accuracy_train, linewidth=2, label='Average training accuracy', color='r', alpha=0.3)
line22 = ax2.plot(epoch_val, accuracy_val, label='Average validation accuracy', color='red')
ax2.set_ylabel('Accuracy', fontsize=24, fontweight='bold',color='r')
ax2.tick_params('y',colors='r',labelsize=18)
ax2.set_ylim(0.,1.0)
lines = line11+ line12+ line21+ line22
labels = [l.get_label() for l in lines]
leg = ax2.legend(lines, labels, loc=5, numpoints=1,prop={'size' : leg_size})
leg_frame = leg.get_frame()
leg_frame.set_facecolor('white')
if show:
plt.grid()
plt.show()
return
return fig
def plot_confusion_matrix(labels, predictions, class_names,title=None):
"""
plot_confusion_matrix(labels, predictions, class_names)
    Plots the confusion matrix for a given set of labels and predictions
Args: labels ... 1D array of true label value, the length = sample size
predictions ... 1D array of predictions, the length = sample size
class_names ... 1D array of string label for classification targets, the length = number of categories
title ... optional, title for the confusion matrix
"""
fig, ax = plt.subplots(figsize=(12,8),facecolor='w')
num_labels = len(class_names)
    max_value = np.max([np.max(np.unique(labels)), np.max(np.unique(predictions))])
assert max_value < num_labels
mat,_,_,im = ax.hist2d(predictions, labels,
bins=(num_labels,num_labels),
range=((-0.5,num_labels-0.5),(-0.5,num_labels-0.5)),cmap=plt.cm.Blues)
# Normalize the confusion matrix
mat = mat.astype("float") / mat.sum(axis=0)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.tick_params(labelsize=20)
ax.set_xticks(np.arange(num_labels))
ax.set_yticks(np.arange(num_labels))
ax.set_xticklabels(class_names,fontsize=20)
ax.set_yticklabels(class_names,fontsize=20)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.setp(ax.get_yticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
ax.set_xlabel('Prediction',fontsize=20)
ax.set_ylabel('True Label',fontsize=20)
if title is not None:
ax.set_title(title,fontsize=20)
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
ax.text(i,j, r"${0:0.3f}$".format(mat[i,j]),
ha="center", va="center", fontsize=20,
color="white" if mat[i,j] > (0.5*mat.max()) else "black")
plt.show()
def plot_multiple_ROC(data, metric, pos_neg_labels, plot_labels = None, png_name=None,title='ROC Curve', annotate=True,ax=None, linestyle=None, leg_loc=None, xlabel=None,ylabel=None,legend_label_dict=None):
'''
plot_multiple_ROC(data, metric, pos_neg_labels, plot_labels = None, png_name=None,title='ROC Curve', annotate=True,ax=None, linestyle=None, leg_loc=None, xlabel=None,ylabel=None,legend_label_dict=None)
Plot multiple ROC curves of background rejection vs signal efficiency. Can plot 'rejection' (1/fpr) or 'fraction' (tpr).
Args:
data ... tuple of (n false positive rates, n true positive rate, n thresholds) to plot rejection or
(rejection fractions, true positive rates, false positive rates, thresholds) to plot rejection fraction.
metric ... string, name of metric to plot: ('rejection' or 'fraction')
pos_neg_labels ... array of one positive and one negative string label, or list of lists, with each list giving positive and negative label for
one dataset
plot_labels ... label for each run to display in legend
png_name ... name of image to save
title ... title of plot
annotate ... whether or not to include annotations of critical points for each curve, default True
ax ... matplotlib.pyplot.axes on which to place plot
linestyle ... list of linestyles to use for each curve, can be '-', ':', '-.'
leg_loc ... location for legend, eg 'upper right' - vertical upper, center, lower, horizontal right left
legend_label_dict ... dictionary of display symbols for each string label, to use for displaying pretty characters in labels
author: <NAME>
June 2020
'''
if legend_label_dict is None:
legend_label_dict={}
if isinstance(pos_neg_labels[0], str):
legend_label_dict[pos_neg_labels[0]]=pos_neg_labels[0]
legend_label_dict[pos_neg_labels[1]]=pos_neg_labels[1]
else:
for j in range(len(pos_neg_labels)):
legend_label_dict[pos_neg_labels[j][0]]=pos_neg_labels[j][0]
legend_label_dict[pos_neg_labels[j][1]]=pos_neg_labels[j][1]
if ax is None:
fig, ax = plt.subplots(figsize=(16,9),facecolor="w")
ax.tick_params(axis="both", labelsize=20)
model_colors = [np.random.rand(3,) for i in data[0]]
for j in np.arange(len(data[0])):
if isinstance(pos_neg_labels[0], str):
label_0 = pos_neg_labels[0]
label_1 = pos_neg_labels[1]
else:
label_0 = pos_neg_labels[j][0]
label_1 = pos_neg_labels[j][1]
if metric=='rejection':
fpr = data[0][j]
tpr = data[1][j]
threshold = data[2][j]
roc_auc = auc(fpr, tpr)
inv_fpr = []
for i in fpr:
inv_fpr.append(1/i) if i != 0 else inv_fpr.append(1/(3*min(fpr[fpr>0])))
tnr = 1. - fpr
elif metric == 'fraction':
fraction = data[0][j]
tpr = data[1][j]
fpr = data[2][j]
threshold = data[3][j]
roc_auc = auc(fpr, tpr)
tnr = 1. - fpr
else:
print('Error: metric must be either \'rejection\' or \'fraction\'.')
return
if metric == 'rejection':
if plot_labels is None:
line = ax.plot(tpr, inv_fpr,
label=f"{j:0.3f}, AUC {roc_auc:0.3f}",
linestyle=linestyle[j] if linestyle is not None else None, linewidth=2,markerfacecolor=model_colors[j])
else:
line = ax.plot(tpr, inv_fpr,
label=f"{plot_labels[j]}, AUC {roc_auc:0.3f}",
linestyle=linestyle[j] if linestyle is not None else None, linewidth=2,markerfacecolor=model_colors[j])
else:
if plot_labels is None:
line = ax.plot(tpr, fraction,
label=f"{j:0.3f}, AUC {roc_auc:0.3f}",
linestyle=linestyle[j] if linestyle is not None else None, linewidth=2,markerfacecolor=model_colors[j])
else:
line = ax.plot(tpr, fraction,
label=f"{plot_labels[j]}, AUC {roc_auc:0.3f}",
linestyle=linestyle[j] if linestyle is not None else None, linewidth=2,markerfacecolor=model_colors[j])
# Show coords of individual points near x = 0.2, 0.5, 0.8
todo = {0.2: True, 0.5: True, 0.8: True}
if annotate:
pbar = ProgressBar(widgets=['Find Critical Points: ', Percentage(), ' ', Bar(marker='0',left='[',right=']'),
' ', ETA()], maxval=len(tpr))
pbar.start()
for i,xy in enumerate(zip(tpr, inv_fpr if metric=='rejection' else fraction, tnr)):
pbar.update(i)
xy = (round(xy[0], 4), round(xy[1], 4), round(xy[2], 4))
xy_plot = (round(xy[0], 4), round(xy[1], 4))
for point in todo.keys():
if xy[0] >= point and todo[point]:
ax.annotate('(%s, %s, %s)' % xy, xy=xy_plot, textcoords='data', fontsize=18, bbox=dict(boxstyle="square", fc="w"))
todo[point] = False
pbar.finish()
ax.grid(True, which='both', color='grey')
if xlabel is None: xlabel = f'{legend_label_dict[label_0]} Signal Efficiency'
if ylabel is None: ylabel = f'{legend_label_dict[label_1]} Background Rejection' if metric == 'rejection' else f'{legend_label_dict[label_1]} Background Rejection Fraction'
ax.set_xlabel(xlabel, fontsize=20)
ax.set_ylabel(ylabel, fontsize=20)
ax.set_title(title, fontsize=20)
ax.legend(loc=leg_loc if leg_loc is not None else "upper right", prop={"size":20})
if metric == 'rejection':
ax.set_yscale('log')
plt.margins(0.1)
if png_name is not None: plt.savefig(os.path.join(os.getcwd(),png_name), bbox_inches='tight')
return fpr, tpr, threshold, roc_auc
def prep_roc_data(softmaxes, labels, metric, softmax_index_dict, label_0, label_1, energies=None,threshold=None):
"""
prep_roc_data(softmaxes, labels, metric, softmax_index_dict, label_0, label_1, energies=None,threshold=None)
Purpose : Prepare data for plotting the ROC curves. If threshold is not none, filters
out events with energy greater than threshold. Returns true positive rates, false positive
rates, and thresholds for plotting the ROC curve, or true positive rates, rejection fraction,
and thresholds, switched on 'metric'.
Args: softmaxes ... array of resnet softmax output, the 0th dim= sample size
labels ... 1D array of true label value, the length = sample size
          metric ... string, name of metric to use ('rejection' or 'fraction')
for background rejection or background rejection fraction.
softmax_index_dict ... Dictionary pointing to label integer from particle name
label_0 and label_1 ... Labels indicating which particles to use - label_0 is the positive label
energies ... 1D array of true event energies, the length = sample
size
threshold ... optional maximum to impose on energies, events with higher energy will be discarded (legacy)
author: <NAME>
May 2020
"""
if threshold is not None and energies is not None:
low_energy_idxs = np.where(np.squeeze(energies) < threshold)[0]
rsoftmaxes = softmaxes[low_energy_idxs]
rlabels = labels[low_energy_idxs]
renergies = energies[low_energy_idxs]
else:
rsoftmaxes = softmaxes
rlabels = labels
renergies = energies
(pos_softmaxes, neg_softmaxes), (pos_labels, neg_labels) = separate_particles([rsoftmaxes, rlabels], rlabels, softmax_index_dict, [label_0, label_1])
total_softmax = np.concatenate((pos_softmaxes, neg_softmaxes), axis=0)
total_labels = np.concatenate((pos_labels, neg_labels), axis=0)
assert total_labels.shape[0]==total_softmax.shape[0]
if metric == 'rejection':
return roc_curve(total_labels, total_softmax[:,softmax_index_dict[label_0]], pos_label=softmax_index_dict[label_0])
else:
fps, tps, thresholds = binary_clf_curve(total_labels,total_softmax[:,softmax_index_dict[label_0]],
pos_label=softmax_index_dict[label_0])
fns = tps[-1] - tps
tns = fps[-1] - fps
tprs = tps / (tps + fns)
rejection_fraction = tns / (tns + fps)
fprs = fps / (fps + tns)
return rejection_fraction, tprs, fprs, thresholds
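# Note on the 'fraction' branch of prep_roc_data: binary_clf_curve returns
# cumulative false/true positive counts per decreasing threshold, and the
# totals fps[-1] and tps[-1] equal the negative and positive class sizes,
# which is why fns = tps[-1] - tps and tns = fps[-1] - fps recover the
# false/true negative counts used above.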
def disp_multiple_learn_hist(locations,losslim=None,show=True,titles=None,best_only=False,leg_font=10,title_font=10):
'''
disp_multiple_learn_hist(locations,losslim=None,show=True,titles=None,best_only=False,leg_font=10,title_font=10)
Plots a grid of learning histories.
Args:
locations ... list of paths to directories of training dumps
losslim ... limit of loss axis
show ... bool, whether to show the plot
titles ... list of titles for each plot in the grid
best_only ... bool, whether to plot only the points where best model was saved
leg_font ... legend font size
        title_font ... title font size
author: <NAME>
June 2020
'''
ncols = len(locations) if len(locations) < 3 else 3
nrows = math.ceil(len(locations)/3)
if nrows==1 and ncols==1: fig = plt.figure(facecolor='w',figsize=(12,12))
else: fig = plt.figure(facecolor='w',figsize=(12,nrows*4))
gs = gridspec.GridSpec(nrows,ncols,figure=fig)
axes = []
for i,location in enumerate(locations):
train_log=location+'/log_train.csv'
val_log=location+'/log_val.csv'
train_log_csv = pd.read_csv(train_log)
val_log_csv = pd.read_csv(val_log)
disp_names = ['' for _ in range(len(locations))]
if best_only:
best_idxs = [0]
best_epoch=0
best_loss = val_log_csv.loss[0]
for idx,loss in enumerate(val_log_csv.loss):
if loss < best_loss:
best_loss=loss
best_idxs.append(idx)
best_epoch=val_log_csv.epoch[idx]
val_log_csv = val_log_csv.loc[best_idxs]
if titles is not None:
disp_names[i] = titles[i] + ", \n Best Val Loss ={loss:.4f}@Ep.{epoch:.2f}".format(loss=best_loss,epoch=best_epoch)
else:
disp_names=titles
ax1=fig.add_subplot(gs[i],facecolor='w') if i ==0 else fig.add_subplot(gs[i],facecolor='w',sharey=axes[0])
ax1.set_xlim(0,train_log_csv.epoch.max())
axes.append(ax1)
line11 = ax1.plot(train_log_csv.epoch, train_log_csv.loss, linewidth=2, label='Train loss', color='b', alpha=0.3)
line12 = ax1.plot(val_log_csv.epoch, val_log_csv.loss, marker='o', markersize=3, linestyle='', label='Validation loss', color='blue')
if losslim is not None:
ax1.set_ylim(None,losslim)
if titles is not None:
ax1.set_title(disp_names[i],size=title_font)
ax2 = ax1.twinx()
line21 = ax2.plot(train_log_csv.epoch, train_log_csv.accuracy, linewidth=2, label='Train accuracy', color='r', alpha=0.3)
line22 = ax2.plot(val_log_csv.epoch, val_log_csv.accuracy, marker='o', markersize=3, linestyle='', label='Validation accuracy', color='red')
ax1.set_xlabel('Epoch',fontweight='bold',fontsize=24,color='black')
ax1.tick_params('x',colors='black',labelsize=18)
ax1.set_ylabel('Loss', fontsize=24, fontweight='bold',color='b')
ax1.tick_params('y',colors='b',labelsize=18)
ax2.set_ylabel('Accuracy', fontsize=24, fontweight='bold',color='r')
ax2.tick_params('y',colors='r',labelsize=18)
ax2.set_ylim(0.,1.05)
lines = line11 + line12 + line21 + line22
labels = [l.get_label() for l in lines]
leg = ax2.legend(lines, labels, fontsize=16, loc=5, numpoints=1,prop={'size':leg_font})
leg_frame = leg.get_frame()
leg_frame.set_facecolor('white')
gs.tight_layout(fig)
return fig
# Function to plot a grid of confusion matrices
def plot_multiple_confusion_matrix(label_arrays, prediction_arrays, class_names,titles=None):
"""
plot_multiple_confusion_matrix(label_arrays, prediction_arrays, class_names,titles=None)
Plot the confusion matrix for a series of test outputs.
Args: label_arrays ... array of 1D arrays of true label value, the length = sample size
predictions ... array of 1D arrays of predictions, the length = sample size
class_names ... 1D array of string label for classification targets, the length = number of categories
titles ... list of string titles for the confusion matrices
author: <NAME>
May 2020
"""
if len(label_arrays) >= 3:
fig = plt.figure(facecolor='w',figsize=(16,8))
gs = gridspec.GridSpec(math.ceil(len(label_arrays)/3),3,figure=fig)
else:
fig = plt.figure(facecolor='w',figsize=(10*len(label_arrays),8))
gs = gridspec.GridSpec(1,len(label_arrays),figure=fig)
axes = []
for i,labels in enumerate(label_arrays):
predictions = prediction_arrays[i]
ax=fig.add_subplot(gs[i],facecolor='w')
num_labels = len(class_names)
        max_value = np.max([np.max(np.unique(labels)), np.max(np.unique(predictions))])
assert max_value < num_labels
mat,_,_,im = ax.hist2d(predictions, labels,
bins=(num_labels,num_labels),
range=((-0.5,num_labels-0.5),(-0.5,num_labels-0.5)),cmap=plt.cm.Blues)
# Normalize the confusion matrix
mat = mat.astype("float") / mat.sum(axis=0)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.tick_params(labelsize=20)
ax.set_xticks(np.arange(num_labels))
ax.set_yticks(np.arange(num_labels))
ax.set_xticklabels(class_names,fontsize=20)
ax.set_yticklabels(class_names,fontsize=20)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.setp(ax.get_yticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
ax.set_xlabel('Prediction',fontsize=20)
ax.set_ylabel('True Label',fontsize=20)
if titles is not None:
ax.set_title(titles[i])
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
ax.text(i,j, r"${0:0.3f}$".format(mat[i,j]),
ha="center", va="center", fontsize=20,
color="white" if mat[i,j] > (0.5*mat.max()) else "black")
gs.tight_layout(fig)
return fig
def load_fq_output(mapping_indices_path, fq_failed_idxs_path, test_idxs_path, cut_path, cut_list):
'''
    load_fq_output(mapping_indices_path, fq_failed_idxs_path, test_idxs_path, cut_path, cut_list)
    Purpose : Load FiTQun output matching the desired test set events.
Args: mapping_indices_path ... path to .npz list of mapping indices for FiTQun
fq_failed_idxs_path ... path to .npz containing indices of events (in test set ordering) for which FiTQun failed to produce output
test_idxs_path ... path to .npz containing indices in pointnet set - idx array must be titled 'test_idxs' in the archive
cut_path ... path to pointnet cuts npz file
          cut_list ... list of cuts to be applied. Must be an array in the .npz pointed to by cut_path
author: <NAME>
August 2020
'''
# Load the fiTQun results
# File paths for fiTQun results
fiTQun_e_path = "/fast_scratch/WatChMaL/data/IWCDmPMT_4pi_fulltank_fiTQun_e-.npz"
fiTQun_mu_path = "/fast_scratch/WatChMaL/data/IWCDmPMT_4pi_fulltank_fiTQun_mu-.npz"
fiTQun_gamma_path = "/fast_scratch/WatChMaL/data/IWCDmPMT_4pi_fulltank_fiTQun_gamma.npz"
# Load fiTQun results
f_e = np.load(fiTQun_e_path, allow_pickle=True)
f_mu = np.load(fiTQun_mu_path, allow_pickle=True)
f_gamma = np.load(fiTQun_gamma_path, allow_pickle=True)
fq_filename_original = (f_gamma['filename'],f_e['filename'],f_mu['filename'])
fq_eventids_original = ( f_gamma['eventid'],f_e['eventid'], f_mu['eventid'])
fq_flag_original = (f_gamma['flag'] ,f_e['flag'],f_mu['flag'])
fq_nll_original = (f_gamma['nLL'],f_e['nLL'],f_mu['nLL'])
fq_mom_original = (f_gamma['momentum'],f_e['momentum'],f_mu['momentum'])
n_events = int(reduce(lambda x,y : x+y, list(map(lambda x : x.shape[0],fq_filename_original))))
fq_rootfiles = np.empty(n_events,dtype=object)
fq_eventids = np.zeros(n_events)
fq_flag = np.empty((n_events,2))
fq_nll = np.empty((n_events,2))
fq_mom = np.empty((n_events, 2))
fq_mapping_indices = np.load(mapping_indices_path,allow_pickle=True)['arr_0']
fq_failed_idxs = np.load(fq_failed_idxs_path, allow_pickle = True)['failed_indices_pointing_to_h5_test_set'].astype(int)
filtered_indices = np.load("/fast_scratch/WatChMaL/data/IWCD_fulltank_300_pe_idxs.npz", allow_pickle=True)
test_filtered_indices = filtered_indices['test_idxs']
stest_filtered_indices = np.delete(test_filtered_indices, fq_failed_idxs,0)
idx_dic = {}
for i, idx in enumerate(stest_filtered_indices):
idx_dic[idx] = i
test_idxs = np.load(test_idxs_path, allow_pickle=True)['test_idxs']
keep_idxs = []
stest_idxs = []
i=0
for idx in test_idxs:
try:
keep_idxs.append(idx_dic[idx])
stest_idxs.append(idx)
except KeyError:
i+=1
f = h5py.File("/data/WatChMaL/data/IWCDmPMT_4pi_fulltank_9M.h5", "r")
original_labels = np.array(f['labels'])
labels = np.array(original_labels[test_filtered_indices])
f.close()
slabels = np.delete(labels, fq_failed_idxs, 0)
for i,ptype in enumerate(slabels):
fq_rootfiles[i] = str(fq_filename_original[ptype][fq_mapping_indices[i]])
fq_eventids[i] = fq_eventids_original[ptype][fq_mapping_indices[i]]
fq_flag[i] = fq_flag_original[ptype][fq_mapping_indices[i]]
fq_nll[i] = fq_nll_original[ptype][fq_mapping_indices[i]]
fq_mom[i] = fq_mom_original[ptype][fq_mapping_indices[i]]
fq_scores = np.zeros((fq_nll.shape[0],3))
fq_scores[:,0] = fq_nll[:,1] - fq_nll[:,0]
fq_scores[:,1] = fq_nll[:,1] - fq_nll[:,0]
fq_scores[:,2] = fq_nll[:,0] - fq_nll[:,1]
fq_labels = slabels
fq_rootfiles = fq_rootfiles[keep_idxs]
fq_eventids = fq_eventids[keep_idxs]
fq_flag = fq_flag[keep_idxs]
fq_scores = fq_scores[keep_idxs]
fq_mom = fq_mom[keep_idxs]
fq_labels = fq_labels[keep_idxs]
fq_rootfiles = apply_cuts(fq_rootfiles, stest_idxs, cut_path, cut_list)
fq_eventids = apply_cuts(fq_eventids, stest_idxs, cut_path, cut_list)
fq_flag = apply_cuts(fq_flag, stest_idxs, cut_path, cut_list)
fq_scores = apply_cuts(fq_scores, stest_idxs, cut_path, cut_list)
fq_mom = apply_cuts(fq_mom, stest_idxs, cut_path, cut_list)
fq_labels = apply_cuts(fq_labels, stest_idxs, cut_path, cut_list)
return fq_rootfiles, fq_eventids, fq_flag, fq_scores, fq_mom, fq_labels
def apply_cuts(array, idxs, cut_path, cut_list):
'''
apply_cuts(array, indices, cut_path, cut_list)
Purpose: Applies cuts to a given array, based on the given cut file and indices.
Args: array ... 1d array of length n, that we wish to cut
idxs ... 1d array of length n, where the ith entry gives the index of the event in the
pointnet test set ordering corresponding to the ith entry in array
cut_path ... path to pointnet cuts npz file
          cut_list ... list of cuts to be applied. Must be an array in the .npz pointed to by cut_path
'''
cut_file = np.load(cut_path, allow_pickle=True)
cut_arrays = []
for cut in cut_list:
assert cut in cut_file.keys(), f"Error, {cut} has no associated cut file"
cut_arrays.append(cut_file[cut][idxs])
combined_cut_array=np.array(list(map(lambda x : 1 if 1 in x else 0, list(zip(*cut_arrays)))))
cut_idxs = np.where(combined_cut_array==1)[0]
return np.delete(array, cut_idxs, 0)
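# How the cut combination above works: each selected cut array flags events
# with 1, the map/zip expression marks an event as cut if any selected cut
# flags it, and np.delete then removes that union of events from 'array'.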
def load_test_output_pn(location, cut_path, test_idxs, cut_list):
"""
load_test_output_pn(location, cut_path, test_idxs, cut_list)
Purpose : Load output of a test run on the full h5 test set,
using PN cut file method.
    Args: location ... string, path of the test output file,
                       eg. '/home/cmacdonald/CNN/dumps/20200525_152544/test_validation_iteration_dump.npz'
cut_path ... string, path of cut file
test_idxs ... string, path of indices file used for test run
cut_list ... list of names of cuts, must be keys to files in the cut file
author: <NAME>
May 2020
"""
test_dump_np = np.load(location, allow_pickle=True)
cut_file = np.load(cut_path, allow_pickle=True)
cut_arrays = []
for cut in cut_list:
assert cut in cut_file.keys(), f"Error, {cut} has no associated cut file"
cut_arrays.append(cut_file[cut][test_idxs])
combined_cut_array=np.array(list(map(lambda x : 1 if 1 in x else 0, list(zip(*cut_arrays)))))
cut_idxs = np.where(combined_cut_array==1)[0]
info_dict={}
arr_names=['predicted_labels', 'softmax', 'labels', 'energies', 'rootfiles', 'eventids', 'angles']
for arr_name in arr_names:
info_dict[arr_name] = np.concatenate(list([batch_array for batch_array in test_dump_np[arr_name]]))
for key in info_dict.keys():
info_dict[key] = np.delete(info_dict[key], cut_idxs, 0)
return info_dict
def load_test_output(location,index_path,remove_flagged=True, dset='noveto'):
"""
    load_test_output(location, index_path, remove_flagged=True, dset='noveto')
(deprecated)
Purpose : Load output of a test run on the full h5 test set,
remove FiTQun flagged/failed events, and return a dict of results.
    Args: location ... string, path of the test output file,
                       eg. '/home/cmacdonald/CNN/dumps/20200525_152544/test_validation_iteration_dump.npz'
index_path ... string, path of directory containing indices of FiTQun failed and flagged files
remove_flagged ... whether or not to remove flagged events, default True
          dset ... 'noveto' or 'vetoed'
author: <NAME>
May 2020
"""
test_dump_np = np.load(location, allow_pickle=True)
res_predictedlabels = np.concatenate(list([batch_array for batch_array in test_dump_np['predicted_labels']]))
res_softmaxes = np.concatenate(list([batch_array for batch_array in test_dump_np['softmax']]))
res_labels = np.concatenate(list([batch_array for batch_array in test_dump_np['labels']]))
res_energies = np.concatenate(list([batch_array for batch_array in test_dump_np['energies']]))
res_rootfiles = np.concatenate(list([batch_array for batch_array in test_dump_np['rootfiles']]))
res_eventids = np.concatenate(list([batch_array for batch_array in test_dump_np['eventids']]))
res_angles = np.concatenate(list([batch_array for batch_array in test_dump_np['angles']]))
if dset=='noveto':
failed_idxs = np.load(os.path.join(index_path, 'fq_failed_idxs.npz'),allow_pickle=True)['failed_indices_pointing_to_h5_test_set'].astype(int)
flagged_idxs = np.load(os.path.join(index_path, 'fq_flagged_idxs.npz'),allow_pickle=True)['arr_0'].astype(int)
sres_predictedlabels = np.delete(res_predictedlabels,failed_idxs)
sres_softmaxes = np.delete(res_softmaxes,failed_idxs,0)
sres_labels = np.delete(res_labels,failed_idxs)
sres_energies = np.delete(res_energies,failed_idxs)
sres_rootfiles = np.delete(res_rootfiles,failed_idxs)
sres_eventids = np.delete(res_eventids,failed_idxs)
sres_angles = np.delete(res_angles,failed_idxs,0)
if remove_flagged:
filtered_res_predictedlabels = np.delete(sres_predictedlabels,flagged_idxs)
filtered_res_softmaxes = np.delete(sres_softmaxes,flagged_idxs,0)
filtered_res_labels = np.delete(sres_labels,flagged_idxs)
filtered_res_energies = np.delete(sres_energies,flagged_idxs)
filtered_res_rootfiles = np.delete(sres_rootfiles,flagged_idxs)
filtered_res_eventids = np.delete(sres_eventids,flagged_idxs)
filtered_res_angles = np.delete(sres_angles,flagged_idxs,0)
return{'filtered_predictions':filtered_res_predictedlabels,
'filtered_softmaxes':filtered_res_softmaxes,
'filtered_labels':filtered_res_labels,
'filtered_energies':filtered_res_energies,
'filtered_rootfiles':filtered_res_rootfiles,
'filtered_eventids':filtered_res_eventids,
'filtered_angles':filtered_res_angles
}
else:
return{'s_predictions':sres_predictedlabels,
's_softmaxes':sres_softmaxes,
's_labels':sres_labels,
's_energies':sres_energies,
's_rootfiles':sres_rootfiles,
's_eventids':sres_eventids,
's_angles':sres_angles
}
elif dset=='vetoed':
fq_cut_idxs = np.load(os.path.join(index_path, 'fq_cut_idxs_for_vetoed_set.npz'),allow_pickle=True)['fq_cut_idxs_for_vetoed_set'].astype(int)
filtered_res_predictedlabels = np.delete(res_predictedlabels,fq_cut_idxs)
filtered_res_softmaxes = np.delete(res_softmaxes,fq_cut_idxs,0)
filtered_res_labels = np.delete(res_labels,fq_cut_idxs)
filtered_res_energies = np.delete(res_energies,fq_cut_idxs)
filtered_res_rootfiles = np.delete(res_rootfiles,fq_cut_idxs)
filtered_res_eventids =
|
np.delete(res_eventids,fq_cut_idxs)
|
numpy.delete
|
import numpy as np
class Base:
def __init__(self):
self.left_eye = None
self.right_eye = None
self.nose = None
self.left_mouse = None
self.right_mouse = None
self.center_mouse = None
class Landmark4(Base):
def __init__(self, lm4):
super().__init__()
self.left_eye, self.right_eye, self.nose, self.center_mouse = lm4
@property
def pts(self):
return
|
np.array([self.left_eye, self.right_eye, self.nose, self.center_mouse])
|
numpy.array
|
'''
Created on Oct 25, 2016
@author: <NAME>
'''
import numpy as np
import sklearn.metrics as metrics
class MajorityVoting(object):
'''
classdocs
'''
majority = None
annotations = None
accuracies = None
votes = None
num_labels = None
num_annotators = None
num_words = None
def __init__(self, annotations, num_labels):
'''
Constructor
'''
self.annotations = annotations
self.num_words, self.num_annotators = annotations.shape
self.num_labels = num_labels
self.accuracies = np.ones(self.num_annotators)
self.probabilities = np.zeros((self.num_words, self.num_labels))
def vote(self, weighted=False, threshold=0.5, simple=True):
'''
If simple is True, we ignore the structure of the labels and just take the most popular
:param weighted:
:param threshold:
:param simple:
:return:
'''
votes = np.zeros((self.num_words, self.num_labels))
for l in range(self.num_labels):
votes[:, l] = np.sum((self.annotations==l).astype(int), axis=1)
if simple: # we have a simple preference for other classes over 'O' if there is a tie-break situation
# order used by Nugyen -- first one has preference in a tie.
# missing -- 0
# 'B-LOC': 1
# 'B-MISC': 2
# 'B-ORG': 3
# 'B-PER': 4
# 'I-LOC': 5
# 'I-MISC': 6
# 'I-ORG': 7
# 'I-PER': 8
# 'O': 9
preferences =
|
np.array([7, 9, 3, 8, 4, 5, 1, 6, 2])
|
numpy.array
|
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def floors_num_to_columns(full_data):
return pd.get_dummies(full_data["floors"])
def zipcode_to_columns(full_data):
full_data["zipcode_by_area"] = full_data["zipcode"].apply(lambda x:
str(np.round(x / 10)))
return pd.get_dummies(full_data["zipcode_by_area"])
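# zipcode_to_columns coarsens zipcodes into area buckets (zipcode / 10,
# rounded) before one-hot encoding, keeping the dummy matrix far narrower
# than a one-hot encoding of every raw zipcode would be.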
def year_to_duration(full_data):
year_exist = np.abs(full_data["yr_built"] - 2022)
year_since_renovation = np.abs(full_data["yr_renovated"] - 2022)
last_renovated_or_build_year = pd.concat([year_exist,
year_since_renovation],
axis=1).min(axis=1)
full_data.drop("yr_built", axis=1)
full_data.insert(14, "yr_exist", year_exist)
full_data.insert(16, "year_since_renovation", year_since_renovation)
full_data.insert(18, "last_changed",
last_renovated_or_build_year)
def date_to_year(full_data):
dates = full_data["date"]
selling_year = []
for date in dates:
temp_year = pd.to_datetime(date).year
selling_year.append(temp_year)
full_data.insert(1, "selling_year", selling_year)
def drop_negative_values(full_data):
for feature in full_data.columns:
if feature == "date" or feature == "lat" or feature == "long":
continue
elif feature == "id" or feature == "price":
full_data.drop(full_data[(full_data[feature] <= 0)].index,
inplace=True)
else:
full_data.drop(full_data[(full_data[feature] < 0)].index,
inplace=True)
return full_data
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
full_data = pd.read_csv(filename).dropna().drop_duplicates()
# corr_data = pd.DataFrame(np.round(full_data.corr(), 3))
# corr_fig = px.imshow(corr_data, text_auto=True, height=1000, width=1000)
# corr_fig.show()
full_data = drop_negative_values(full_data)
floors_by_categories = floors_num_to_columns(full_data)
year_to_duration(full_data)
date_to_year(full_data)
zipcode_by_categories = zipcode_to_columns(full_data)
features = full_data[["bedrooms",
"bathrooms",
"sqft_living",
"sqft_lot",
"condition",
"view",
"grade",
"sqft_above",
"last_changed"]]
features = pd.concat([features, pd.get_dummies(full_data["selling_year"])],
axis=1)
features = pd.concat([features, floors_by_categories], axis=1)
features = pd.concat([features, zipcode_by_categories], axis=1)
labels = full_data["price"]
return features, labels
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for x in X.columns:
feature = X[x]
        corr = np.cov(feature, y, ddof=0)[0, 1] / np.sqrt(
            np.var(feature) * np.var(y))  # ddof=0 matches np.var, giving Pearson r
corr =
|
np.round(corr, 3)
|
numpy.round
|
import sys
import warnings
import math
import pdb
import itertools
import numpy as np
from utils import reset_wrapper, step_wrapper
from scipy.ndimage.filters import convolve1d as convolve
import os
import copy
import pygame
from numba import njit, jit
from collections import deque
@njit
def angle_between(v1, v2):
v1_conv = v1.astype(np.dtype("float"))
v2_conv = v2.astype(np.dtype("float"))
return np.abs(
np.arctan2(
np.linalg.det(np.stack((v1_conv, v2_conv))),
np.dot(v1_conv, v2_conv),
)
)
@njit
def total_angle_between(v1, v2):
"""
Calculate total angle between v1 and v2. Resulting angle is in range [-pi, pi].
:param v1: first vector.
:type v1: np.array
:param v2: second vector.
:type v2: np.array
:return: angle between v1 and v2, in range [-pi, pi].
:rtype: float.
"""
v1_conv = v1.astype(np.dtype("float"))
v2_conv = v2.astype(np.dtype("float"))
return np.arctan2(
np.linalg.det(np.stack((v1_conv, v2_conv))), np.dot(v1_conv, v2_conv),
)
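# Quick check of the two angle helpers (illustrative values):
# angle_between(np.array([1., 0.]), np.array([0., 1.])) -> pi/2 (unsigned);
# total_angle_between(np.array([0., 1.]), np.array([1., 0.])) -> -pi/2,
# since arctan2(det([v1; v2]), v1 . v2) preserves the rotation direction.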
@njit
def dist_2d(v1, v2):
return math.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2)
@njit
def norm_2d(vector):
return math.sqrt(vector[0] ** 2 + vector[1] ** 2)
def deg_to_rad(deg):
return deg * np.pi / 180
def rad_to_deg(rad):
return rad * 180 / np.pi
def get_rot_matrix(theta):
"""
returns the rotation matrix given a theta value
rotates in the counter clockwise direction
"""
return np.asarray(
[[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
)
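# Illustration: get_rot_matrix(np.pi / 2) @ np.array([1.0, 0.0]) is
# approximately [0., 1.], i.e. the unit x-vector rotated 90 degrees
# counter-clockwise onto the unit y-vector.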
def arange_orientation_info(dim_vector_8):
# converts the 8 dim vector of orientation to
# a 9 dim vector, for visulization purposes
orient_disp_vector = np.zeros(9)
j = 0
for i in range(dim_vector_8.shape[0]):
if i == 4:
j += 1
orient_disp_vector[j] = dim_vector_8[i]
return orient_disp_vector
"""
def get_abs_orientation(agent_state, orientation_approximator):
#returns the current absolute binned orientation of the agent
#one of the 8 directions. Dim:8 (this is the default case)
#for the default case, it additionally returns a 9 dimensional vector
#if no orientation information is provided it returns 4.
#works for the orientation approximator
0 1 2
3 4
5 6 7
############
#for other cases, it just returns the orientation.
#if no orientation information is provided, it returns -1.
no_of_directions = len(orientation_approximator)
angle_diff= np.zeros(no_of_directions)
abs_approx_orientation = None
if no_of_directions==8: #the default
#will return the vector only if the orientation_approximator is the default 8-dir one.
abs_approx_orientation = np.zeros(9)
else:
abs_approx_orientation = np.zeros(no_of_directions)
orientation = agent_state['orientation']
if orientation is None:
#straight up
orientation = 1
elif np.linalg.norm(orientation)==0:
if no_of_directions==8:
orientation = 1
else:
orientation = 1
else:
for i in range(len(orientation_approximator)):
#print('The orientation val')
#print(orientation)
angle_diff[i] = angle_between(orientation_approximator[i], orientation)
orientation = np.argmin(angle_diff)
if no_of_directions == 8:
if orientation >=4:
orientation += 1
abs_approx_orientation[orientation] = 1
return abs_approx_orientation, orientation
return abs_approx_orientation, orientation
"""
def get_abs_orientation(agent_state, orientation_approximator):
"""
#returns the current absolute binned orientation of the agent
#one of the 8 directions. Dim:8 (this is the default case)
#for the default case, it additionally returns a 9 dimensional vector
#if no orientation information is provided it returns 4.
#works for the orientation approximator
0 1 2
7 3
6 5 4
############
#for other cases, it just returns the orientation.
#if no orientation information is provided, it returns -1.
"""
no_of_directions = len(orientation_approximator)
angle_diff = np.zeros(no_of_directions)
min_thresh = 0.001
abs_approx_orientation = None
if no_of_directions == 8: # the default
# will return the vector only if the orientation_approximator is the default 8-dir one.
abs_approx_orientation = np.zeros(9)
else:
abs_approx_orientation = np.zeros(no_of_directions)
orientation = agent_state["orientation"]
if orientation is None:
# straight up
orientation = 1
else:
for i in range(len(orientation_approximator)):
# print('The orientation val')
# print(orientation)
angle_diff[i] = angle_between(
orientation_approximator[i], orientation
)
orientation = np.argmin(angle_diff)
abs_approx_orientation[orientation] = 1
return abs_approx_orientation, orientation
def get_rel_orientation(prev_frame_info, agent_state, goal_state):
"""
    Calculates and bins the angle between (goal_pos - agent_pos) and the
    agent's velocity. In effect, this is the "error" in the agent's heading.
"""
# returns the relative orientation of the agent with the direction
# of the goal.
# Primarily for use in IRL
relative_orientation_vector = np.zeros(4)
vector_to_goal = goal_state - agent_state["position"]
if prev_frame_info is None:
agent_orientation = np.array([-1, 0])
else:
agent_orientation = (
agent_state["position"] - prev_frame_info["position"]
)
diff_in_angle = angle_between(vector_to_goal, agent_orientation)
# pdb.set_trace()
if diff_in_angle < np.pi / 8:
rel_orientation = 0
elif diff_in_angle < np.pi / 4 and diff_in_angle >= np.pi / 8:
rel_orientation = 1
elif diff_in_angle < np.pi * 3 / 4 and diff_in_angle >= np.pi / 4:
rel_orientation = 2
else:
rel_orientation = 3
relative_orientation_vector[rel_orientation] = 1
return relative_orientation_vector
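# Bin summary for get_rel_orientation (heading error from angle_between):
# bin 0: error < pi/8 (on course); bin 1: [pi/8, pi/4);
# bin 2: [pi/4, 3*pi/4); bin 3: >= 3*pi/4 (moving away from the goal).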
def get_rel_goal_orientation(
orientation_approximator,
rel_orient_conv,
agent_state,
agent_abs_orientation,
goal_state,
):
"""
Calculates a vector from the agent to the goal.
This vector is in the agent's coordinate system, e.g. zero degrees is forward.
This vector is binned into a one hot vector based on orientation_approximator.
"""
# returns the relative orientation of the goal wrt to the agent
# Dim:8
no_of_directions = len(orientation_approximator)
angle_diff = np.zeros(no_of_directions)
relative_orientation_vector = np.zeros(no_of_directions)
rot_matrix = get_rot_matrix(rel_orient_conv[agent_abs_orientation])
    # translate the point so that the agent sits at the center of the
    # coordinates before rotation
vec_to_goal = goal_state - agent_state["position"]
# rotate the coordinates to get the relative coordinates wrt the agent
rel_coord_goal = np.matmul(rot_matrix, vec_to_goal)
relative_goal = {}
relative_goal["orientation"] = rel_coord_goal
relative_orientation_vector, _ = get_abs_orientation(
relative_goal, orientation_approximator
)
return relative_orientation_vector
def discretize_information(information, information_slabs):
# given a piece of information(scalar), this function returns the correct
# slab in which the information belongs, based on the slab information
# information_slab(list)provided
for i in range(len(information_slabs) - 1):
if (
information >= information_slabs[i]
and information < information_slabs[i + 1]
):
return i
# if does not classify in any information slabs
return None
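# Example: discretize_information(15, [0, 10, 20, 30]) returns 1, because
# 10 <= 15 < 20; a value outside [0, 30) falls through the loop and the
# function returns None.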
def calculate_social_forces(
agent_state, obstacle_state, agent_width, obstacle_width, a, b, lambda_val
):
# agent_state and obstacle_state are dictionaries with the following information:
# position, orientation and speed
r_i_j = agent_width / 2 + obstacle_width / 2
d_i_j = np.linalg.norm(
agent_state["position"] - obstacle_state["position"]
)
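    # The function body ends here without a return. A plausible completion
    # (an assumption, a Helbing-style repulsive force, not the original code):
    # n_i_j = (agent_state["position"] - obstacle_state["position"]) / d_i_j
    # return a * np.exp((r_i_j - d_i_j) / b) * n_i_j
    # with lambda_val scaling the force anisotropically by heading.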
@njit
def radial_density_features(agent_position, pedestrian_positions, radius):
"""
implements the 'density features' from:
IRL Algorithms and Features for Robot navigation in Crowds: Vasquez et. al
:param agent_position: position of agent.
:type agent_position: numpy array or tuple.
:param pedestrian_positions: list or array of pedestrian positions.
:type pedestrian_positions: list or np array of tuples or np arrays.
"""
pedestrian_count = 0
# Using indexing necessary for Numba to work
for ped_idx in range(len(pedestrian_positions)):
if dist_2d(pedestrian_positions[ped_idx], agent_position) <= radius:
pedestrian_count += 1
if pedestrian_count >= 5:
return np.array([0.0, 0.0, 1.0])
if pedestrian_count < 2:
return np.array([1.0, 0.0, 0.0])
elif 2 <= pedestrian_count < 5:
return np.array([0.0, 1.0, 0.0])
else:
raise ValueError
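# Illustrative call (assumed shapes): with four pedestrians inside the
# radius, the middle bin fires:
# radial_density_features(np.zeros(2),
#                         np.array([[1., 0.], [0., 1.], [-1., 0.], [0., -1.]]),
#                         radius=2.0) -> array([0., 1., 0.])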
@njit
def speed_features(
agent_velocity,
pedestrian_velocities,
lower_threshold=0.015,
upper_threshold=0.025,
):
"""
Computes speed features as described in Vasquez et. al's paper: "Learning
to navigate through crowded environments".
:param agent_velocity: velocity of agent (robot)
:type agent_velocity: 2D np.array or tuple
:param pedestrian_velocities: velocities of pedestrians
:type pedestrian_velocities: list or np.array of 2d arrays or tuples.
    :param lower_threshold: lower speed threshold used for binning.
        This is 0.015 in the paper.
    :type lower_threshold: float
    :param upper_threshold: upper speed threshold used for binning.
        This is 0.025 in the paper.
    :type upper_threshold: float
:return: magnitude feature np.array of shape (3,)
:rtype: float np.array
"""
assert lower_threshold < upper_threshold
feature = np.zeros(3)
for idx in range(len(pedestrian_velocities)):
pedestrian_vel = pedestrian_velocities[idx]
speed = dist_2d(pedestrian_vel, agent_velocity)
# put value into proper bin
if 0 <= speed < lower_threshold:
feature[0] += 1
elif lower_threshold <= speed < upper_threshold:
feature[1] += 1
elif speed >= upper_threshold:
feature[2] += 1
else:
raise ValueError(
"Error in binning speed. speed does not fit into any bin."
)
return feature
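# Illustrative call: one slow and one fast pedestrian relative to a
# stationary agent fills the outer bins:
# speed_features(np.zeros(2), np.array([[0.01, 0.0], [0.03, 0.0]]))
# -> array([1., 0., 1.]) with the default thresholds 0.015 and 0.025.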
@njit
def orientation_features(
agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities
):
"""
Computes the orientation features described in Vasquez et. al's paper:
"Learning to navigate through crowded environments".
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
:param pedestrian_positions: positions of pedestrians.
:type pedestrian_positions: np.array or list, containing 2d arrays or tuples.
:param pedestrian_velocities: velocities of pedestrians.
:type pedestrian_velocities: np.array or list, containing 2d arrays or tuples.
:return: orientation feature vector.
:rtype: float np.array of shape (3,)
"""
feature = np.zeros(3)
# Check that same number of pedestrian positions and velocities are passed in.
assert len(pedestrian_positions) == len(pedestrian_velocities)
for ped_id in range(len(pedestrian_positions)):
relative_pos = agent_position - pedestrian_positions[ped_id]
relative_vel = agent_velocity - pedestrian_velocities[ped_id]
# angle_between produces only positive angles
angle = angle_between(relative_pos, relative_vel)
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi < angle <= np.pi:
feature[0] += 1
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
feature[1] += 1
elif 0.0 <= angle < 0.25 * np.pi:
feature[2] += 1
else:
raise ValueError(
"Error in binning orientation. Orientation does not fit into any bin."
)
return feature
@njit
def velocity_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
lower_speed_threshold=0.015,
upper_speed_threshold=0.025,
):
"""
    Computes the velocity features described in Vasquez et al.'s paper:
"Learning to navigate through crowded environments".
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
    :param pedestrian_positions: positions of pedestrians.
    :type pedestrian_positions: 2d float np.array.
    :param pedestrian_velocities: velocities of pedestrians.
    :type pedestrian_velocities: 2d float np.array.
    :param lower_speed_threshold: Lower speed-magnitude threshold used for
        binning. This is 0.015 in the paper.
    :type lower_speed_threshold: float
    :param upper_speed_threshold: Upper speed-magnitude threshold used for
        binning. This is 0.025 in the paper.
    :type upper_speed_threshold: float
    :return: velocity feature vector (3 orientation bins x 3 speed bins,
        flattened).
    :rtype: float np.array of shape (9,)
"""
assert lower_speed_threshold < upper_speed_threshold
feature = np.zeros((3, 3))
assert len(pedestrian_positions) == len(pedestrian_velocities)
# used to group pedestrians with the same orientation bin together using
# their ID.
ped_sorted_by_orientation = [np.empty(0, dtype=np.int64)] * 3
for ped_id in range(len(pedestrian_positions)):
relative_pos = agent_position - pedestrian_positions[ped_id]
relative_vel = agent_velocity - pedestrian_velocities[ped_id]
# angle_between produces only positive angles
if (relative_pos == np.zeros(2)).all() or (
relative_vel == np.zeros(2)
).all():
# cannot calculate angle between zero vectors
angle = 0.0
else:
angle = angle_between(relative_pos, relative_vel)
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi < angle <= np.pi:
ped_sorted_by_orientation[0] = np.append(
ped_sorted_by_orientation[0], ped_id
)
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
ped_sorted_by_orientation[1] = np.append(
ped_sorted_by_orientation[1], ped_id
)
elif 0.0 <= angle < 0.25 * np.pi:
ped_sorted_by_orientation[2] = np.append(
ped_sorted_by_orientation[2], ped_id
)
else:
raise ValueError("Orientation does not fit into any bin.")
    for idx, ped_ids in enumerate(ped_sorted_by_orientation):
        velocities = pedestrian_velocities[ped_ids]
        if not velocities.size:
            # skip empty orientation bins; a `break` here would wrongly
            # discard all of the remaining bins
            continue
        # note: this is the mean of the absolute velocity components, not
        # the mean of the per-pedestrian speed norms
        mean_speeds = np.mean(np.abs(velocities))
        # bin speeds
        if 0 <= mean_speeds < lower_speed_threshold:
            feature[idx, 0] = 1
        elif lower_speed_threshold <= mean_speeds < upper_speed_threshold:
            feature[idx, 1] = 1
        elif mean_speeds >= upper_speed_threshold:
            feature[idx, 2] = 1
        else:
            raise ValueError("Average speed does not fit in any bins.")
return feature.flatten()
def social_force_features(
agent_radius, agent_position, agent_velocity, pedestrian_positions
):
"""
    Computes the social force features described in Vasquez et al.'s paper:
"Learning to navigate through crowded environments".
:param agent_radius: radius of agent(s) in the environment. Note: this is
the radius of the agent's graphical circle, not a radius around the
agent.
:type agent_radius: float.
:param agent_position: position of the agent (robot)
:type agent_position: 2d np.array or tuple
:param agent_velocity: velocity of the agent (robot)
:type agent_velocity: 2d np.array or tuple
:param pedestrian_positions: positions of pedestrians.
:type pedestrian_positions: 2d float np.array.
:param pedestrian_velocities: velocities of pedestrians.
:type pedestrian_velocities: 2d float np.array.
:return: orientation feature vector.
:rtype: float np.array of shape (3,)
"""
# in the paper formula, 'i' is our agent, while 'j's are the pedestrians.
rel_positions = pedestrian_positions - agent_position
rel_distances = np.linalg.norm(rel_positions, axis=1)
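    # note: the next line divides every relative position by the single
    # largest distance, rather than by each vector's own norm, so the
    # results are not unit vectors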
normalized_rel_positions = rel_positions / np.max(rel_distances)
assert rel_positions.shape == normalized_rel_positions.shape
rel_angles = np.zeros(rel_distances.shape)
# used to group pedestrians with the same orientation bin together using
# their ID.
feature = np.zeros(3)
ped_orientation_bins = [np.empty(0, dtype=np.int64)] * 3
for ped_id in range(len(pedestrian_positions)):
relative_pos = rel_positions[ped_id]
# angle_between produces only positive angles
angle = angle_between(relative_pos, agent_velocity)
rel_angles[ped_id] = angle
# put into bins
# Bins adjusted to work with angle_between() (i.e. abs value of angles.)
if 0.75 * np.pi <= angle <= np.pi:
ped_orientation_bins[0] = np.append(
ped_orientation_bins[0], ped_id
)
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
ped_orientation_bins[1] = np.append(
ped_orientation_bins[1], ped_id
)
elif 0.0 <= angle < 0.25 * np.pi:
ped_orientation_bins[2] = np.append(
ped_orientation_bins[2], ped_id
)
else:
raise ValueError("Orientation does not fit into any bin.")
exp_multiplier = np.exp(2 * agent_radius - rel_distances).reshape(-1, 1)
anisotropic_term = (2.0 - 0.5 * (1.0 + np.cos(rel_angles))).reshape(-1, 1)
social_forces = (
exp_multiplier * normalized_rel_positions * anisotropic_term
)
forces_above_threshold = np.linalg.norm(social_forces, axis=1) > 0.5
feature[0] = np.sum(forces_above_threshold[ped_orientation_bins[0]])
feature[1] = np.sum(forces_above_threshold[ped_orientation_bins[1]])
feature[2] = np.sum(forces_above_threshold[ped_orientation_bins[2]])
return feature
@njit
def angle_to_goal_features(goal_position, agent_position, agent_orientation):
"""
computes features based on the error in the agent's heading towards the
goal. Error is the angle between agent heading vector and vector
(goal_pos - agent_pos). The features are binary features based on where
the angle fits in the bins [0-pi/8, pi/8-pi/4, pi/4-3/4pi, 3/4pi-pi].
This is meant to mimic the goal_rel_orientation function.
:param goal_position: position of the goal.
:type goal_position: 2d numpy vector.
:param agent_position: position of agent.
:type agent_position: 2d numpy vector.
:param agent_orientation: orientation vector of agent.
:type agent_orientation: 2d numpy vector.
:raises ValueError: If angle does not fit in the [0,pi] interval,
something unexpected has happened.
:return: feature vector representing binned angles.
:rtype: float np.array
"""
features = np.zeros(4)
vector_to_goal = goal_position - agent_position
angle = angle_between(agent_orientation, vector_to_goal)
# bin in angle bins
if 0.0 <= angle < 0.125 * np.pi:
features[0] = 1.0
elif 0.125 * np.pi <= angle < 0.25 * np.pi:
features[1] = 1.0
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
features[2] = 1.0
elif 0.75 * np.pi <= angle <= np.pi:
features[3] = 1.0
else:
raise ValueError("Cannot bin angle in [0,pi] interval.")
return features
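# e.g. an agent at the origin oriented along +x with the goal at (1, 1) has a
# heading error of pi / 4, which falls in the third bin: [0, 0, 1, 0].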
@njit
def vector_to_goal_features(goal_position, agent_position, agent_orientation):
    """
    Computes an 8-way one-hot encoding of the signed angle between the
    agent's orientation and the vector to the goal. Angles in [-pi, pi]
    are binned into eight sectors; the two sectors adjacent to +/-pi share
    features[4], so the encoding wraps around.
    """
    features = np.zeros(8)
vector_to_goal = goal_position - agent_position
angle = total_angle_between(agent_orientation, vector_to_goal)
# mimic finding closest relative vector by binning angle
if -0.125 * np.pi <= angle < 0.125 * np.pi:
features[0] = 1.0
elif 0.125 * np.pi <= angle < 0.375 * np.pi:
features[1] = 1.0
elif 0.375 * np.pi <= angle < 0.625 * np.pi:
features[2] = 1.0
elif 0.625 * np.pi <= angle < 0.875 * np.pi:
features[3] = 1.0
elif 0.875 * np.pi <= angle <= np.pi:
features[4] = 1.0
elif -np.pi <= angle < -0.875 * np.pi:
features[4] = 1.0
elif -0.875 * np.pi <= angle < -0.625 * np.pi:
features[5] = 1.0
elif -0.625 * np.pi <= angle < -0.375 * np.pi:
features[6] = 1.0
elif -0.375 * np.pi <= angle < -0.125 * np.pi:
features[7] = 1.0
else:
raise ValueError("Faled to bin angles in [-pi, pi] range.")
return features
@njit
def orientation_change_features(new_orientation, old_orientation):
thresholds = np.array(
[0, np.pi / 9, 2 * np.pi / 9, np.pi * 3 / 9, 4 * np.pi / 9]
)
if old_orientation is None:
print("Warning: old orientation is none, assuming old=new.")
orientation_change = 0.0
else:
orientation_change = angle_between(new_orientation, old_orientation)
# bin based on thresholds
features = np.zeros(5)
index = np.argmin(np.abs(orientation_change - thresholds))
features[index] = 1.0
return features
@njit
def SAM_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
inner_radius,
outer_radius,
lower_speed_threshold,
upper_speed_threshold,
):
"""
    Calculates the full SAM features based on Fahad et al.'s 2018 paper:
"Learning How Pedestrians Navigate: A Deep Inverse Reinforcement Learning Approach"
:param agent_position: Position of the agent.
:type agent_position: 2d numpy float array.
:param agent_velocity: Agent velocity.
:type agent_velocity: 2d numpy float array.
:param pedestrian_positions: Px2 vector of the position of all pedestrians.
:type pedestrian_positions: Px2 numpy float array where P is the number
of pedestrians.
:param pedestrian_velocities: Px2 vector of the velocity of all pedestrians.
:type pedestrian_velocities: Px2 numpy float array where P is the number
of pedestrians.
:param inner_radius: Radius of inner circle of feature extractor.
:type inner_radius: float.
:param outer_radius: Radius of outer circle of feature extractor.
:type outer_radius: float.
:param lower_speed_threshold: lower binning threshold for speed.
:type lower_speed_threshold: float.
:param upper_speed_threshold: upper binning threshold for speed.
:type upper_speed_threshold: float.
    :return: tuple (SAM_features, density) where SAM_features are the
        features and density is the total number of pedestrians inside
        all bins.
    :rtype: tuple(float np.array of shape (70,), float)
"""
num_pedestrians = pedestrian_positions.shape[0]
# classify pedestrians in either inner or outer ring
ring_designation = np.zeros(num_pedestrians)
for idx in range(num_pedestrians):
ped_distance = dist_2d(agent_position, pedestrian_positions[idx])
if ped_distance <= outer_radius:
if ped_distance > inner_radius:
ring_designation[idx] = 2
else:
ring_designation[idx] = 1
inner_ped_positions = pedestrian_positions[ring_designation == 1]
inner_ped_velocities = pedestrian_velocities[ring_designation == 1]
outer_ped_positions = pedestrian_positions[ring_designation == 2]
outer_ped_velocities = pedestrian_velocities[ring_designation == 2]
assert inner_ped_positions.shape[0] == inner_ped_velocities.shape[0]
assert outer_ped_positions.shape[0] == outer_ped_velocities.shape[0]
num_inner_pedestrians = inner_ped_positions.shape[0]
num_outer_pedestrians = outer_ped_positions.shape[0]
# classify pedestrians in each bin, and add up their velocities per bin
peds_in_bin_counts = np.zeros(10)
average_velocities = np.zeros((10, 2))
for idx in range(num_inner_pedestrians):
ped_relative_position = inner_ped_positions[idx] - agent_position
ped_velocity = inner_ped_velocities[idx]
angle = total_angle_between(agent_velocity, ped_relative_position)
if -0.25 * np.pi < angle < 0.25 * np.pi:
peds_in_bin_counts[0] += 1
average_velocities[0] += ped_velocity
elif 0.25 * np.pi <= angle < 0.75 * np.pi:
peds_in_bin_counts[1] += 1
average_velocities[1] += ped_velocity
        # include the +/-pi boundary so every angle in [-pi, pi] gets binned
        elif 0.75 * np.pi <= angle <= np.pi or -np.pi <= angle < -0.75 * np.pi:
peds_in_bin_counts[2] += 1
average_velocities[2] += ped_velocity
elif -0.75 * np.pi <= angle <= -0.25 * np.pi:
peds_in_bin_counts[3] += 1
average_velocities[3] += ped_velocity
else:
raise ValueError("angle couldn't be binned.")
for idx in range(num_outer_pedestrians):
ped_relative_position = outer_ped_positions[idx] - agent_position
ped_velocity = outer_ped_velocities[idx]
angle = total_angle_between(agent_velocity, ped_relative_position)
if -0.25 * np.pi < angle < 0.25 * np.pi:
peds_in_bin_counts[4] += 1
average_velocities[4] += ped_velocity
elif 0.25 * np.pi <= angle < 0.5 * np.pi:
peds_in_bin_counts[9] += 1
average_velocities[9] += ped_velocity
elif 0.5 * np.pi <= angle < 0.75 * np.pi:
peds_in_bin_counts[8] += 1
average_velocities[8] += ped_velocity
        elif 0.75 * np.pi <= angle <= np.pi or -np.pi <= angle < -0.75 * np.pi:
peds_in_bin_counts[7] += 1
average_velocities[7] += ped_velocity
        elif -0.5 * np.pi <= angle <= -0.25 * np.pi:
peds_in_bin_counts[5] += 1
average_velocities[5] += ped_velocity
elif -0.75 * np.pi <= angle < -0.5 * np.pi:
peds_in_bin_counts[6] += 1
average_velocities[6] += ped_velocity
else:
raise ValueError("angle couldn't be binned.")
nonzero_mask = peds_in_bin_counts != 0
average_velocities[nonzero_mask] /= peds_in_bin_counts[
nonzero_mask
].reshape(-1, 1)
heading_feat_vect = np.zeros((10, 3))
velocity_feat_vect = np.zeros((10, 3))
    # 0-degree vector used as reference for judging absolute angles
    # (currently unused in this function).
angle_origin = np.array([1.0, 0.0])
for idx in range(len(average_velocities)):
if peds_in_bin_counts[idx] == 0.0:
continue
relative_velocity = agent_velocity - average_velocities[idx]
heading = angle_between(relative_velocity, agent_velocity)
heading_thresholds = np.array([0.25 * np.pi, 0.75 * np.pi])
heading_idx = np.digitize(np.array(heading), heading_thresholds)
heading_feat_vect[idx][heading_idx] = 1
vel_idx = np.digitize(
np.array(norm_2d(relative_velocity)),
np.array([lower_speed_threshold, upper_speed_threshold]),
)
velocity_feat_vect[idx][vel_idx] = 1
velocity_feat_vect = np.concatenate(
(heading_feat_vect, velocity_feat_vect), axis=1
).reshape(-1, 1)
SAM_vector = np.concatenate(
(peds_in_bin_counts.reshape(-1, 1), velocity_feat_vect)
).flatten()
density = np.sum(peds_in_bin_counts)
return SAM_vector, density
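# Illustrative usage (not part of the original module): a minimal sketch of
# the SAM feature layout. The returned vector stacks 10 per-bin pedestrian
# counts with a (10 x 6) block of one-hot heading/speed indicators, giving
# 10 + 60 = 70 entries in total.
def _demo_SAM_features():
    agent_position = np.array([0.0, 0.0])
    agent_velocity = np.array([1.0, 0.0])
    # one pedestrian in the inner ring, one in the outer ring
    pedestrian_positions = np.array([[0.5, 0.0], [3.0, 0.0]])
    pedestrian_velocities = np.array([[0.0, 0.0], [-1.0, 0.0]])
    sam_vector, density = SAM_features(
        agent_position,
        agent_velocity,
        pedestrian_positions,
        pedestrian_velocities,
        1.0,    # inner_radius
        5.0,    # outer_radius
        0.015,  # lower_speed_threshold
        0.025,  # upper_speed_threshold
    )
    assert sam_vector.shape == (70,)
    assert density == 2.0  # both pedestrians fall inside the outer radius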
@njit
def distance_from_goal_features(agent_position, goal_position):
"""
Calculates manhattan distance between agent position and goal position.
This distance is calculated in a discrete manner, taken from floor of
distance vector. Which results in an integer.
:param agent_position: position of agent.
:type agent_position: 2d np float array.
:param goal_position: position of goal.
:type goal_position: 2d np float array.
:return: manhattan distance from goal.
:rtype: int.
"""
distance = goal_position - agent_position
manhattan_distance = np.sum(np.abs(np.floor(distance)))
return manhattan_distance
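# e.g. agent at (0.2, 0.3) with the goal at (3.9, 5.1): the offset (3.7, 4.8)
# floors to (3.0, 4.0), giving a manhattan distance of 7.0.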
class BaseVasquez:
def __init__(self):
self.old_agent_velocity = None
def compute_state_information(self, state_dict):
"""
        Vasquez et al.'s features are based on agent positions and
velocities. This function computes those values and returns them in
the proper format.
:param state_dict: State dictionary of the environment.
:type state_dict: dictionary.
:return: agent position, agent velocity, pedestrian positions,
pedestrian velocities
:rtype: 2d float np.array, 2d float np.array, (num_peds x 2) float
np.array, (num_peds x2) float np.array
"""
# get necessary info about pedestrians
ped_info_list = state_dict["obstacles"]
ped_velocities = np.zeros((len(ped_info_list), 2))
ped_positions = np.zeros((len(ped_info_list), 2))
for ped_index, ped_info in enumerate(ped_info_list):
ped_orientation = ped_info["orientation"]
if ped_orientation is not None:
ped_orientation = ped_orientation / norm_2d(ped_orientation)
ped_velocities[ped_index] = ped_orientation * ped_info["speed"]
else:
# can't calculate velocity if orientation is not known.
ped_velocities[ped_index] = np.zeros(2)
ped_positions[ped_index] = ped_info["position"]
# get necessary info about agent
agent_orientation = state_dict["agent_state"]["orientation"]
normalizing_factor = norm_2d(agent_orientation)
assert normalizing_factor is not None
if normalizing_factor != 0.0:
agent_orientation = agent_orientation / normalizing_factor
else:
            warnings.warn(
                "division by zero side-stepped - agent has (0, 0) orientation."
            )
agent_speed = state_dict["agent_state"]["speed"]
agent_velocity = agent_orientation * agent_speed
agent_position = state_dict["agent_state"]["position"]
return (
agent_position,
agent_velocity,
ped_positions,
ped_velocities,
)
def hash_function(self, feature):
return feature.tobytes()
def recover_state_from_hash_value(self, hash_value):
return np.frombuffer(hash_value)
def overlay_bins(self, state):
return None
class VasquezF1(BaseVasquez):
def __init__(
self, density_radius, lower_speed_threshold, upper_speed_threshold
):
"""
        Calculates Vasquez et al.'s f1 features as described in their paper:
"Inverse reinforcement learning Algorithms and features for robot
navigation in crowds."
        :param density_radius: Radius around the agent used to compute
            density features of the surrounding pedestrians.
:type density_radius: float.
:param lower_speed_threshold: Lower speed magnitude threshold, used for
binning features based on speed.
:type lower_speed_threshold: float.
:param upper_speed_threshold: Upper speed magnitude threshold, used for
binning features based on speed.
:type upper_speed_threshold: float.
"""
super().__init__()
self.density_radius = density_radius
self.lower_speed_threshold = lower_speed_threshold
self.upper_speed_threshold = upper_speed_threshold
def extract_features(self, state_dict):
(
agent_position,
agent_velocity,
ped_positions,
ped_velocities,
) = self.compute_state_information(state_dict)
density_feature_vector = radial_density_features(
agent_position, ped_positions, self.density_radius
)
velocity_feature_vector = velocity_features(
agent_position,
agent_velocity,
ped_positions,
ped_velocities,
lower_speed_threshold=self.lower_speed_threshold,
upper_speed_threshold=self.upper_speed_threshold,
)
orientation_change_feature_vector = orientation_change_features(
agent_velocity, self.old_agent_velocity
)
self.old_agent_velocity = agent_velocity
default_feature_vector = np.ones(1)
# goal orienting features
goal_position = state_dict["goal_state"]
angle_to_goal_feature_vector = angle_to_goal_features(
goal_position, agent_position, agent_velocity
)
vector_to_goal_feature_vector = vector_to_goal_features(
goal_position, agent_position, agent_velocity
)
out_features = np.concatenate(
(
density_feature_vector,
velocity_feature_vector,
angle_to_goal_feature_vector,
vector_to_goal_feature_vector,
orientation_change_feature_vector,
default_feature_vector,
)
)
return out_features
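# Illustrative usage (not part of the original module): a minimal sketch of
# the state_dict layout consumed by the Vasquez extractors, following the
# format read by BaseVasquez.compute_state_information. The F1 vector is
# 3 (density) + 9 (velocity) + 4 (angle to goal) + 8 (vector to goal)
# + 5 (orientation change) + 1 (bias) = 30 entries long.
def _demo_vasquez_f1():
    state_dict = {
        "agent_state": {
            "position": np.array([0.0, 0.0]),
            "orientation": np.array([1.0, 0.0]),
            "speed": 1.0,
        },
        "goal_state": np.array([5.0, 5.0]),
        "obstacles": [
            {
                "position": np.array([1.0, 1.0]),
                "orientation": np.array([0.0, 1.0]),
                "speed": 0.5,
            }
        ],
    }
    extractor = VasquezF1(
        density_radius=2.0,
        lower_speed_threshold=0.015,
        upper_speed_threshold=0.025,
    )
    features = extractor.extract_features(state_dict)
    assert features.shape == (30,)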
class VasquezF2(BaseVasquez):
def __init__(
self, density_radius, lower_speed_threshold, upper_speed_threshold
):
"""
        Calculates Vasquez et al.'s f2 features as described in their paper:
"Inverse reinforcement learning Algorithms and features for robot
navigation in crowds."
        :param density_radius: Radius around the agent used to compute
            density features of the surrounding pedestrians.
:type density_radius: float.
:param lower_speed_threshold: Lower speed magnitude threshold, used for
binning features based on speed.
:type lower_speed_threshold: float.
:param upper_speed_threshold: Upper speed magnitude threshold, used for
binning features based on speed.
:type upper_speed_threshold: float.
"""
super().__init__()
self.density_radius = density_radius
self.lower_speed_threshold = lower_speed_threshold
self.upper_speed_threshold = upper_speed_threshold
def extract_features(self, state_dict):
(
agent_position,
agent_velocity,
ped_positions,
ped_velocities,
) = self.compute_state_information(state_dict)
density_feature_vector = radial_density_features(
agent_position, ped_positions, self.density_radius
)
speed_feature_vector = speed_features(
agent_velocity,
ped_velocities,
lower_threshold=self.lower_speed_threshold,
upper_threshold=self.upper_speed_threshold,
)
orientation_feature_vector = orientation_features(
agent_position, agent_velocity, ped_positions, ped_velocities,
)
default_feature_vector = np.ones(1)
orientation_change_feature_vector = orientation_change_features(
agent_velocity, self.old_agent_velocity
)
self.old_agent_velocity = agent_velocity
# goal orienting features
goal_position = state_dict["goal_state"]
angle_to_goal_feature_vector = angle_to_goal_features(
goal_position, agent_position, agent_velocity
)
vector_to_goal_feature_vector = vector_to_goal_features(
goal_position, agent_position, agent_velocity
)
out_features = np.concatenate(
(
density_feature_vector,
speed_feature_vector,
orientation_feature_vector,
angle_to_goal_feature_vector,
vector_to_goal_feature_vector,
orientation_change_feature_vector,
default_feature_vector,
)
)
return out_features
class VasquezF3(BaseVasquez):
def __init__(self, agent_radius):
"""
        Calculates Vasquez et al.'s f3 features as described in their paper:
"Inverse reinforcement learning Algorithms and features for robot
navigation in crowds."
:param agent_radius: radius of agent itself. This is the "width" of the agent, not a radius surrounding the agent.
:type agent_radius: float.
"""
super().__init__()
self.agent_radius = agent_radius
def extract_features(self, state_dict):
(
agent_position,
agent_velocity,
ped_positions,
_,
) = self.compute_state_information(state_dict)
default_feature_vector = np.ones(1)
social_force_feature_vector = social_force_features(
self.agent_radius, agent_position, agent_velocity, ped_positions
)
# goal orienting features
goal_position = state_dict["goal_state"]
angle_to_goal_feature_vector = angle_to_goal_features(
goal_position, agent_position, agent_velocity
)
vector_to_goal_feature_vector = vector_to_goal_features(
goal_position, agent_position, agent_velocity
)
orientation_change_feature_vector = orientation_change_features(
agent_velocity, self.old_agent_velocity
)
self.old_agent_velocity = agent_velocity
out_features = np.concatenate(
(
social_force_feature_vector,
angle_to_goal_feature_vector,
vector_to_goal_feature_vector,
orientation_change_feature_vector,
default_feature_vector,
)
)
return out_features
class Fahad(BaseVasquez):
def __init__(
self,
inner_radius,
outer_radius,
lower_speed_threshold,
upper_speed_threshold,
):
super().__init__()
self.inner_radius = inner_radius
self.outer_radius = outer_radius
self.lower_speed_threshold = lower_speed_threshold
self.upper_speed_threshold = upper_speed_threshold
def extract_features(self, state_dict):
(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
) = self.compute_state_information(state_dict)
SAM_vector, density_vector = SAM_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
self.inner_radius,
self.outer_radius,
self.lower_speed_threshold,
self.upper_speed_threshold,
)
distance_feature_vect = distance_from_goal_features(
agent_position, state_dict["goal_state"]
)
default_feature = np.array([1.0])
output_feature = np.concatenate(
(
SAM_vector,
np.array([density_vector]),
np.array([distance_feature_vect]),
default_feature,
)
)
return output_feature
class GoalConditionedFahad(BaseVasquez):
def __init__(
self,
inner_radius,
outer_radius,
lower_speed_threshold,
upper_speed_threshold,
):
super().__init__()
self.inner_radius = inner_radius
self.outer_radius = outer_radius
self.lower_speed_threshold = lower_speed_threshold
self.upper_speed_threshold = upper_speed_threshold
def extract_features(self, state_dict):
(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
) = self.compute_state_information(state_dict)
SAM_vector, density_vector = SAM_features(
agent_position,
agent_velocity,
pedestrian_positions,
pedestrian_velocities,
self.inner_radius,
self.outer_radius,
self.lower_speed_threshold,
self.upper_speed_threshold,
)
default_feature = np.array([1.0])
# goal orienting features
goal_position = state_dict["goal_state"]
angle_to_goal_feature_vector = angle_to_goal_features(
goal_position, agent_position, agent_velocity
)
vector_to_goal_feature_vector = vector_to_goal_features(
goal_position, agent_position, agent_velocity
)
orientation_change_feature_vector = orientation_change_features(
agent_velocity, self.old_agent_velocity
)
self.old_agent_velocity = agent_velocity
default_feature = np.ones(1)
output_feature = np.concatenate(
(
SAM_vector,
np.array([density_vector]),
angle_to_goal_feature_vector,
vector_to_goal_feature_vector,
orientation_change_feature_vector,
default_feature,
)
)
return output_feature
class DroneFeatureSAM1:
"""
Features to put in:
    1. Orientation of the obstacles
    2. Speed of the obstacles
    3. Speed of the agent?
       N.B. To add the speed of the agent, you have to have
       actions that deal with the speed of the agent.
    4. Density of pedestrians around the agent?
"""
"""
Description of the feature representation:
Total size : 162 = 9 + 3 + 3 + 3 + 16*9
Global direction : The direction in which the agent is facing. (9)
Goal direction : The direction of the goal wrt the agent. (3)
Inner ring density : The number of people in the inner ring. (3)
Outer ring density : The number of people in the outer ring. (3)
Single Bin information : The average speed and orientation of the
people in a given bin. (5(speed)+4(orientation))
Total number of bins : 8x2
"""
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
obs_width=10,
step_size=10,
grid_size=10,
show_bins=False,
):
self.agent_width = agent_width
self.obs_width = obs_width
self.step_size = step_size
self.grid_size = grid_size
# self.prev_frame_info = None
self.agent_state_history = deque(maxlen=1)
self.state_rep_size = None
self.thresh1 = thresh1 * step_size
self.thresh2 = thresh2 * step_size
"""
orientation approximator format
0 1 2
3 4
5 6 7
self.orientation_approximator = [np.array([-2, -2]), np.array([-2,0]),
np.array([-2, 2]), np.array([0, -2]),
np.array([0, 2]), np.array([2, -2]),
np.array([2, 0]), np.array([2,2])]
"""
"""
orientation approximator format
0 1 2
7 3
6 5 4
"""
self.orientation_approximator = [
np.array([-2, -2]),
np.array([-2, 0]),
np.array([-2, 2]),
np.array([0, 2]),
np.array([2, 2]),
np.array([2, 0]),
np.array([2, -2]),
np.array([0, -2]),
]
"""
0
3 1
2
"""
self.orientation_approximator_4 = [
np.array([-2, 0]),
np.array([0, 2]),
np.array([2, 0]),
np.array([0, -2]),
]
"""
self.rel_orient_conv = [7*np.pi/4, 0,
np.pi/4, 6*np.pi/4,
np.pi/2, 5*np.pi/4,
np.pi, 3*np.pi/4]
"""
self.rel_orient_conv = [
7 * np.pi / 4,
0,
1 * np.pi / 4,
2 * np.pi / 4,
3 * np.pi / 4,
4 * np.pi / 4,
5 * np.pi / 4,
6 * np.pi / 4,
]
"""
self.rel_orient_conv = [np.pi/4, 0, 7*np.pi/4,
2*np.pi/4, 6*np.pi/4,
3*np.pi/4, 4*np.pi/4, 5*np.pi/4]
"""
self.speed_divisions = [0, 1, 2, 5]
self.inner_ring_density_division = [0, 1, 2, 4]
self.outer_ring_density_division = [0, 3, 5, 7]
self.show_bins = show_bins
# self.bins is a dictionary, with keys containing the id of the bins and
# corresponding to each bin is a list containing the obstacles
# present in the bin
self.bins = {}
for i in range(16):
self.bins[str(i)] = []
self.state_dictionary = {}
self.state_str_arr_dict = {}
self.inv_state_dictionary = {}
self.hash_variable_list = []
self.num_of_speed_blocks = 3
self.num_of_orient_blocks = 4
        # state rep size = 9 (goal direction) + 4 (step direction)
        #                + 16 bins * (3 speed + 4 orientation) + 3 + 3 (ring densities)
        self.state_rep_size = 131
self.generate_hash_variable()
# self.generate_state_dictionary()
# print('Done!')
def smooth_state(self, state):
"""
        A smoothing function for a given state, depending on how the
        feature extractor represents the state.
Each feature extractor should ideally have one.
input - state(numpy)
output - a smoothed version of the state vector(numpy) based on how the
state feature has been designed in the first place
"""
return state
def generate_hash_variable(self):
"""
        The hash variable is an array the size of the current state, holding
        powers of two in the format [1 2 4 8 16 . . .], so that index i of a
        binary state contributes 2**i to the hash value.
"""
self.hash_variable_list = []
for i in range(self.state_rep_size - 1, -1, -1):
self.hash_variable_list.append(
(int(math.pow(2, self.state_rep_size - 1 - i)))
)
    def recover_state_from_hash_value(self, hash_value):
        size = self.state_rep_size
        state_val = np.zeros(size)
        i = 0
        while hash_value > 0:
            state_val[i] = int(hash_value) % 2
            hash_value = int(hash_value) // 2
            i += 1
        return state_val
def hash_function(self, state):
hash_value = 0
size = len(self.hash_variable_list)
for i in range(size):
hash_value += int(self.hash_variable_list[i]*state[i])
return hash_value
def get_info_from_state(self, state):
# read information from the state
agent_state = state["agent_state"]
goal_state = state["goal_state"]
obstacles = state["obstacles"]
return agent_state, goal_state, obstacles
def get_relative_coordinates(self,):
# adjusts the coordinates of the obstacles based on the current
# absolute orientation of the agent.
return 0
def populate_orientation_bin(
self, agent_orientation_val, agent_state, obs_state_list
):
"""
        Given the agent state and orientation and a list of obstacles,
        populates the self.bins dictionary with the appropriate obstacles.
        self.bins is a dictionary where against each key is a list of the
        obstacles present in that particular bin.
        Bin information:
        Bins from the inner ring: 0-7
        Bins from the outer ring: 8-15
        Bin value in each ring is based on the orientation_approximator.
"""
for obs_state in obs_state_list:
distance = dist_2d(obs_state["position"], agent_state["position"])
if obs_state["orientation"] is not None:
obs_orientation_ref_point = (
obs_state["position"] + obs_state["orientation"]
)
else:
obs_orientation_ref_point = obs_state["position"]
if distance < self.thresh2:
# classify obs as considerable
temp_obs = {}
# check for the orientation
# obtain relative orientation
# get the relative coordinates
rot_matrix = get_rot_matrix(deg_to_rad(agent_orientation_val))
# translate the point so that the agent sits at the center of the coordinates
                # before rotation
vec_to_obs = obs_state["position"] - agent_state["position"]
vec_to_orient_ref = (
obs_orientation_ref_point - agent_state["position"]
)
# rotate the coordinates to get the relative coordinates wrt the agent
rel_coord_obs = np.matmul(rot_matrix, vec_to_obs)
rel_coord_orient_ref = np.matmul(rot_matrix, vec_to_orient_ref)
bin_val = 0
angle_diff = angle_between(
self.orientation_approximator[0], rel_coord_obs
)
for i, orientation_approx in enumerate(
self.orientation_approximator[1:], start=1
):
new_angle_diff = angle_between(
orientation_approx, rel_coord_obs
)
if new_angle_diff < angle_diff:
angle_diff = new_angle_diff
bin_val = i
if distance > self.thresh1:
bin_val += 8
# orientation of the obstacle needs to be changed as it will change with the
# change in the relative angle. No need to change the speed.
temp_obs["orientation"] = rel_coord_orient_ref - rel_coord_obs
temp_obs["position"] = rel_coord_obs
temp_obs["speed"] = obs_state["speed"]
self.bins[str(bin_val)].append(temp_obs)
def overlay_bins(self, state):
# a visualizing tool to debug if the binning is being done properly
# draws the bins on the game surface for a visual inspection of the
# classification of the obstacles in their respective bins
        # draw inner ring
center = np.array(
[
int(state["agent_state"]["position"][1]),
int(state["agent_state"]["position"][0]),
]
)
pygame.draw.circle(
pygame.display.get_surface(), (0, 0, 0), center, self.thresh1, 2
)
# draw outer ring
pygame.draw.circle(
pygame.display.get_surface(), (0, 0, 0), center, self.thresh2, 2
)
pygame.draw.circle(
pygame.display.get_surface(),
(0, 0, 0),
center,
int(
self.step_size + (self.agent_width + self.obs_width) * 1.4 // 2
),
2,
)
line_start_point = np.array([0, -self.thresh2])
line_end_point = np.array([0, self.thresh2])
for i in range(8):
# draw the lines
rot_matrix = get_rot_matrix(self.rel_orient_conv[i])
cur_line_start = np.matmul(rot_matrix, line_start_point) + center
cur_line_end = np.matmul(rot_matrix, line_end_point) + center
# pdb.set_trace()
pygame.draw.line(
pygame.display.get_surface(),
(0, 0, 0),
cur_line_start,
cur_line_end,
2,
)
pygame.display.update()
# pdb.set_trace()
def compute_bin_info(self):
# given self.bins populated with the obstacles,
# computes the average relative orientation and speed for all the bins
sam_vector = np.zeros(
[
16,
len(self.speed_divisions)
- 1
+ len(self.orientation_approximator_4),
]
)
density_inner_ring = np.zeros(3)
inner_ring_count = 0
density_outer_ring = np.zeros(3)
outer_ring_count = 0
for i in range(len(self.bins.keys())):
avg_speed = 0
avg_orientation = np.zeros(2)
speed_bin = np.zeros(len(self.speed_divisions) - 1)
orientation_bin = np.zeros(len(self.orientation_approximator_4))
total_obs = len(self.bins[str(i)])
for j in range(total_obs):
obs = self.bins[str(i)][j]
if obs["speed"] is not None:
avg_speed += np.linalg.norm(obs["speed"])
if obs["orientation"] is not None:
avg_orientation += obs["orientation"]
if i < 8:
inner_ring_count += 1
else:
outer_ring_count += 1
# if obs['speed'] is not None:
if total_obs > 0:
avg_speed /= total_obs
speed_bin_index = discretize_information(
avg_speed, self.speed_divisions
)
speed_bin[speed_bin_index] = 1
# if obs['orientation'] is not None:
new_obs = {"orientation": avg_orientation}
_, avg_orientation = get_abs_orientation(
new_obs, self.orientation_approximator_4
)
# print('the avg orientation :', avg_orientation)
orientation_bin[avg_orientation] = 1
# based on the obtained average speed and orientation bin them
# print('Avg speed :', avg_speed, 'Speed bin :',speed_bin)
# print('Avg orientation :', avg_orientation, 'Orientation bin :', orientation_bin)
sam_vector[i][:] = np.concatenate((speed_bin, orientation_bin))
density_inner_ring[
discretize_information(
inner_ring_count, self.inner_ring_density_division
)
] = 1
density_outer_ring[
discretize_information(
outer_ring_count, self.outer_ring_density_division
)
] = 1
return sam_vector, density_inner_ring, density_outer_ring
def compute_social_force(self):
# computes the social force value at a given time(optional)
return 0
def extract_features(self, state):
# getting everything to come together to extract the features
agent_state, goal_state, obstacles = self.get_info_from_state(state)
abs_approx_orientation, agent_orientation_index = get_abs_orientation(
agent_state, self.orientation_approximator
)
agent_orientation_angle = state["agent_head_dir"]
# print('The orientation :')
# print(abs_approx_orientation.reshape(3,3))
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
relative_orientation = get_rel_orientation(
prev_frame_info, agent_state, goal_state
)
relative_orientation_goal = get_rel_goal_orientation(
self.orientation_approximator,
self.rel_orient_conv,
agent_state,
agent_orientation_index,
goal_state,
)
# empty bins before populating
for i in range(16):
self.bins[str(i)] = []
self.populate_orientation_bin(
agent_orientation_angle, agent_state, obstacles
)
(
sam_vector,
inner_ring_density,
outer_ring_density,
) = self.compute_bin_info()
extracted_feature = np.concatenate(
(
relative_orientation_goal,
relative_orientation,
np.reshape(sam_vector, (-1)),
inner_ring_density,
outer_ring_density,
)
)
self.agent_state_history.append(copy.deepcopy(state["agent_state"]))
return reset_wrapper(extracted_feature)
def rollback(self, frames, state):
if frames > len(self.agent_state_history):
print("Trying to rollback more than it has seen!!!")
else:
for i in range(1, frames + 1):
if len(self.agent_state_history) > 0:
                    self.agent_state_history.pop()  # deque.pop() takes no index argument
return self.extract_features(state)
def reset(self):
self.agent_state_history = deque(maxlen=1)
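# Illustrative usage (not part of the original module): the hash scheme above
# treats a binary state vector as a base-2 number (index i carries weight
# 2**i), so hashing and recovery round-trip exactly for 0/1 states.
def _demo_sam1_state_hash():
    extractor = DroneFeatureSAM1()
    state = np.zeros(extractor.state_rep_size)
    state[0] = 1
    state[7] = 1
    state[130] = 1
    hash_value = extractor.hash_function(state)
    recovered = extractor.recover_state_from_hash_value(hash_value)
    assert (recovered == state).all()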
class DroneFeatureMinimal(DroneFeatureSAM1):
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
step_size=10,
obs_width=10,
goal_size=10,
show_bins=False,
):
super().__init__(
thresh1=thresh1,
thresh2=thresh2,
agent_width=agent_width,
step_size=step_size,
grid_size=goal_size,
show_bins=show_bins,
obs_width=obs_width,
)
self.thresh_speed = 0.5
self.state_rep_size = 50
def compute_bin_info(self):
"""
        The minimal version ditches the huge, detailed vectors of information
        for something more succinct. For each bin it returns a 3-element
        one-hot vector telling how likely the pedestrians in that bin are to
        interfere with the robot.
        Likelihood of collision is calculated as follows:
        Low : the pedestrians in the bin are moving away from the agent
        High : the pedestrians are quick and moving towards the agent
        Med : anything that does not fall into either category
"""
collision_information = np.zeros((len(self.bins.keys()), 3))
for i in range(len(self.bins.keys())):
# for each bin
current_danger_level = 0
for ped in range(len(self.bins[str(i)])):
# for each pedestrian
# pdb.set_trace()
coll = self.compute_collision_likelihood(
self.bins[str(i)][ped]
)
if coll > current_danger_level:
current_danger_level = coll
collision_information[i, current_danger_level] = 1
if (
np.sum(collision_information[:, 1]) > 0
or np.sum(collision_information[:, 2]) > 0
):
for i in range(collision_information.shape[0]):
print(
"Bin no :",
i,
", collision_info : ",
collision_information[i, :],
)
# pdb.set_trace()
return collision_information
def compute_collision_likelihood(self, pedestrian):
"""
collision prob: High : 2, med : 1, low : 0
"""
collision_prob = 0
pos_vector = np.array([0, 0]) - pedestrian["position"]
orientation = pedestrian["orientation"]
ang = angle_between(pos_vector, orientation)
# highest prob
if ang < np.pi / 8:
if np.linalg.norm(pedestrian["orientation"]) > self.thresh_speed:
collision_prob = 2
# lowest prob
elif ang > np.pi / 8 or pedestrian["speed"] == 0:
collision_prob = 0
# somewhere in between
else:
collision_prob = 1
return collision_prob
def extract_features(self, state):
agent_state, goal_state, obstacles = self.get_info_from_state(state)
abs_approx_orientation, agent_orientation_index = get_abs_orientation(
agent_state, self.orientation_approximator
)
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
relative_orientation = get_rel_orientation(
prev_frame_info, agent_state, goal_state
)
for i in range(16):
self.bins[str(i)] = []
self.populate_orientation_bin(
agent_orientation_index, agent_state, obstacles
)
collision_info = self.compute_bin_info()
self.agent_state_history.append(copy.deepcopy(state["agent_state"]))
# pdb.set_trace()
# return reset_wrapper(extracted_feature)
return None
class DroneFeatureOccup(DroneFeatureSAM1):
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
step_size=10,
obs_width=10,
grid_size=10,
show_bins=False,
window_size=5,
):
super().__init__(
thresh1=thresh1,
thresh2=thresh2,
agent_width=agent_width,
step_size=step_size,
grid_size=grid_size,
show_bins=show_bins,
obs_width=obs_width,
)
self.window_size = window_size
self.thresh_speed = 0.5
self.state_rep_size = window_size ** 2 + 22
self.thresh2 = (step_size * window_size) / 2
def check_overlap(self, temp_pos, obs_pos):
# if true, that means there is an overlap
boundary = None
if self.grid_size >= self.agent_width:
boundary = self.grid_size / 2
else:
boundary = self.agent_width / 2
distance_to_maintain = boundary + (self.obs_width / 2)
# pdb.set_trace()
if (
abs(temp_pos[0] - obs_pos[0]) < distance_to_maintain
and abs(temp_pos[1] - obs_pos[1]) < distance_to_maintain
):
return True
else:
return False
def block_to_arrpos(self, r, c):
a = (self.window_size ** 2 - 1) / 2
b = self.window_size
pos = a + (b * r) + c
return int(pos)
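    # e.g. with window_size = 5: a = 12, so (r, c) = (0, 0) maps to index 12
    # (the center cell), (-2, -2) to 0 (top-left) and (2, 2) to 24.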
"""
def overlay_grid(self, pygame_surface, state):
center = np.array([int(state['agent_state']['position'][1]),
int(state['agent_state']['position'][0])])
window_rows = window_cols = self.window_size
line_orient = ['hor', 'ver']
grid_width = self.step_size
start_point = center - np.array([window_size/2 ])
for orient in line_orient:
for i in range(window_size):
start_point =
"""
def compute_bin_info(self):
obstacles = []
# create a obstacle list from the self.bins
for bin_key in self.bins.keys():
for obs in self.bins[bin_key]:
obstacles.append(obs)
window_rows = window_cols = self.window_size
row_start = int((window_rows - 1) / 2)
col_start = int((window_cols - 1) / 2)
local_occup_grid = np.zeros(self.window_size ** 2)
agent_pos = np.array([0, 0])
for i in range(len(obstacles)):
# as of now this just measures the distance from the center of the obstacle
# this distance has to be measured from the circumferance of the obstacle
# new method, simulate overlap for each of the neighbouring places
# for each of the obstacles
obs_pos = obstacles[i]["position"]
obs_width = self.obs_width
for r in range(-row_start, row_start + 1, 1):
for c in range(-col_start, col_start + 1, 1):
# c = x and r = y
# pdb.set_trace()
temp_pos = np.asarray(
[
agent_pos[0] + r * self.step_size,
agent_pos[1] + c * self.step_size,
]
)
if self.check_overlap(temp_pos, obs_pos):
pos = self.block_to_arrpos(r, c)
local_occup_grid[pos] = 1
return local_occup_grid
def extract_features(self, state):
# getting everything to come together to extract the features
agent_state, goal_state, obstacles = self.get_info_from_state(state)
abs_approx_orientation, agent_orientation_index = get_abs_orientation(
agent_state, self.orientation_approximator
)
# print('The orientation :')
# print(abs_approx_orientation.reshape(3,3))
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
relative_orientation = get_rel_orientation(
prev_frame_info, agent_state, goal_state
)
relative_orientation_goal = get_rel_goal_orientation(
self.orientation_approximator,
self.rel_orient_conv,
agent_state,
agent_orientation_index,
goal_state,
)
# print('The absolute approx orientation :', abs_approx_orientation)
##print('The relative orientation', relative_orientation)
# empty bins before populating
for i in range(16):
self.bins[str(i)] = []
# print('Here')
self.populate_orientation_bin(
agent_orientation_index, agent_state, obstacles
)
# pdb.set_trace()
local_occup_grid = self.compute_bin_info()
extracted_feature = np.concatenate(
(
abs_approx_orientation,
relative_orientation_goal,
relative_orientation,
local_occup_grid,
)
)
self.agent_state_history.append(copy.deepcopy(state["agent_state"]))
return reset_wrapper(extracted_feature)
class DroneFeatureRisk(DroneFeatureSAM1):
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
step_size=10,
obs_width=10,
grid_size=10,
show_bins=False,
show_agent_persp=False,
):
super().__init__(
thresh1=thresh1,
thresh2=thresh2,
agent_width=agent_width,
step_size=step_size,
grid_size=grid_size,
show_bins=show_bins,
obs_width=obs_width,
)
self.rel_speed_divisions = [-1, 0, 1]
self.rel_distance_divisions = [1, 3, 5]
"""
relative goal : 9
relative step : 4
risk information for 16 bins : 16*3
"""
self.state_rep_size = 9 + 4 + 16 * 3
self.generate_hash_variable()
self.show_agent_persp = show_agent_persp
self.init_surface = False
self.orig_disp_size_row = None
self.orig_disp_size_col = None
"""
def show_agent_view(self, agent_orientation_val, agent_state, pygame_surface):
#draw the agent
#draw the bins
#draw the obstacles
if agent_orientation_val > 4:
agent_orientation_val -= 1
rot_matrix = get_rot_matrix(self.rel_orient_conv[agent_orientation_val])
if agent_state['orientation'] is None:
agent_state['orientation'] = np.array([1, 0])
rotated_agent_orientation = np.matmul(rot_matrix, agent_state['orientation'])
for key in self.bins.keys():
for obs in obs_list:
rel_orient = obs['orientation'] - rotated_agent_orientation
"""
def get_change_in_orientation(self, cur_agent_orientation):
# cur_agent_orientation is a 2d array [row, col]
prev_agent_orient = None
change_vector = np.zeros(5)
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
if prev_frame_info is not None and cur_agent_orientation is not None:
prev_agent_orient = prev_frame_info["orientation"]
# angle_diffs = np.array([0, np.pi/9, 2*np.pi/9, np.pi*3/9, 4*np.pi/9])
angle_diffs = np.array(
[0, np.pi / 4, 2 * np.pi / 4, np.pi * 3 / 4, 4 * np.pi / 4]
)
diff_in_angle = angle_between(
prev_agent_orient, cur_agent_orientation
)
index = np.argmin(np.abs(angle_diffs - diff_in_angle))
# print('Prev orientation :', prev_agent_orient)
# print('cur_agent_orientation :', cur_agent_orientation)
else:
index = 0
# print('Index selected :', index)
# pdb.set_trace()
change_vector[index] = 1
return change_vector
def compute_bin_info(
self, agent_orientation_val, agent_state, pygame_surface=None
):
risk_vector = np.zeros((len(self.bins.keys()), 3))
# rotate the agent's orientation to match that of the obstacles
thresh_value = (
1.4 * (self.agent_width / 2 + self.obs_width / 2) + self.step_size
)
intimate_space_dist = int(
self.step_size + (self.agent_width + self.obs_width) * 1.4 // 2
)
intimate_space_occupancy = np.zeros(8)
rot_matrix = get_rot_matrix(deg_to_rad(agent_orientation_val))
if agent_state["orientation"] is None:
agent_state["orientation"] = np.array([-1, 0])
rotated_agent_orientation = np.matmul(
rot_matrix, agent_state["orientation"]
)
rotated_agent_orientation = (
rotated_agent_orientation * agent_state["speed"]
)
pad = 80
mag = 20 # magnification of the orientation lines
################################
# code for the agent view
# make changes in the game display accordingly
# this is a onetime thing
if self.show_agent_persp and not self.init_surface:
# draw the bins
(
self.orig_disp_size_col,
self.orig_disp_size_row,
) = pygame.display.get_surface().get_size()
pygame.display.set_mode(
(
self.orig_disp_size_col + self.thresh2 * 2 + pad,
self.orig_disp_size_row,
)
)
self.init_surface = True
# add the agent view, refreshed every step
if self.show_agent_persp:
# center is in (row, col) format
center = (
self.orig_disp_size_row / 2,
self.orig_disp_size_col + self.thresh2 + pad / 2,
)
dummy_state = {"agent_state": {}}
dummy_state["agent_state"]["position"] = center
side = self.thresh2 * 2 + pad / 2
# clear and re-draw the primary agent_view rectangle
pygame.display.get_surface().fill(
(255, 255, 255),
(
(self.orig_disp_size_col, 0),
(self.thresh2 * 2 + pad, self.orig_disp_size_row),
),
)
pygame.draw.line(
pygame.display.get_surface(),
(0, 0, 0),
(self.orig_disp_size_col, 0),
(self.orig_disp_size_col, self.orig_disp_size_row),
3,
)
pygame.draw.rect(
pygame.display.get_surface(),
(0, 0, 0),
((center[1] - side / 2, center[0] - side / 2), (side, side)),
4,
)
            # draw the circles
            # pdb.set_trace()
self.overlay_bins(dummy_state)
# draw the agent
pygame.draw.rect(
pygame.display.get_surface(),
(0, 0, 0),
[
center[1] - self.agent_width / 2,
center[0] - self.agent_width / 2,
self.agent_width,
self.agent_width,
],
)
# draw the orientation
pygame.draw.line(
pygame.display.get_surface(),
(0, 0, 0),
(center[1], center[0]),
(
(center[1] + rotated_agent_orientation[1] * mag),
(center[0] + rotated_agent_orientation[0] * mag),
),
4,
)
pygame.display.update()
#################################
for key in self.bins.keys():
risk_val = 0
obs_list = self.bins[key]
# print('Bin :', key)
for obs in obs_list:
# relative orientation of the obstacle wrt the agent
# print('Obs information wrt pygame :', obs['orientation'])
rel_orient = obs["orientation"] - rotated_agent_orientation
# print('Relative orientation :', rel_orient)
# relative position of the agent wrt the obstacle
rel_dist = -obs["position"]
rel_dist_mag = np.linalg.norm(rel_dist, 2)
if rel_dist_mag < intimate_space_dist:
intimate_space_occupancy[int(key) % 8] = 1
ang = angle_between(rel_orient, rel_dist)
# if np.linalg.norm(rel_dist) < (self.agent_width+self.obs_width)/2+self.step_size:
# if the pedestrian is too close, ie invading intimate space: high risk
# swapped this for a intimate space detector ring: intimate_space_occupancy
# if np.linalg.norm(rel_dist) < (self.agent_width/math.sqrt(2) + self.obs_width/math.sqrt(2) + self.step_size*math.sqrt(2)):
# risk_val = max(risk_val, 2)
#
# if ang < np.pi/4 and math.tan(ang)*np.linalg.norm(rel_dist) < thresh_value:
if (
ang < np.pi / 2
and abs(math.tan(ang) * np.linalg.norm(rel_dist))
< thresh_value
):
# print('Moving towards')
# high risk
# adding to it, the rel_distance in both row and
# col should be less than the sum(agent_width/2+obs_width/2)
risk_val = max(risk_val, 2)
elif ang < np.pi / 2:
# print('Moving away')
# medium risk
risk_val = max(risk_val, 1)
else:
# low risk
pass
if self.show_agent_persp:
# determine the color of the obstacle based on the risk it poses
if risk_val == 0:
color_val = (0, 255, 0)
if risk_val == 1:
color_val = (0, 0, 255)
if risk_val == 2:
color_val = (255, 0, 0)
if rel_dist_mag < intimate_space_dist:
color_val = (0, 255, 255)
# draw the obstacle in the agent persepective window
shifted_obs_pos = (
center[0] + obs["position"][0],
center[1] + obs["position"][1],
)
pygame.draw.rect(
pygame.display.get_surface(),
color_val,
[
shifted_obs_pos[1] - self.obs_width / 2,
shifted_obs_pos[0] - self.obs_width / 2,
self.obs_width,
self.obs_width,
],
)
# draw the obstacle orientation in the agent perspective window
pygame.draw.line(
pygame.display.get_surface(),
color_val,
(shifted_obs_pos[1], shifted_obs_pos[0]),
(
shifted_obs_pos[1] + rel_orient[1] * mag,
shifted_obs_pos[0] + rel_orient[0] * mag,
),
2,
)
self.overlay_bins(dummy_state)
pygame.display.update()
# pdb.set_trace()
risk_vector[int(key)][risk_val] = 1
return risk_vector, intimate_space_occupancy
def extract_features(self, state):
agent_state, goal_state, obstacles = self.get_info_from_state(state)
abs_approx_orientation, agent_orientation_index = get_abs_orientation(
agent_state, self.orientation_approximator
)
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
relative_orientation = get_rel_orientation(
prev_frame_info, agent_state, goal_state
)
relative_orientation_goal = get_rel_goal_orientation(
self.orientation_approximator,
self.rel_orient_conv,
agent_state,
agent_orientation_index,
goal_state,
)
for i in range(16):
self.bins[str(i)] = []
# print('absolute orientation :', abs_approx_orientation.reshape((3,3)))
# print('relative orientation :', relative_orientation_goal.reshape((3,3)))
self.populate_orientation_bin(
agent_orientation_index, agent_state, obstacles
)
        # compute_bin_info returns a (risk_vector, intimate_space_occupancy)
        # tuple; only the risk vector enters this 61-dim representation
        risk_vector, _intimate_space_occupancy = self.compute_bin_info(
            agent_orientation_index, agent_state
        )
self.agent_state_history.append(copy.deepcopy(state["agent_state"]))
extracted_feature = np.concatenate(
(
relative_orientation,
relative_orientation_goal,
                risk_vector.reshape((-1)),
)
)
        # pdb.set_trace()
return reset_wrapper(extracted_feature)
class DroneFeatureRisk_v2(DroneFeatureRisk):
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
step_size=10,
obs_width=10,
grid_size=10,
show_bins=False,
show_agent_persp=False,
):
super().__init__(
thresh1=thresh1,
thresh2=thresh2,
agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
show_bins=show_bins,
show_agent_persp=show_agent_persp,
)
# change the state representation size accordingly
"""
        relative_orientation        4
        relative_orientation_goal   9
change_in_orientation 5
collision_info 48
"""
self.state_rep_size = 9 + 4 + 5 + 16 * 3
self.generate_hash_variable()
def extract_features(self, state):
"""
the parameter ignore_cur_state, if set to true indicates that this is a part of a rollback play.
"""
agent_state, goal_state, obstacles = self.get_info_from_state(state)
abs_approx_orientation, agent_orientation_index = get_abs_orientation(
agent_state, self.orientation_approximator
)
agent_orientation_angle = state["agent_head_dir"]
# print('Current heading direction :', agent_orientation_angle)
if len(self.agent_state_history) > 0:
prev_frame_info = self.agent_state_history[-1]
else:
prev_frame_info = None
relative_orientation = get_rel_orientation(
prev_frame_info, agent_state, goal_state
)
relative_orientation_goal = get_rel_goal_orientation(
self.orientation_approximator,
self.rel_orient_conv,
agent_state,
agent_orientation_index,
goal_state,
)
change_in_orientation = self.get_change_in_orientation(
state["agent_state"]["orientation"]
)
for i in range(16):
self.bins[str(i)] = []
# print('absolute orientation :', abs_approx_orientation.reshape((3,3)))
# print('relative orientation :', relative_orientation_goal.reshape((3,3)))
self.populate_orientation_bin(
agent_orientation_angle, agent_state, obstacles
)
        # unpack the (risk_vector, intimate_space_occupancy) tuple returned
        # by compute_bin_info
        risk_vector, _intimate_space_occupancy = self.compute_bin_info(
            agent_orientation_angle, agent_state
        )
self.agent_state_history.append(copy.deepcopy(state["agent_state"]))
extracted_feature = np.concatenate(
(
relative_orientation,
relative_orientation_goal,
change_in_orientation,
                risk_vector.reshape((-1)),
)
)
"""
#***debugging block*****#
print('Relative orientation :', relative_orientation)
print('Relative orientation goal :', relative_orientation_goal.reshape(3,3))
print('Change in orientation :', change_in_orientation)
pdb.set_trace()
#****end block****#
"""
return reset_wrapper(extracted_feature)
class DroneFeatureRisk_speed(DroneFeatureRisk):
def __init__(
self,
thresh1=1,
thresh2=2,
agent_width=10,
step_size=10,
obs_width=10,
grid_size=10,
show_bins=False,
max_speed=2,
show_agent_persp=False,
return_tensor=False,
):
super().__init__(
thresh1=thresh1,
thresh2=thresh2,
agent_width=agent_width,
obs_width=obs_width,
step_size=step_size,
grid_size=grid_size,
show_bins=show_bins,
show_agent_persp=show_agent_persp,
)
# change the state representation size accordingly
"""
        relative_orientation        4
        relative_orientation_goal   9
change_in_orientation 5
collision_info 48
speed_info 6
"""
self.state_rep_size = 4 + 9 + 5 + 16 * 3 + 6
self.max_speed = max_speed
self.speed_divisions = 6
self.generate_hash_variable()
self.return_tensor = return_tensor
def smooth_state(self, state):
"""
        DroneFeatureRisk_speed states have 5 parts:
        relative orientation
        relative orientation goal
        change in orientation
        collision info
        speed info
        Divide the state vector into the parts defined above and smooth each
        of them separately. Finally, concatenate to get the final smoothed
        state.
"""
smoothing_kernel_general = np.array([0.1, 0.8, 0.1])
# relative orientation : asymmetric features, so kind of hacky
rel_orient = state[0:4]
if rel_orient[0] == 1:
smoothing_kernel = np.array([0.9, 0.1]) # .8, .2
if rel_orient[1] == 1:
smoothing_kernel = np.array([0.1, 0.9, 0]) # .2, .8
if rel_orient[2] == 1:
smoothing_kernel = np.array([0.05, 0.9, 0.05]) # .05, .9, .05
if rel_orient[3] == 1:
smoothing_kernel = np.array([0.1, 0.9, 0]) # [.1, .9, 0]
rel_orient_smooth = np.convolve(rel_orient, smoothing_kernel, "same")
# relative_orientation_goal
# just take the first 8 and do the convolve
        relative_orientation_goal = state[4 : 4 + 8].astype(np.float64)  # np.float is removed in NumPy >= 1.24
relative_orientation_goal_full = state[4 : 4 + 9]
smoothing_kernel = smoothing_kernel_general
relative_orientation_goal_smooth = convolve(
relative_orientation_goal, smoothing_kernel, mode="wrap"
)
relative_orientation_goal_smooth_9 = np.zeros(9)
relative_orientation_goal_smooth_9[
0:8
] = relative_orientation_goal_smooth
# change in orientation
# no wrap this time
change_in_orientation = state[13 : 13 + 5]
smoothing_kernel = smoothing_kernel_general
change_in_orientation_smooth = np.convolve(
change_in_orientation, smoothing_kernel, "same"
)
# normalize the weights so that the sum remains 1
change_in_orientation_smooth = change_in_orientation_smooth / np.sum(
change_in_orientation_smooth
)
# local bin information
# bin information comes in a matrix of size 16 * 3
        # the convolution happens along the bin axis, within each risk level
        # the bin information lies on two concentric circles,
        # so the two circles have to be separated before smoothing
        risk_info = state[18 : 18 + 48].reshape([16, 3]).astype(np.float64)
risk_info_inner_circle = risk_info[0:8, :]
risk_info_outer_circle = risk_info[8:, :]
        smoothing_kernel = np.array([0, 1, 0])
        # smooth the risk values spatially, i.e. moderate risk in a bin is
        # spread as moderate risk to nearby bins; moderate risk is never
        # smoothed into low or high risk. Note that the identity kernel
        # [0, 1, 0] above makes this spatial smoothing a no-op as configured.
risk_info_inner_circle_smooth = np.zeros(risk_info_inner_circle.shape)
risk_info_outer_circle_smooth = np.zeros(risk_info_outer_circle.shape)
# going through each of the columns (ie the risk levels)
# the smoothing does not smooth over the risk levels
# ie. high risk at a bin never smoothens to be a medium or low risk
# in someother bin.
for i in range(risk_info_inner_circle.shape[1]):
risk_info_part = risk_info_inner_circle[:, i]
risk_info_part_smooth = convolve(
risk_info_part, smoothing_kernel, mode="wrap"
)
risk_info_inner_circle_smooth[:, i] = risk_info_part_smooth
for i in range(risk_info_outer_circle.shape[1]):
risk_info_part = risk_info_outer_circle[:, i]
risk_info_part_smooth = convolve(
risk_info_part, smoothing_kernel, mode="wrap"
)
risk_info_outer_circle_smooth[:, i] = risk_info_part_smooth
# speed information
# no wrap in the smoothing function
speed_information = state[-6:]
smoothing_kernel = smoothing_kernel_general
speed_information_smooth = np.convolve(
speed_information, smoothing_kernel, "same"
)
# normalize the weights so that the sum remains 1
speed_information_smooth = speed_information_smooth / np.sum(
speed_information_smooth
)
# ********* for debugging purposes *********
"""
print('State information :')
print ("relative orientation")
print(rel_orient, " ", rel_orient_smooth)
print("relative_orientation_goal")
print(relative_orientation_goal_full, " " , relative_orientation_goal_smooth_9)
print("change in orienatation")
print(change_in_orientation, " ", change_in_orientation_smooth)
print("risk information")
print("inner circle")
print(np.c_[risk_info_inner_circle, risk_info_inner_circle_smooth])
print("outer circle")
print(np.c_[risk_info_outer_circle, risk_info_outer_circle_smooth])
print("speed information")
print(speed_information, ' ', speed_information_smooth)
if sum(risk_info[:,0]) < 15:
pdb.set_trace()
#*******************************************
"""
return np.concatenate(
(
rel_orient_smooth,
relative_orientation_goal_smooth_9,
change_in_orientation_smooth,
risk_info_inner_circle_smooth.reshape((-1)),
risk_info_outer_circle_smooth.reshape((-1)),
speed_information_smooth,
)
)
def get_speed_info(self, agent_state):
speed_info =
|
np.zeros(self.speed_divisions)
|
numpy.zeros
|
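# A minimal sketch of the circular bin smoothing in the snippet above,
# assuming the bare `convolve` there is scipy.ndimage.convolve (np.convolve
# has no "wrap" mode). Kernel values here are illustrative, not from the source.
import numpy as np
from scipy.ndimage import convolve

risk_circle = np.zeros((8, 3))       # 8 bins x 3 risk levels (toy data)
risk_circle[0, 2] = 1.0              # high risk in bin 0 only
kernel = np.array([0.1, 0.8, 0.1])
smoothed = np.column_stack([
    convolve(risk_circle[:, i], kernel, mode="wrap")
    for i in range(risk_circle.shape[1])
])
# mode="wrap" makes bin 7 a neighbour of bin 0, so risk spreads around the
# circle but never across the risk-level columns.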
import time
import datetime
import numpy as np
from scipy.linalg import expm, expm_frechet
import qutip.control.pulseoptim as cpo
import scipy.optimize
import matplotlib.pyplot as plt
class Optcontrol_ADMM_CNOT():
"""
Optimal controller using an ADMM algorithm to enforce a minimum up-time constraint.
"""
def __init__(self):
self.H_d = None
self.H_c = None
self.H_d_qobj = None
self.H_c_qobj = None
self.X_0 = None
self.X_targ = None
self.X_0_qobj = None
self.X_targ_qobj = None
self.n_ts = 0
self.evo_time = None
self.amp_lbound = None
self.amp_ubound = None
self.fid_err_targ = None
self.min_grad = None
self.max_iter_step = None
self.max_wall_time_step = None
self.obj_type = None
self.phase_option = None
self.p_type = None
self.seed = None
self.constant = None
self.initial_control = None
self.output_num = None
self.output_fig = None
self.output_control = None
self.sum_cons_1 = False
self.n_ctrls = None
self.admm_err_targ = None
self.time_optimize_start_step = 0
self.num_iter_step = 0
self.cur_obj = 0
self.cur_origin_obj = 0
self.onto = [None] * (self.n_ts + 1)
self.fwd = [None] * (self.n_ts + 1)
# variables and parameters for ADMM
self.v = None
self.u = None
self._lambda = None
self.rho = None
self.alpha = None
self.err_list = []
self.obj_list = []
self.max_iter_admm = None
self.max_wall_time_admm = None
self.result = None
self.qutip_optimizer = None
def build_optimizer(self, H_d, H_c, X_0, X_targ, n_ts, evo_time, amp_lbound=0, amp_ubound=1,
fid_err_targ=1e-4, min_grad=1e-8, max_iter_step=500, max_wall_time_step=120,
fid_type="UNIT", phase_option="PSU", p_type="ZERO", seed=None, constant=0, initial_control=None,
output_num=None, output_fig=None, output_control=None, sum_cons_1=False,
alpha=1, rho=2, max_iter_admm=500, max_wall_time_admm=7200, admm_err_targ=1e-3):
self.H_d_qobj = H_d
self.H_c_qobj = H_c
self.H_d = H_d.full()
self.H_c = [h_c.full() for h_c in H_c]
self.X_0_qobj = X_0
self.X_targ_qobj = X_targ
self.X_0 = X_0.full()
self.X_targ = X_targ.full()
self.n_ts = n_ts
self.evo_time = evo_time
self.amp_lbound = amp_lbound
self.amp_ubound = amp_ubound
self.fid_err_targ = fid_err_targ
self.min_grad = min_grad
self.max_iter_step = max_iter_step
self.max_wall_time_step = max_wall_time_step
self.obj_type = fid_type
self.phase_option = phase_option
self.p_type = p_type
self.constant = constant
self.initial_control = initial_control
self.output_num = output_num
self.output_fig = output_fig
self.output_control = output_control
self.sum_cons_1 = sum_cons_1
self.max_iter_admm = max_iter_admm
self.max_wall_time_admm = max_wall_time_admm
self.admm_err_targ = admm_err_targ
self.rho = rho
self.seed = seed
self.alpha = alpha
if self.sum_cons_1:
# H_c_origin = H_c
# # Controller Hamiltonian
# self.H_c = [H_c_origin[i].full() - H_c_origin[-1].full() for i in range(len(H_c_origin) - 1)]
# self.H_c_qobj = [H_c_origin[i] - H_c_origin[-1] for i in range(len(H_c_origin) - 1)]
# # Drift Hamiltonian
# self.H_d = H_d.full() + H_c_origin[-1].full()
# self.H_d_qobj = H_d + H_c_origin[-1]
self.alpha = 2 * alpha
self.n_ctrls = len(self.H_c)
self.rho = rho
self.u = np.zeros((self.n_ts, self.n_ctrls))
# if self.sum_cons_1:
# self.v = np.zeros((self.n_ts - 1, self.n_ctrls + 1))
# self._lambda = np.zeros((self.n_ts - 1, self.n_ctrls + 1))
# else:
self.v = np.zeros((self.n_ts - 1, self.n_ctrls))
self._lambda = np.zeros((self.n_ts - 1, self.n_ctrls))
self.cur_obj = 0
self.onto = [None] * (self.n_ts + 1)
self.fwd = [None] * (self.n_ts + 1)
optim = cpo.create_pulse_optimizer(self.H_d_qobj, self.H_c_qobj, self.X_0_qobj, self.X_targ_qobj,
self.n_ts, self.evo_time,
amp_lbound=self.amp_lbound, amp_ubound=self.amp_ubound,
fid_err_targ=self.fid_err_targ, min_grad=min_grad,
max_iter=self.max_iter_step, max_wall_time=self.max_wall_time_step,
dyn_type='UNIT',
fid_type=self.obj_type, phase_option="PSU",
init_pulse_params={"offset": self.constant},
gen_stats=True)
self.qutip_optimizer = optim
def _initialize_control(self):
"""
Initialize the control amplitudes according to self.p_type.
:return: an n_ts * n_ctrls array
"""
self.init_amps = np.zeros([self.n_ts, self.n_ctrls])
if self.p_type == "RND":
if self.seed:
np.random.seed(self.seed)
self.init_amps = np.random.rand(
self.n_ts, self.n_ctrls) * (self.amp_ubound - self.amp_lbound) + self.amp_lbound
if self.p_type == "CONSTANT":
self.init_amps = np.zeros((self.n_ts, self.n_ctrls)) + self.constant
if self.p_type == "WARM":
# file = open(self.initial_control)
if self.sum_cons_1:
warm_start_control = np.loadtxt(self.initial_control, delimiter=",")[:, 0]
else:
warm_start_control = np.loadtxt(self.initial_control, delimiter=",")
evo_time_start = warm_start_control.shape[0]
step = self.n_ts / evo_time_start
for j in range(self.n_ctrls):
for time_step in range(self.n_ts):
self.init_amps[time_step, j] = warm_start_control[int(np.floor(time_step / step)), j]
if self.p_type == "ADMM":
self.init_amps = self.u.copy()
def evolution(self, control_amps):
delta_t = self.evo_time / self.n_ts
X = [self.X_0]
for t in range(self.n_ts):
H_t = self.H_d.copy()
for j in range(self.n_ctrls):
H_t += control_amps[t, j] * self.H_c[j].copy()
X_t = expm(-1j * H_t * delta_t).dot(X[t])
X.append(X_t)
self.fwd = X
return X[-1]
def compute_fid(self, evolution_result):
fid = 0
if self.obj_type == "UNIT" and self.phase_option == "PSU":
fid = np.abs(np.trace(
np.linalg.inv(self.X_targ).dot(evolution_result))) / self.X_targ.shape[0]
return fid
def compute_norm(self, control_amps):
norm = sum(sum(np.power(control_amps[time_step + 1, j] - control_amps[time_step, j] - self.v[time_step, j]
+ self._lambda[time_step, j], 2) for time_step in range(self.n_ts - 1))
for j in range(self.n_ctrls))
# if self.sum_cons_1:
# norm += sum(np.power(sum(control_amps[time_step, j] - control_amps[time_step + 1, j]
# for j in range(self.n_ctrls)) - self.v[time_step, self.n_ctrls]
# + self._lambda[time_step, self.n_ctrls], 2) for time_step in range(self.n_ts - 1))
return norm
def compute_tv_norm(self):
return sum(sum(abs(self.u[t + 1, j] - self.u[t, j]) for t in range(self.n_ts - 1)) for j in range(self.n_ctrls))
def _compute_err(self, *args):
"""
:param args: control list
:return: error
"""
control_amps = args[0].copy()
control_amps = control_amps.reshape([self.n_ts, self.n_ctrls])
evolution_result = self.evolution(control_amps)
fid = self.compute_fid(evolution_result)
# norm = sum(sum(np.power(control_amps[time_step + 1, j] - control_amps[time_step, j] - self.v[time_step, j]
# + self._lambda[time_step, j], 2) for time_step in range(self.n_ts - 1))
# for j in range(self.n_ctrls))
norm = self.compute_norm(control_amps)
# print(1 - fid)
return 1 - fid + self.rho / 2 * norm
# return 1 - fid
def _step_call_back(self, *args):
wall_time_step = time.time() - self.time_optimize_start_step
# if wall_time_step > self.max_wall_time_step:
# raise ValueError("The time exceeds the given max wall time.")
self.num_iter_step += 1
def _fprime(self, *args):
control_amps = args[0].copy().reshape([self.n_ts, self.n_ctrls])
delta_t = self.evo_time / self.n_ts
fwd = [self.X_0]
onto = [self.X_targ.conj().T]
H = [None] * self.n_ts
for t in range(self.n_ts):
H[t] = self.H_d.copy()
for j in range(self.n_ctrls):
H[t] += control_amps[t, j] * self.H_c[j].copy()
cur_fwd = expm(-1j * H[t] * delta_t).dot(fwd[-1])
fwd.append(cur_fwd)
H_t_onto = self.H_d.copy()
for j in range(self.n_ctrls):
H_t_onto += control_amps[self.n_ts - t - 1, j] * self.H_c[j].copy()
cur_onto = onto[0].dot(expm(-1j * H_t_onto * delta_t))
onto.insert(0, cur_onto)
onto = np.array(onto)
fwd = np.array(fwd)
grad = np.zeros((self.n_ts, self.n_ctrls), dtype=complex)
for t in range(self.n_ts):
for j in range(self.n_ctrls):
grad_temp = expm_frechet(-1j * H[t] * delta_t, -1j * self.H_c[j] * delta_t, compute_expm=False)
g = np.trace(onto[t + 1].dot(grad_temp).dot(fwd[t]))
grad[t, j] = g
fid_pre = np.trace(self.X_targ.conj().T.dot(fwd[-1]))
fid_grad = - np.real(grad * np.exp(-1j * np.angle(fid_pre)) / self.X_targ.shape[0]).flatten()
norm_grad = np.zeros((self.n_ts, self.n_ctrls))
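# NOTE: the sum(...) terms below index self.v[t, self.n_ctrls] and
# self._lambda[t, self.n_ctrls], an extra column that is only allocated by
# the commented-out sum_cons_1 initialization above; with the current
# (n_ts - 1, n_ctrls) shapes this gradient path raises an IndexError.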
for j in range(self.n_ctrls):
norm_grad[0, j] = -self.rho * (control_amps[1, j] - control_amps[0, j] - self.v[0, j] + self._lambda[0, j])\
+ self.rho * (sum(control_amps[0, j] - control_amps[1, j] for j in range(self.n_ctrls))
- self.v[0, self.n_ctrls] + self._lambda[0, self.n_ctrls])
norm_grad[self.n_ts - 1, j] = self.rho * (control_amps[self.n_ts - 1, j] - control_amps[self.n_ts - 2, j]
- self.v[self.n_ts - 2, j] + self._lambda[self.n_ts - 2, j]) \
- self.rho * (sum(
control_amps[self.n_ts - 2, j] - control_amps[self.n_ts - 1, j] for j in range(self.n_ctrls))
- self.v[self.n_ts - 2, self.n_ctrls] + self._lambda[
self.n_ts - 2, self.n_ctrls])
for t in range(1, self.n_ts - 1):
norm_grad[t, j] = self.rho * (control_amps[t, j] - control_amps[t - 1, j] - self.v[t - 1, j]
+ self._lambda[t - 1, j]) \
- self.rho * (control_amps[t + 1, j] - control_amps[t, j] - self.v[t, j]
+ self._lambda[t, j]) \
+ self.rho * (sum(control_amps[t, j] - control_amps[t + 1, j]
for j in range(self.n_ctrls))
- self.v[t, self.n_ctrls] + self._lambda[t, self.n_ctrls]) \
- self.rho * (sum(control_amps[t - 1, j] - control_amps[t, j]
for j in range(self.n_ctrls))
- self.v[t - 1, self.n_ctrls] + self._lambda[t - 1, self.n_ctrls])
return fid_grad + norm_grad.flatten()
def _minimize_u(self):
self.time_optimize_start_step = time.time()
self.num_iter_step = 0
# results = scipy.optimize.minimize(self._compute_err, self.init_amps.reshape(-1), method='L-BFGS-B',
# bounds=scipy.optimize.Bounds(self.amp_lbound, self.amp_ubound),
# tol=self.min_grad,
# options={"maxiter": self.max_iter_step}, callback=self._step_call_back)
# initial_grad = self._fprime(self.u.reshape(-1))
# threshold = 1e-2
# min_grad = max(np.linalg.norm(initial_grad) * threshold, self.min_grad)
min_grad = self.min_grad
# f = open(self.output_num, "a+")
# print(min_grad, file=f)
# results = scipy.optimize.fmin_l_bfgs_b(self._compute_err, self.init_amps.reshape(-1),
# bounds=[(self.amp_lbound, self.amp_ubound)] * self.n_ts * self.n_ctrls,
# pgtol=min_grad, fprime=self._fprime,
# maxiter=self.max_iter_step, callback=self._step_call_back)
# self.u = results[0].reshape((self.n_ts, self.n_ctrls)).copy()
# self.cur_obj = results[1]
self.qutip_optimizer.termination_conditions.min_gradient_norm = min_grad
self.qutip_optimizer.ADMM_rho = self.rho
self.qutip_optimizer.v = self.v.copy()
self.qutip_optimizer._lambda = self._lambda.copy()
dyn = self.qutip_optimizer.dynamics
dyn.initialize_controls(self.init_amps)
result = self.qutip_optimizer.run_optimization_tv_penalty()
self.u = result.final_amps
self.cur_obj = result.fid_err + self.rho / 2 * self.compute_norm(self.u)
self.cur_origin_obj = result.fid_err + self.alpha * self.compute_tv_norm()
# self.cur_grad = result.grad_norm_final
self.num_iter_step = result.num_iter
# self.termination_reason = result.termination_reason
self.result = result
def _minimize_v(self):
for j in range(self.n_ctrls):
for t in range(self.n_ts - 1):
temp = self.u[t + 1, j] - self.u[t, j] + self._lambda[t, j]
if temp > self.alpha / self.rho:
self.v[t, j] = -self.alpha / self.rho + temp
if temp < -self.alpha / self.rho:
self.v[t, j] = self.alpha / self.rho + temp
if -self.alpha / self.rho <= temp <= self.alpha / self.rho:
self.v[t, j] = 0
# if self.sum_cons_1:
# for t in range(self.n_ts - 1):
# temp = sum(self.u[t, j] - self.u[t + 1, j] for j in range(self.n_ctrls)) + self._lambda[t, self.n_ctrls]
# if temp > self.alpha / self.rho:
# self.v[t, self.n_ctrls] = -self.alpha / self.rho + temp
# if temp < -self.alpha / self.rho:
# self.v[t, self.n_ctrls] = self.alpha / self.rho + temp
# if -self.alpha / self.rho <= temp <= self.alpha / self.rho:
# self.v[t, self.n_ctrls] = 0
def _update_dual(self):
for j in range(self.n_ctrls):
for t in range(self.n_ts - 1):
self._lambda[t, j] += self.u[t + 1, j] - self.u[t, j] - self.v[t, j]
if self.sum_cons_1:
for t in range(self.n_ts - 1):
self._lambda[t, self.n_ctrls] += sum(self.u[t, j] - self.u[t + 1, j] for j in range(self.n_ctrls))\
- self.v[t, self.n_ctrls]
def _admm_err(self):
err = sum(sum(
|
np.power(self.u[t + 1, j] - self.u[t, j] - self.v[t, j], 2)
|
numpy.power
|
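# A minimal sketch (not from the source) of the v-update in _minimize_v above:
# it is the soft-thresholding (L1 proximal) operator applied to the control
# differences plus the dual variable, with threshold alpha / rho.
import numpy as np

def soft_threshold(x, tau):
    # shrink toward zero by tau; values inside [-tau, tau] become 0
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

u = np.array([[0.0], [0.4], [0.5]])  # toy amplitudes: n_ts = 3, one control
lam = np.zeros((2, 1))               # dual variable
alpha, rho = 0.1, 2.0
v = soft_threshold(np.diff(u, axis=0) + lam, alpha / rho)
# differences are [0.4, 0.1], threshold is 0.05 -> v = [[0.35], [0.05]]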
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 13:23:05 2020
@author: kvstr
"""
import numpy as np
import scipy.sparse as sparse
from scipy.sparse import linalg
from scipy.linalg import solve_banded
from scipy.interpolate import griddata
import time
from numba import njit
from numba import prange
import matplotlib.pyplot as plt
import vtk
from vtk.util.numpy_support import vtk_to_numpy
from skimage.measure import block_reduce
# %% Continuity
# @njit(parallel=True)
def Continuity(u, v, x, y):
"""
Calculation of the continuity error in a 2D flow field
Parameters
----------
u: MxN Array
u-velocity matrix
v: MxN Array
v-velocity matrix
x: Nx1 vector
x-coordinates of points
y: Mx1 vector
y-coordinates of points
Returns
-------
error : NxM array
Continuity error on each grid point
"""
if not u.shape == v.shape:
print('Fields have different sizes')
return None
else:
error = abs(np.divide((u[:-1, 1:] - u[:-1, :-1]),\
np.gradient(x, axis=1)[:-1, :-1])\
+np.divide(v[1:, :-1] - v[:-1, :-1],\
np.gradient(y, axis=0)[:-1, :-1]))
error = np.pad(error, ((0, 1),), constant_values=0)
return error
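# A minimal sanity-check sketch (toy data, not from the source): a uniform
# flow field is divergence-free, so Continuity should return all zeros.
# x, y = np.meshgrid(np.arange(5.0), np.arange(4.0))
# u = np.ones((4, 5)); v = np.zeros((4, 5))
# err = Continuity(u, v, x, y)   # -> (4, 5) array of zeros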
# %% Momentum
# @njit # (parallel=True)
def Momentum(vort, u, v, dx, dy):
"""
Calculation of the momentum error in a 2D flow field
Parameters
----------
vort : NxM array
Vorticity value on each grid point.
u : NxM array
u-velocity value on each grid point.
v : NxM array
v-velocity value on each grid point.
dx : Mx1 Array
x-distance cell centre-face.
dy : Nx1 Array
y-distance cell centre-face.
Returns
-------
error: NxM array
Momentum error on each grid point.
"""
nu = 1.05e-6
if not (np.shape(vort) == np.shape(u) and np.shape(vort) == np.shape(v)):
print('Momentum: Shape mismatch')
return None
else:
# Vorticity Gradient x
vortx = np.zeros_like(vort)
vortxx = np.zeros_like(vortx)
vortx[:, -1] = np.divide(vort[:, -1]-vort[:, -2], dx[-1]+dx[-2])
vortx[:, 0] = np.divide(vort[:, 1]-vort[:, 0], dx[1]+dx[0])
for i in range(1, vort.shape[1]-1):
vortx[:, i] = (np.divide(vort[:, i+1]*dx[i] - vort[:, i]*dx[i+1],
dx[i]+dx[i+1])
-np.divide(vort[:, i]*dx[i-1] - vort[:, i-1]*dx[i],
dx[i]+dx[i-1])) / (2*dx[i])
vortxx[:, -1] = np.divide(vortx[:, -1]-vortx[:, -2], dx[-1]+dx[-2])
vortxx[:, 0] = np.divide(vortx[:, 1]-vortx[:, 0], dx[0]+dx[1])
for i in range(1, vortx.shape[1]-1):
vortxx[:, i] = (np.divide(vortx[:, i+1]*dx[i] - vortx[:, i]*dx[i+1],
dx[i]+dx[i+1])
-np.divide(vortx[:, i]*dx[i-1] - vortx[:, i-1]*dx[i],
dx[i]+dx[i-1])) / (2*dx[i])
# Vorticity Gradient y
vorty = np.zeros_like(vort)
vortyy = np.zeros_like(vortx)
vorty[-1, :] = np.divide(vort[-1, :]-vort[-2, :], dy[-1]+dy[-2])
vorty[0, :] = np.divide(vort[1, :]-vort[0, :], dy[0]+dy[1])
for i in range(1, vort.shape[0]-1):
vorty[i, :] = (np.divide(vort[i+1, :]*dy[i] - vort[i, :]*dy[i+1],
dy[i]+dy[i+1])
-np.divide(vort[i, :]*dy[i-1] - vort[i-1, :]*dy[i],
dy[i]+dy[i-1])) / (2*dy[i])
vortyy[-1, :] = np.divide(vorty[-1, :]-vorty[-2, :], dy[-1]+dy[-2])
vortyy[0, :] = np.divide(vorty[1, :]-vorty[0, :], dy[0]+dy[1])
for i in range(1, vorty.shape[0]-1):
vortyy[i, :] = (np.divide(vorty[i+1, :]*dy[i] - vorty[i, :]*dy[i+1],
dy[i]+dy[i+1])
-np.divide(vorty[i, :]*dy[i-1] - vorty[i-1, :]*dy[i],
dy[i]+dy[i-1])) / (2*dy[i])
t1 = np.multiply(u, vortx)
t2 = np.multiply(v, vorty)
t3 = nu * (vortxx+vortyy)
error = abs(np.subtract(t1+t2, t3))
return error
# %% CellSizes
def CellSizes(x, y):
"""
Calculates the distance from cell centre to cell face in either direction
Parameters
----------
x : Mx1 Array
x-Coordinates of cell centers.
y : Nx1 Array
y-Coordinates of cell centers.
Returns
-------
dx : Mx1 Array
x-distance cell centre-face.
dy : Nx1 Array
y-distance cell centre-face.
"""
# Calculating cell sizes in x-direction
first = np.where(np.gradient(x) == 1)[0][0]
last = np.where(np.gradient(x) == 1)[0][-1]
dx = np.ones_like(x)*.5
for i in np.linspace(first-1, 0, first, dtype=int):
dx[i] = x[i+1] - x[i] - dx[i+1]
for i in range(last, x.shape[0]):
dx[i] = x[i] - x[i-1] - dx[i-1]
# Calculating cell sizes in y-direction
first = np.where(np.gradient(y) == 1)[0][0]
last = np.where(np.gradient(y) == 1)[0][-1]
dy = np.ones_like(y)*.5
for i in np.linspace(first-1, 0, first, dtype=int):
dy[i] = y[i+1] - y[i] - dy[i+1]
for i in range(last, y.shape[0]):
dy[i] = y[i] - y[i-1] -dy[i-1]
return dx, dy
# %% Vorticity
def Vorticity(u, v, dx, dy):
"""
Calculates the Vorticity from velocity Components and Cell sizes
Parameters
----------
u : NxM Array
u-velocity at each grid point.
v : NxM Array
v-velocity at each grid point.
dx : Mx1 Array
Half cell sizes in x-direction.
dy : Nx1 Array
Half cell sizes in y-direction.
Returns
-------
vort : NxM Array
Vorticity at each grid point.
"""
# Gradient v-velocity
dvdx = np.zeros_like(v)
dvdx[:, 0] = np.divide(v[:, 1] - v[:, 0], dx[0]+dx[1])
dvdx[:, -1] = np.divide(v[:, -1]-v[:, -2], dx[-1]+dx[-2])
for i in range(1, v.shape[1]-1):
vpl = np.divide(v[:, i]*dx[i+1] + v[:, i+1]*dx[i], dx[i]+dx[i+1])
vmi = np.divide(v[:, i]*dx[i-1] + v[:, i-1]*dx[i], dx[i-1]+dx[i])
dvdx[:, i] = np.divide(vpl - vmi, 2*dx[i])
# Gradient u-velocity
dudy = np.zeros_like(u)
dudy[0, :] = np.divide(u[1, :] - u[0, :], dy[0]+dy[1])
dudy[-1, :] = np.divide(u[-1, :] - u[-2, :], dy[-1]+dy[-2])
for i in range(1, u.shape[0]-1):
upl = np.divide(u[i, :]*dy[i+1] + u[i+1, :]*dy[i], dy[i]+dy[i+1])
umi = np.divide(u[i, :]*dy[i-1] + u[i-1, :]*dy[i], dy[i]+dy[i-1])
dudy[i, :] = np.divide(upl-umi, 2*dy[i])
vort = dvdx - dudy
return vort
# %% Pressure
def Pressure(x, y, u, v, x_body, y_body, step=1, rho=1, nu=128/150):
"""
Calculates the pressure field from the velocity field. To avoid problems
with the velocity inside the body, the integration is carried out always
only up to the body.
Parameters
----------
x : 2D-Array
x-Coordinates.
y : 2D-Array
y-Coordinates.
u : 2D-Array
Velocity x-component.
v : 2D-Array
Velocity y-component.
x_body : 1D-Array
x-coordinates of body.
y_body : 1D-Array
y-coordinates of body.
step : int, optional
Step size on grid. The default is 1.
rho : float, optional
Density. The default is 1.
nu : float, optional
Viscosity. The default is 128/150.
Returns
-------
p : 2D-Array
Pressure field.
"""
dudx = np.gradient(u, step, axis=1)
dudy = np.gradient(u, step, axis=0)
dpdx = -(rho*(u*dudx + v * dudy)
+ nu*(np.gradient(dudx, step, axis=1))
+ np.gradient(dudy, step, axis=0))
dvdx = np.gradient(v, step, axis=1)
dvdy = np.gradient(v, step, axis=0)
dpdy = -(rho*(u*dvdx + v * dvdy)
+ nu*(np.gradient(dvdx, step, axis=1)
+ np.gradient(dvdy, step, axis=0)))
p = np.empty_like(x)
p[:, 0] = step * dpdx[:, 0]
i = 1
while (x[0, i] < np.min(x_body)) and i < len(x):
p[:, i] = p[:, i-1] + step * dpdx[:, i]
i += 1
for k in range(i, len(x)):
p[0, k] = p[0, k-1] + step * dpdx[0, k]
p[-1, k] = p[-1, k-1] + step * dpdx[-1, k]
k = 1
while (y[k, 0] < np.min(y_body)) and k < len(x):
p[k, :] = p[k-1, :] + step * dpdy[k, :]
k += 1
l = len(x)-2
while (y[l, 0] > np.max(y_body)) and l > 0:
p[l, :] = p[l+1, :] - step * dpdy[l, :]
l -= 1
while x[0, i] <= np.max(x_body):
yl = np.interp(x[0, i], x_body, y_body)
ind = abs(yl - y[:, 0]).argmin()
for m in range(k, ind):
p[m, i] = p[m-1, i] + step * dpdy[k, i]
for m in range(l-ind):
p[l-m, i] = p[l-m+1, i] - step * dpdy[l-m, i]
i += 1
yl = np.interp(x[0, i], x_body, y_body)
ind = abs(yl - y[:, 0]).argmin()
for m in range(k, ind):
p[m, i] = p[m-1, i] + step * dpdy[k, i]
for m in range(l-ind):
p[l-m, i] = p[l-m+1, i] - step * dpdy[l-m, i]
i += 1
for m in range(k, l+1):
p[m, i:] = p[m-1, i:] + step * dpdy[m, i:]
return p
# %% Forces
def Forces(x, y, u, v, p, xb, yb, Sb, dr, Angles, chord, nu=128/150):
"""
Calculates the pressure & viscous forces acting on a body.
Parameters
----------
x : 2D-Array
Meshgrid x-coordinates.
y : 2D-Array
Meshgrid y-coordinates.
u : 2D-Array
x-velocity field.
v : 2D-Array
y-velocity field.
p : 2D-Array
Pressure field.
xb : 1D-Array
Body x-coordinates.
yb : 1D-Array
Body y-coordinates.
Sb : 1D-Array
Body panel lengths.
dr : float
Body half thickness.
Angles : 1D-Array
Body panel angles.
chord : float
Body chord length.
nu : float, optional
Viscosity. The default is 128/150.
Returns
-------
Lift : float
Lift coefficient.
Drag : float
Drag coefficient.
"""
ptop = np.zeros_like(xb)
pbot = np.zeros_like(ptop)
top = np.zeros((len(xb), 2))
bot = np.zeros_like(top)
i = 0
for xt, yt in zip(xb, yb):
top[i, 0] = xt - np.sin(Angles[i]) * dr
top[i, 1] = yt + np.cos(Angles[i]) * dr
bot[i, 0] = xt + np.sin(Angles[i]) * dr
bot[i, 1] = yt - np.cos(Angles[i]) * dr
i += 1
maskx = np.logical_and(x > np.min(xb)-5, x<np.max(xb)+5)
masky = np.logical_and(y > np.min(yb)-5, y<np.max(yb)+5)
mask = np.logical_and(maskx, masky)
ptop = griddata(np.vstack((x[mask], y[mask])).transpose(),
p[mask], top)
pbot = griddata(np.vstack((x[mask], y[mask])).transpose(),
p[mask], bot)
veltop = griddata(np.vstack((x[mask], y[mask])).transpose(),
np.sqrt(u**2 + v**2)[mask], top)
velbot = griddata(np.vstack((x[mask], y[mask])).transpose(),
np.sqrt(u**2 + v**2)[mask], bot)
Lift = 2 * (np.sum(np.cos(Angles)*(pbot - ptop)* Sb)
+ nu * np.sum(np.sin(Angles)*(veltop+velbot)* Sb)) / chord
Drag = 2 * (-np.sum(np.sin(Angles)*(pbot - ptop)* Sb)
+ nu * np.sum(np.cos(Angles)*(veltop+velbot)* Sb))/ chord
return Lift, Drag
# %% Read VTR
def read_vtr(file):
"""
Parameters
----------
file : PATH
Path to file to be read.
Returns
-------
u : NxM Array
u-velocity at each grid point.
v : NxM Array
v-velocity at each grid point.
Pressure : NxM Array
Pressure at each grid point.
x : Mx1 Array
x-coordinates of gridpoints.
y : Nx1 Array
y-coordinates of gridpoints.
"""
reader = vtk.vtkXMLRectilinearGridReader()
reader.SetFileName(file)
reader.Update()
data = reader.GetOutput()
pointData = data.GetPointData()
sh = data.GetDimensions()[::-1]
ndims = len(sh)
# Get Vector Field
v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims, ))
Velocity = []
for d in range(ndims):
a = v[..., d]
Velocity.append(a)
# Get scalar field
Pressure = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1, )).squeeze()
u = Velocity[0].squeeze()
v = Velocity[1].squeeze()
# Obtain Grid
x = np.array(data.GetXCoordinates())
y = np.array(data.GetYCoordinates())
return u, v, Pressure, x, y
# %% Read Data
def Read_Data(AoAs, start=50, timesteps=100, step=1, verbose=False,
getPressure=False):
"""
Parameters
----------
AoAs : tuple with N scalar entries
Angles of attack in degrees for which to read data.
start : scalar, optional
First timestep to use. The default is 50.
timesteps : scalar, optional
Total number of timesteps; files from start to timesteps are used. The default is 100.
Returns
-------
x : MxO Array
x-coordinates of grid.
y : MxO Array
y-coordinates of grid.
u : NxMxO Array
u-velocity at each AoA and grid point.
v : NxMxO Array
v-velocity at each AoA and grid point.
vort : NxMxO Array
vorticity at each AoA and grid point.
u_std : NxMxO Array
u standard deviation at each AoA and grid point.
v_std : NxMxO Array
v standard deviation at each AoA and grid point.
Cont : NxMxO Array
Continuity error at each AoA and grid point.
Mom : NxMxO Array
Momentum error at each AoA and grid point.
"""
n_files = timesteps-start
j = 0
for alpha in AoAs:
print('alpha = {:03d}deg'.format(alpha))
u0, v0, Press0, xlin, ylin = read_vtr("../Data/arc_{:03d}_Re_150/dat0x0x0/fluid.{:01d}.vtr".format(alpha, start))
u_files = np.empty((n_files, u0.shape[0], u0.shape[1]))
u_files[0] = u0
v_files = np.empty((n_files, v0.shape[0], v0.shape[1]))
v_files[0] = v0
Press_files = np.empty((n_files, Press0.shape[0], Press0.shape[1]))
Press_files[0] = Press0
for i in range(1, n_files):
file = "../Data/arc_{:03d}_Re_150/dat0x0x0/fluid.{:01d}.vtr".format(alpha, i+start)
u_files[i], v_files[i], Press_files[i], xlin, ylin = read_vtr(file)
x_stretch, y_stretch = np.meshgrid(xlin, ylin)
# Statistics
if j == 0:
u_std = np.zeros((len(AoAs), u0.shape[0], u0.shape[1]))
v_std = np.zeros((len(AoAs), u0.shape[0], u0.shape[1]))
u_stretch = np.mean(u_files, axis=0)
v_stretch = np.mean(v_files, axis=0)
Press = np.mean(Press_files, axis=0)
u_std[j] = np.std(u_files, axis=0)
v_std[j] = np.std(v_files, axis=0)
if verbose:
print('Mean std u: %.8f' % np.mean(u_std[j]))
print('Mean std v: %.8f' % np.mean(v_std[j]))
print('Max std u: %.8f' % np.max(u_std[j]))
print('Max std v: %.8f' % np.max(v_std[j]))
dx, dy = CellSizes(xlin, ylin)
vort_stretch = Vorticity(u_stretch, v_stretch, dx, dy)
# Interpolate to regular grid
if j == 0:
xmin = np.ceil(np.min(xlin))
xmax = np.floor(np.max(xlin))
ymin = np.ceil(np.min(ylin))
ymax = np.floor(np.max(ylin))
x, y = (np.mgrid[xmin+1:xmax:step, ymin:ymax:step] - .5)
x = x.transpose().astype(float)
y = y.transpose().astype(float)
u = np.zeros((len(AoAs), x.shape[0], x.shape[1]))
v =
|
np.zeros_like(u)
|
numpy.zeros_like
|
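# A minimal sketch (toy data, illustrative names) of the stretched-to-regular
# grid interpolation that Read_Data performs above using scipy's griddata.
import numpy as np
from scipy.interpolate import griddata

xlin = np.array([0.0, 1.0, 2.5, 4.5])   # stretched x-coordinates
ylin = np.array([0.0, 1.0, 2.5])        # stretched y-coordinates
xs, ys = np.meshgrid(xlin, ylin)
u_stretch = xs + ys                     # toy field on the stretched grid
x, y = np.meshgrid(np.arange(0.0, 4.0), np.arange(0.0, 2.0))
u = griddata((xs.ravel(), ys.ravel()), u_stretch.ravel(), (x, y), method="linear")
# u[i, j] approximates the field at the regular grid point (x[i, j], y[i, j])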
from impedance.models.circuits import BaseCircuit, CustomCircuit, Randles
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pytest
# get example data
data = np.genfromtxt(os.path.join("./data/", "exampleData.csv"), delimiter=",")
f = data[:, 0]
Z = data[:, 1] + 1j * data[:, 2]
def test_BaseCircuit():
initial_guess = [0.01, 0.02, 50]
base_circuit = BaseCircuit(initial_guess)
# __init__()
# check initial_guess is loaded in correctly
assert base_circuit.initial_guess == initial_guess
# improper initial_guess types raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=["hi", 0.1])
# __eq__()
# incorrect comparisons raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r == 8
# fit()
# improper data types in fitting raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit([42, 4.2], []) # frequencies not ndarray
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42 + 42j]), []) # frequencies not numeric type
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42]), [42 + 42j]) # Z not ndarray
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42]), np.array([0.5, 0.2])) # Z not complex
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.fit(np.array([42, 4.2]), np.array([42 + 42j])) # mismatched lengths
# predict()
# improper data types in fitting raise a TypeError
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.predict([42, 4.2]) # frequencies not ndarray
with pytest.raises(TypeError):
r = BaseCircuit(initial_guess=[0.01, 0.005, 0.1, 0.0001, 200])
r.predict(np.array([42 + 42j])) # frequencies not numeric type
# plot()
# kind = {'nyquist', 'bode'} should return a plt.Axes() object
_, ax = plt.subplots()
assert isinstance(base_circuit.plot(ax, None, Z, kind="nyquist"), type(ax))
assert isinstance(base_circuit.plot(None, f, Z, kind="nyquist"), type(ax))
_, axes = plt.subplots(nrows=2)
assert isinstance(base_circuit.plot(axes, f, Z, kind="bode")[0], type(ax))
assert isinstance(base_circuit.plot(None, f, Z, kind="bode")[0], type(ax))
# incorrect kind raises a ValueError
with pytest.raises(ValueError):
base_circuit.plot(None, f, Z, kind="SomethingElse")
def test_Randles():
randles = Randles(initial_guess=[0.01, 0.005, 0.1, 0.01, 200])
randlesCPE = Randles(initial_guess=[0.01, 0.05, 0.1, 0.9, 0.01, 200], CPE=True)
with pytest.raises(ValueError):
randlesCPE = Randles([0.01, 200]) # incorrect initial guess length
randles.fit(f[np.imag(Z) < 0], Z[np.imag(Z) < 0])
randlesCPE.fit(f[np.imag(Z) < 0], Z[np.imag(Z) < 0])
# compare with known fit parameters
np.testing.assert_almost_equal(
randles.parameters_,
np.array(
[
1.86146620e-02,
1.15477171e-02,
1.33331949e00,
6.31473571e-02,
2.22407275e02,
]
),
decimal=2,
)
# compare with known impedance predictions
assert np.isclose(
randles.predict(np.array([10.0])),
|
np.complex(0.02495749, -0.00614842)
|
numpy.complex
|
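# Environment note (an assumption about the NumPy version, not from the
# source): np.complex, used in the completion above, was deprecated in
# NumPy 1.20 and removed in 1.24; the builtin complex is the scalar
# drop-in replacement.
import numpy as np
expected = complex(0.02495749, -0.00614842)
assert np.isclose(expected, 0.02495749 - 0.00614842j)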
# Copyright (c) Facebook, Inc. and its affiliates.
import sys
import os
import numpy as np
import torch
import pybullet
import ml3
from ml3.envs.reacher_sim import ReacherSimulation
from ml3.mbrl_utils import Dynamics
from ml3.learnable_losses import Ml3_loss_reacher as Ml3_loss
from ml3.optimizee import Reacher_Policy as Policy
from ml3.ml3_train import meta_train_mbrl_reacher as meta_train
from ml3.ml3_test import test_ml3_loss_reacher as test_ml3_loss
EXP_FOLDER = os.path.join(ml3.__path__[0], "experiments/data/mbrl_reacher")
class Task_loss(object):
def __call__(self, a, s, goal):
loss = 10*torch.norm(s[-1,:2]-goal[:2])+torch.mean(torch.norm(s[:,:2]-goal[:2],dim=1))+0.0001*torch.mean(torch.norm(s[:,2:],dim=1))
return loss
def random_babbling(env, time_horizon):
# do random babbling
actions = np.random.uniform(-1.0, 1.0, [time_horizon, 2])
states = []
state = env.reset()
states.append(state)
for u in actions:
state = env.sim_step(state, u)
states.append(state.copy())
return np.array(states), actions
if __name__ == '__main__':
if not os.path.exists(EXP_FOLDER):
os.makedirs(EXP_FOLDER)
|
np.random.seed(0)
|
numpy.random.seed
|
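# A minimal usage sketch of random_babbling above, with a stand-in
# environment (the real ReacherSimulation is assumed unavailable here).
import numpy as np

class _ToyEnv:
    def reset(self):
        return np.zeros(4)
    def sim_step(self, state, u):
        return state + 0.01 * np.concatenate([u, u])  # toy dynamics

states, actions = random_babbling(_ToyEnv(), time_horizon=10)
assert states.shape == (11, 4) and actions.shape == (10, 2)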
import math
import cv2
import numpy as np
"""Pipeline to process images and draw road lanes"""
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
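# A minimal usage sketch (hypothetical vertices and image, not from the
# source): keep a trapezoidal region ahead of the car, black out the rest.
# vertices = np.array([[(100, 540), (420, 330), (520, 330), (860, 540)]], dtype=np.int32)
# edges = canny(gaussian_blur(grayscale(img), 5), 50, 150)
# masked = region_of_interest(edges, vertices)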
def extrapolate_line(list_x, list_y, min_allowed_slope, max_allowed_slope, img, color, thickness):
"""
Fits list_x, list_y to a 1st-degree polynomial.
Tries np.polyfit first and falls back to an average if that does not work.
:param list_x: list of x's of points to be extrapolated
:param list_y: list of y's of points to be extrapolated
:param min_allowed_slope: min slope allowed
:param max_allowed_slope: max slope allowed
:param img: image to be extrapolated on
:param color: line color
:param thickness: line thickness
:return:
"""
if not list_x or not list_y:
return
imgy = img.shape[0]
extrap_base_y = imgy # lower y coord of extrapolation
extrap_horizon_y = math.floor(imgy * 2. / 3.) # upper y coord of extrapolation
# Try polyfit
m, b =
|
np.polyfit(list_x, list_y, 1)
|
numpy.polyfit
|
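# A minimal sketch (toy points, not from the source) of the 1st-degree fit
# the truncated extrapolate_line above relies on: np.polyfit returns
# (slope, intercept), which is then inverted to get x at chosen y-coordinates.
import numpy as np

list_x, list_y = [100, 150, 200], [700, 650, 600]
m, b = np.polyfit(list_x, list_y, 1)      # y = m*x + b -> m = -1, b = 800
x_base = int(round((720 - b) / m))        # x at the bottom of the image
x_horizon = int(round((480 - b) / m))     # x at the lane horizon
assert (x_base, x_horizon) == (80, 320)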
import os
import numpy as np
import keras
from keras import backend as K
from . import _utils
from . import _deck
from . import _game
from . import _representations
class IntelligentPlayer(_game.DefaultPlayer):
def __init__(self, network):
super().__init__()
self._network = network
def get_model_prediction(self, board):
representation = self._network.representation.create(board, self)
return self._network.predict(representation)
def _propose_card_to_play(self, board):
output = self.get_model_prediction(board)
index = np.argmax(output[:32])
return _deck.Card.from_global_index(index)
def set_reward(self, board, value):
last_player, last_card = board.actions[-1]
if np.random.rand() < .3 and self.last_correct + 1 >= len(board.actions):
value = min(250, value + max(np.max(self.get_model_prediction(board)), 0))
previous_representation = self._network.representation.create(board, self, no_last=True)
if last_player == self.order:
expected = self._network.output_framework.make_reference(self._last_playable_cards, last_card, value)
self._network.train(previous_representation, expected)
elif
|
np.random.rand()
|
numpy.random.rand
|
import logging
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.ndimage.morphology import binary_fill_holes
from torch.nn.functional import one_hot
from seg_utils.utils import convert_binary_output_to_classes
DEBUG = True
def show_img(img, title=None, ax=None, show=True):
if ax is None:
plt.figure()
ax = plt.gca()
ax.imshow(img)
if title is not None:
ax.set_title(title)
if show:
plt.show()
def to_cpu(t):
if "cuda" in str(t.device):
t = t.cpu()
if t.requires_grad:
t = t.detach()
return t
def binarize_img(img):
""" minumum processing to convert to a binary image and fill holes. Should not fail"""
_, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)
img = binary_fill_holes(img).astype(np.float32)
return img
def open_and_close(img, plot=False):
""" find the largest contour... may fail if contours are not found..."""
if plot:
show_img(img, "og")
kernel = np.ones((15, 15), np.uint8)
num_iters = 3
for i in range(num_iters):
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
for i in range(num_iters * 2):
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
for i in range(num_iters):
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
if plot:
show_img(img, "closed")
return img
def find_largest_contour(img, plot=False):
""" find the largest contour... may fail if contours are not found..."""
img = img.astype(np.uint8)
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
largest = get_largest_contour(contours)
except ValueError as e:
print(e)
return img
# draw largest contour filled on image
img = np.zeros(img.shape[0:2]).astype(np.uint8)
cv2.drawContours(img, largest, -1, (1, 1, 1), -1)
if plot:
show_img(img, "contour image 2")
return img
def smooth_contour(img, plot=False):
for i in range(3):
img = cv2.GaussianBlur(img, (15, 15), 0)
if plot:
show_img(img, "blurred")
_, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)
if plot:
show_img(img, "thresholded")
return img
def simplicity(img):
""" return the simplicity of a contour. Image should be a binary mask image """
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
largest = get_largest_contour(contours)
perimeter = cv2.arcLength(largest[0], True)
area = cv2.contourArea(largest[0])
return np.sqrt(4 * np.pi * area) / perimeter
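# Sanity check (illustrative): for a perfect circle, perimeter = 2*pi*r and
# area = pi*r**2, so sqrt(4*pi*area)/perimeter = 2*pi*r/(2*pi*r) = 1;
# simplicity is 1 for a circle and < 1 for less compact shapes.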
def choose_who_gets_overlapping_region_by_simplicity(img1, img2):
""" for two images with an area of overlap this function will find who gets the overlapping region by evaluating
the simplicity of each with and without the region """
mask1 = img1.astype(bool)
mask2 = img2.astype(bool)
simplicity_img1_with = simplicity(mask1.astype(np.uint8))
simplicity_img2_with = simplicity(mask2.astype(np.uint8))
try:
simplicity_img1_wout = simplicity((mask1 & ~mask2).astype(np.uint8))
except ValueError:
# entire LA is in overlapping region - LA gets it
mask2 = mask2 & ~mask1
return mask1.astype(np.uint8), mask2.astype(np.uint8)
try:
simplicity_img2_wout = simplicity((mask2 & ~mask1).astype(np.uint8))
except ValueError:
# entire LV is in overlapping region?? seems bad but give to LV
print("WARNING: detected strange overlap between LV and LA")
mask1 = mask1 & ~mask2
return mask1.astype(np.uint8), mask2.astype(np.uint8)
change1 = simplicity_img1_with - simplicity_img1_wout
change2 = simplicity_img2_with - simplicity_img2_wout
# higher simplicity with the region means that the region should be included
if change1 > change2:
mask2 = mask2 & ~mask1
else:
mask1 = mask1 & ~mask2
return mask1.astype(np.uint8), mask2.astype(np.uint8)
def get_largest_contour(contours):
""" find all contours above threshold """
largest = None
current_biggest = 0
for contour in contours:
area = cv2.contourArea(contour)
if area > current_biggest:
largest = contour
current_biggest = area
if largest is None:
raise ValueError("no contours in image > 0 area")
return [largest]
class MaskCombiner(object):
""" combine masks for input to post processing then uncombine after.
Specifically designed to combine the LV myocardium with LV blood pool for post processing
Call combine before post-processing and uncombine after.
"""
def __init__(self, channels_to_merge, output_channel=None):
assert len(channels_to_merge) >= 2, "must provide at least two channels to merge"
self.channels_to_merge = channels_to_merge
if output_channel is None:
self.output_channel = channels_to_merge[0]
else:
self.output_channel = output_channel
def combine(self, output):
""" merge several channels into a single channel to simplify post processing"""
assert len(output.shape) == 3, f"function is designed to handle output of shape (C, W, H) got {output.shape}"
output[self.output_channel, :, :] = output[(self.channels_to_merge), :, :].max(0)
def uncombine(self, output):
""" every channel in output should now be a processed binary mask.
This function will subtract the binary masks from the other channels.
"""
converted = output.argmax(0)
oc = output[self.output_channel]
for c in self.channels_to_merge:
if c != self.output_channel:
oc[converted == c] = output.min() # these pixels will no longer be attributed to output channel
print(f"uncombining channel {c} from {self.output_channel}")
print(f"found {(converted == c).sum()} pixels for channel {c}")
print(f"found {(converted == self.output_channel).sum()} pixels for channel {self.output_channel}")
class PostProcessMultiChannel:
""" version 2 of a post processor intended for multi-channel output"""
def __init__(self, output_nc):
# self.mask_combiner = MaskCombiner(channels_to_merge=(1, 2), output_channel=2)
self.output_nc = output_nc
@staticmethod
def _post_process_single(img, plot=False):
img = binarize_img(img) # convert to binary
img = open_and_close(img, plot=plot)
# contour finding... may fail
try:
img = find_largest_contour(img, plot=plot)
except ValueError as e:
print(f"post processing failed because {e}")
# redo with plotting on
try:
find_largest_contour(img, plot=True)
except ValueError:
pass
img = smooth_contour(img, plot=plot)
img = img.astype(bool)
return img
def merge_multiple(self, la, other, plot=False):
overlap = la.astype(bool) & other.astype(bool)
if la.sum() > 0 and overlap.sum() / la.sum() > 0.04:
if plot:
f, axs = plt.subplots(1, 3)
show_img(la, title="LA pre", ax=axs[0], show=False)
show_img(other, title="Other pre", ax=axs[1], show=False)
show_img(overlap, title="overlap pre", ax=axs[2])
la, other = choose_who_gets_overlapping_region_by_simplicity(la, other)
return la, other
def process_four_channel(self, output):
segs = to_cpu(output["segs"]).numpy()
for i in range(segs.shape[0]):
classes = segs[i].argmax(0)
la_orig = classes == 3
la = self._post_process_single(la_orig.astype(np.uint8).copy(), plot=False)
lv_endo_orig = classes == 1
lv_endo = self._post_process_single(lv_endo_orig.astype(np.uint8).copy())
lv_epi_orig = ((classes == 1) | (classes == 2))
lv_epi = self._post_process_single(lv_epi_orig.astype(np.uint8).copy())
la, lv_endo = self.merge_multiple(la, lv_endo, plot=False)
la, lv_epi = self.merge_multiple(la, lv_epi, plot=False)
if la.sum() / la_orig.sum() < 0.01:
la = la_orig
logging.warning("post processing reduced la to <1%. Resetting to original")
if lv_endo.sum() / lv_endo_orig.sum() < 0.01:
lv_endo = lv_endo_orig
logging.warning("post processing reduced lv endo to <1%. Resetting to original")
if lv_epi.sum() / lv_epi_orig.sum() < 0.01:
lv_epi = lv_epi_orig
logging.warning("post processing reduced lv epi to <1%. Resetting to original")
# may cut some regions off so find largest again
la = find_largest_contour(la).astype(bool)
lv_endo = find_largest_contour(lv_endo).astype(bool)
lv_epi = find_largest_contour(lv_epi).astype(bool)
# now fill image
res = np.zeros(classes.shape, dtype=segs.dtype)
res[la] = 3
res[lv_endo] = 1
res[lv_epi & ~lv_endo] = 2
res = torch.LongTensor(res)
res = one_hot(res, num_classes=self.output_nc).permute((2, 0, 1))
segs[i] = np.array(res).astype(segs.dtype)
output["segs"] = torch.tensor(segs)
def process_two_channel(self, output, output_nc=None):
output_nc = output_nc if output_nc is not None else self.output_nc
segs = to_cpu(output["segs"]).numpy()
for i in range(segs.shape[0]):
classes = segs[i].argmax(0)
lv_endo = classes == 1
lv_endo = self._post_process_single(lv_endo.astype(np.uint8))
# now fill image
res = np.zeros(classes.shape, dtype=segs.dtype)
res[lv_endo] = 1
res = torch.LongTensor(res)
res = one_hot(res, num_classes=output_nc).permute((2, 0, 1))
segs[i] =
|
np.array(res)
|
numpy.array
|
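# A minimal sketch of the mask -> one-hot -> channels-first conversion used
# by the post-processors above (toy shapes, not from the source).
import torch
from torch.nn.functional import one_hot

classes = torch.LongTensor([[0, 1], [1, 0]])  # toy (H, W) class map
res = one_hot(classes, num_classes=2).permute((2, 0, 1))
assert res.shape == (2, 2, 2)                 # (C, H, W)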
"""Solvers for non-linear systems of equations for implicit integrators."""
from mici.errors import ConvergenceError, LinAlgError
import numpy as np
def euclidean_norm(vct):
"""Calculate the Euclidean (L-2) norm of a vector."""
return np.sum(vct ** 2) ** 0.5
def maximum_norm(vct):
"""Calculate the maximum (L-infinity) norm of a vector."""
return np.max(abs(vct))
def solve_fixed_point_direct(
func,
x0,
convergence_tol=1e-9,
divergence_tol=1e10,
max_iters=100,
norm=maximum_norm,
):
"""Solve fixed point equation `func(x) = x` using direct iteration.
Args:
func (Callable[[array], array]): Function to find fixed point of.
x0 (array): Initial state (function argument).
convergence_tol (float): Convergence tolerance - solver successfully
terminates when `norm(func(x) - x) < convergence_tol`.
divergence_tol (float): Divergence tolerance - solver aborts if
`norm(func(x) - x) > divergence_tol` on any iteration.
max_iters (int): Maximum number of iterations before raising exception.
norm (Callable[[array], float]): Norm to use to assess convergence.
Returns:
Solution to fixed point equation with
`norm(func(x) - x) < convergence_tol`.
Raises:
`mici.errors.ConvergenceError` if solver does not converge within
`max_iters` iterations, diverges or encounters a `ValueError` during
the iteration.
"""
for i in range(max_iters):
try:
x = func(x0)
error = norm(x - x0)
if error > divergence_tol or np.isnan(error):
raise ConvergenceError(
f"Fixed point iteration diverged on iteration {i}."
f"Last error={error:.1e}."
)
if error < convergence_tol:
return x
x0 = x
except (ValueError, LinAlgError) as e:
# Make robust to errors in intermediate linear algebra ops
raise ConvergenceError(
f"{type(e)} at iteration {i} of fixed point solver ({e})."
)
raise ConvergenceError(
f"Fixed point iteration did not converge. Last error={error:.1e}."
)
def solve_fixed_point_steffensen(
func,
x0,
convergence_tol=1e-9,
divergence_tol=1e10,
max_iters=100,
norm=maximum_norm,
):
"""Solve fixed point equation `func(x) = x` using Steffensen's method.
Steffensen's method [1] achieves quadratic convergence but at the cost of
two function evaluations per iteration so for functions where convergence
is achieved in a small number of iterations, direct iteration may be
cheaper.
[1] : https://en.wikipedia.org/wiki/Steffensen%27s_method
Args:
func (Callable[[array], array]): Function to find fixed point of.
x0 (array): Initial state (function argument).
convergence_tol (float): Convergence tolerance - solver successfully
terminates when `norm(func(x) - x) < convergence_tol`.
divergence_tol (float): Divergence tolerance - solver aborts if
`norm(func(x) - x) > divergence_tol` on any iteration.
max_iters (int): Maximum number of iterations before raising exception.
norm (Callable[[array], float]): Norm to use to assess convergence.
Returns:
Solution to fixed point equation with
`norm(func(x) - x) < convergence_tol`.
Raises:
`mici.errors.ConvergenceError` if solver does not converge within
`max_iters` iterations, diverges or encounters a `ValueError` during
the iteration.
"""
for i in range(max_iters):
try:
x1 = func(x0)
x2 = func(x1)
denom = x2 - 2 * x1 + x0
# Set any zero values in denominator of update term to smallest
# floating point value to prevent divide-by-zero errors
denom[abs(denom) == 0.0] = np.finfo(x0.dtype).eps
x = x0 - (x1 - x0) ** 2 / denom
error = norm(x - x0)
if error > divergence_tol or np.isnan(error):
raise ConvergenceError(
f"Fixed point iteration diverged on iteration {i}."
f"Last error={error:.1e}."
)
if error < convergence_tol:
return x
x0 = x
except (ValueError, LinAlgError) as e:
# Make robust to errors in intermediate linear algebra ops
raise ConvergenceError(
f"{type(e)} at iteration {i} of fixed point solver ({e})."
)
raise ConvergenceError(
f"Fixed point iteration did not converge. Last error={error:.1e}."
)
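# A minimal usage sketch (not from the source): solve x = cos(x) with both
# solvers above; each converges to the Dottie number ~0.7390851.
import numpy as np

x_direct = solve_fixed_point_direct(np.cos, np.array([0.5]))
x_steff = solve_fixed_point_steffensen(np.cos, np.array([0.5]))
assert abs(x_direct.item() - 0.7390851) < 1e-6
assert abs(x_steff.item() - 0.7390851) < 1e-6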
def solve_projection_onto_manifold_quasi_newton(
state,
state_prev,
dt,
system,
constraint_tol=1e-9,
position_tol=1e-8,
divergence_tol=1e10,
max_iters=50,
norm=maximum_norm,
):
"""Solve constraint equation using quasi-Newton method.
Uses a quasi-Newton iteration to solve the non-linear system of equations
in `λ`
system.constr(
state.pos + dh2_flow_pos_dmom @
system.jacob_constr(state_prev).T @ λ) == 0
where `dh2_flow_pos_dmom = system.dh2_flow_dmom(dt)[0]` is the derivative
of the action of the (linear) `system.h2_flow` map on the state momentum
component with respect to the position component, `state` is a post
(unconstrained) `system.h2_flow` update state with position component
outside of the manifold and `state_prev` is the corresponding pre-update
state in the co-tangent bundle.
Only requires re-evaluating the constraint function `system.constr` within
the solver loop and no recomputation of matrix decompositions on each
iteration.
Args:
state (mici.states.ChainState): Post `h2_flow` update state to project.
state_prev (mici.states.ChainState): Previous state in co-tangent
bundle manifold before `h2_flow` update which defines the
co-tangent space to perform projection in.
dt (float): Integrator time step used in `h2_flow` update.
system (mici.systems.ConstrainedEuclideanMetricSystem): Hamiltonian
system defining `h2_flow` and `constr` functions used to define
constraint equation to solve.
constraint_tol (float): Convergence tolerance in constraint space.
Iteration will continue until `norm(constr(pos)) < constraint_tol`
where `pos` is the position at the current iteration.
position_tol (float): Convergence tolerance in position space.
Iteration will continue until `norm(delta_pos) < position_tol`
where `delta_pos` is the change in the position in the current
iteration.
divergence_tol (float): Divergence tolerance - solver aborts if
`norm(constr(pos)) > divergence_tol` on any iteration where `pos`
is the position at the current iteration and raises
`mici.errors.ConvergenceError`.
max_iters (int): Maximum number of iterations to perform before
aborting and raising `mici.errors.ConvergenceError`.
norm (Callable[[array], float]): Norm to use to test for convergence.
Returns:
Updated `state` object with position component satisfying constraint
equation to within `constraint_tol`, i.e.
`norm(system.constr(state.pos)) < constraint_tol`.
Raises:
`mici.errors.ConvergenceError` if solver does not converge within
`max_iters` iterations, diverges or encounters a `ValueError` during
the iteration.
"""
mu = np.zeros_like(state.pos)
jacob_constr_prev = system.jacob_constr(state_prev)
# Use absolute value of dt and adjust for sign of dt in mom update below
dh2_flow_pos_dmom, dh2_flow_mom_dmom = system.dh2_flow_dmom(abs(dt))
inv_jacob_constr_inner_product = system.jacob_constr_inner_product(
jacob_constr_prev, dh2_flow_pos_dmom
).inv
for i in range(max_iters):
try:
constr = system.constr(state)
error = norm(constr)
delta_mu = jacob_constr_prev.T @ (inv_jacob_constr_inner_product @ constr)
delta_pos = dh2_flow_pos_dmom @ delta_mu
if error > divergence_tol or np.isnan(error):
raise ConvergenceError(
f"Quasi-Newton solver diverged on iteration {i}. "
f"Last |constr|={error:.1e}, "
f"|delta_pos|={norm(delta_pos):.1e}."
)
elif error < constraint_tol and norm(delta_pos) < position_tol:
state.mom -= np.sign(dt) * dh2_flow_mom_dmom @ mu
return state
mu += delta_mu
state.pos -= delta_pos
except (ValueError, LinAlgError) as e:
# Make robust to errors in intermediate linear algebra ops
raise ConvergenceError(
f"{type(e)} at iteration {i} of quasi-Newton solver ({e})."
)
raise ConvergenceError(
f"Quasi-Newton solver did not converge with {max_iters} iterations. "
f"Last |constr|={error:.1e}, |delta_pos|={norm(delta_pos)}."
)
def solve_projection_onto_manifold_newton(
state,
state_prev,
dt,
system,
constraint_tol=1e-9,
position_tol=1e-8,
divergence_tol=1e10,
max_iters=50,
norm=maximum_norm,
):
"""Solve constraint equation using Newton method.
Uses a Newton iteration to solve the non-linear system of equations in `λ`
system.constr(
state.pos + dh2_flow_pos_dmom @
system.jacob_constr(state_prev).T @ λ) == 0
where `dh2_flow_pos_dmom = system.dh2_flow_dmom(dt)[0]` is the derivative
of the action of the (linear) `system.h2_flow` map on the state momentum
component with respect to the position component, `state` is a post
(unconstrained) `system.h2_flow` update state with position component
outside of the manifold and `state_prev` is the corresponding pre-update
state in the co-tangent bundle.
Requires re-evaluating both the constraint function `system.constr` and
constraint Jacobian `system.jacob_constr` within the solver loop and
computation of matrix decompositions of a preconditioned matrix on each
iteration.
Args:
state (mici.states.ChainState): Post `h2_flow` update state to project.
state_prev (mici.states.ChainState): Previous state in co-tangent
bundle manifold before `h2_flow` update which defines the
co-tangent space to perform projection in.
dt (float): Integrator time step used in `h2_flow` update.
system (mici.systems.ConstrainedEuclideanMetricSystem): Hamiltonian
system defining `h2_flow` and `constr` functions used to define
constraint equation to solve.
constraint_tol (float): Convergence tolerance in constraint space.
Iteration will continue until `norm(constr(pos)) < constraint_tol`
where `pos` is the position at the current iteration.
position_tol (float): Convergence tolerance in position space.
Iteration will continue until `norm(delta_pos) < position_tol`
where `delta_pos` is the change in the position in the current
iteration.
divergence_tol (float): Divergence tolerance - solver aborts if
`norm(constr(pos)) > divergence_tol` on any iteration where `pos`
is the position at the current iteration and raises
`mici.errors.ConvergenceError`.
max_iters (int): Maximum number of iterations to perform before
aborting and raising `mici.errors.ConvergenceError`.
norm (Callable[[array], float]): Norm to use to test for convergence.
Returns:
Updated `state` object with position component satisfying constraint
equation to within `constraint_tol`, i.e.
`norm(system.constr(state.pos)) < constraint_tol`.
Raises:
`mici.errors.ConvergenceError` if solver does not converge within
`max_iters` iterations, diverges or encounters a `ValueError` during
the iteration.
"""
mu =
|
np.zeros_like(state.pos)
|
numpy.zeros_like
|
import numpy as np
import cv2
from collections import deque
import pickle
import os
class ImageProcessor:
"""
Class used to process an image for the LaneDetector. Applies both color and gradient thresholding and produces a set of
images (undistorted, thresholded and warped) that can be used for debugging.
"""
def __init__(self, calibration_data_file):
# Camera calibration data
calibration_data = self._load_calibration_data(file_path = calibration_data_file)
self.mtx = calibration_data['mtx']
self.dist = calibration_data['dist']
# Gradient and color thresholding parameters
self.sobel_kernel = 5
self.grad_x_thresh = (15, 255) # Sobel x threshold
self.grad_y_thresh = (25, 255) # Sobel y threshold
self.grad_mag_thresh = (40, 255) # Sobel mag threshold
self.grad_dir_thresh = (0.7, 1.3) # Sobel direction range
self.grad_v_thresh = (180, 255) # HSV, V channel threshold to filter gradient
self.r_thresh = (195, 255) # RGB, Red channel threshold
self.s_thresh = (100, 255) # HSL, S channel threshold
self.l_thresh = (195, 255) # HSL, L channel threshold
self.b_thresh = (150, 255) # LAB, B channel threshold
self.v_thresh = (140, 255) # HSV, V channel threshold
# Perspective transformation parameters
# slope = (y2 - y1) / (x2 - x1)
# intercept = y1 - slope * x1
# top left, top right = (570, 470), (722, 470)
# bottom left, bottom right = (220, 720), (1110, 720)
self.persp_src_left_line = (-0.7142857143, 877.142857146) # Slope and intercept for left line
self.persp_src_right_line = (0.6443298969, 4.793814441) # Slope and intercept for right line
self.persp_src_top_pct = 0.645 # Percentage from the top
self.persp_src_bottom_pct = 0.02 # Percentage from bottom
self.persp_dst_x_pct = 0.22 # Destination offset percent
self.persp_src = None
self.persp_dst = None
def _load_calibration_data(self, file_path = os.path.join('camera_cal', 'calibration.p')):
with open(file_path, 'rb') as f:
return pickle.load(f)
def _warp_coordinates(self, img):
if self.persp_src is None or self.persp_dst is None:
cols = img.shape[1]
rows = img.shape[0]
src_top_offset = rows * self.persp_src_top_pct
src_bottom_offset = rows * self.persp_src_bottom_pct
left_slope, left_intercept = self.persp_src_left_line
right_slope, right_intercept = self.persp_src_right_line
top_left = [(src_top_offset - left_intercept) / left_slope, src_top_offset]
top_right = [(src_top_offset - right_intercept) / right_slope, src_top_offset]
bottom_left = [(rows - src_bottom_offset - left_intercept) / left_slope, rows - src_bottom_offset]
bottom_right = [(rows - src_bottom_offset - right_intercept) / right_slope, rows - src_bottom_offset]
#Top left, Top right, Bottom right, Bottom left
src = np.float32([top_left, top_right, bottom_right, bottom_left])
dst_x_offset = cols * self.persp_dst_x_pct
top_left = [dst_x_offset, 0]
top_right = [cols - dst_x_offset, 0]
bottom_left = [dst_x_offset, rows]
bottom_right = [cols - dst_x_offset, rows]
dst = np.float32([top_left, top_right, bottom_right, bottom_left])
self.persp_src = src
self.persp_dst = dst
return self.persp_src, self.persp_dst
def _sobel(self, img, orient = 'x', sobel_kernel = 3):
# Take the derivative in x or y given orient = 'x' or 'y'
if orient == 'x':
sobel = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
else:
sobel = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
return sobel
def _apply_thresh(self, img, thresh = (0, 255)):
result = np.zeros_like(img)
result[(img >= thresh[0]) & (img <= thresh[1])] = 1
return result
def unwarp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(dst, src)
unwarped = cv2.warpPerspective(img, warp_m, img_shape)
return unwarped
def warp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, warp_m, img_shape)
return warped
def undistort_image(self, img):
return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def sobel_abs_thresh(self, sobel, thresh=(0, 255)):
# Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
binary_output = self._apply_thresh(scaled_sobel, thresh)
return binary_output
def sobel_mag_thresh(self, sobel_x, sobel_y, thresh=(0, 255)):
# Calculate the gradient magnitude
gradmag = np.sqrt(sobel_x**2 + sobel_y**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_output = self._apply_thresh(gradmag, thresh)
return binary_output
def sobel_dir_thresh(self, sobel_x, sobel_y, thresh=(0, np.pi/2)):
# Take the absolute value of the x and y gradients
abs_sobel_x = np.absolute(sobel_x)
abs_sobel_y = np.absolute(sobel_y)
# Calculate the direction of the gradient
abs_grad_dir = np.arctan2(abs_sobel_y, abs_sobel_x)
binary_output = self._apply_thresh(abs_grad_dir, thresh)
return binary_output
def gradient_thresh(self, img):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
v_ch = hsv_img[:,:,2]
v_binary = self._apply_thresh(v_ch, self.grad_v_thresh)
sobel_x = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'x')
sobel_y = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'y')
sobel_x_binary = self.sobel_abs_thresh(sobel_x, thresh = self.grad_x_thresh)
sobel_y_binary = self.sobel_abs_thresh(sobel_y, thresh = self.grad_y_thresh)
sobel_mag_binary = self.sobel_mag_thresh(sobel_x, sobel_y, thresh = self.grad_mag_thresh)
sobel_dir_binary = self.sobel_dir_thresh(sobel_x, sobel_y, thresh = self.grad_dir_thresh)
sobel_binary = np.zeros_like(sobel_x_binary)
sobel_binary[(((sobel_x_binary == 1) & (sobel_y_binary == 1)) | (sobel_dir_binary == 1)) & (sobel_mag_binary == 1) & (v_binary == 1)] = 1
return sobel_binary
def color_thresh(self, img):
hls_img = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
r_ch = img[:,:,2]
r_binary = self._apply_thresh(r_ch, self.r_thresh)
l_ch = hls_img[:,:,1]
l_binary = self._apply_thresh(l_ch, self.l_thresh)
s_ch = hls_img[:,:,2]
s_binary = self._apply_thresh(s_ch, self.s_thresh)
b_ch = lab_img[:,:,2]
b_binary = self._apply_thresh(b_ch, self.b_thresh)
v_ch = hsv_img[:,:,2]
v_binary = self._apply_thresh(v_ch, self.v_thresh)
result = np.zeros_like(s_binary)
# B and V for yellow, R and L for white, S and V for both
result[((b_binary == 1) & (v_binary == 1)) | ((r_binary == 1) & (l_binary == 1)) | ((s_binary == 1) & (v_binary == 1))] = 1
return result
def threshold_image(self, img):
gradient_binary = self.gradient_thresh(img)
color_binary = self.color_thresh(img)
result = np.zeros_like(gradient_binary)
result[(gradient_binary == 1) | (color_binary == 1)] = 255
return result
def process_image(self, img):
"""
Processes the given image: applies undistortion using the camera calibration data, thresholds the result and then
warps the image for a bird's-eye view of the road.
"""
undistorted_img = self.undistort_image(img)
thresholded_img = self.threshold_image(undistorted_img)
warped_img = self.warp_image(thresholded_img)
return undistorted_img, thresholded_img, warped_img
class LaneDetector:
"""
The class detects road lanes in frames processed by the ImageProcessor, using a sliding window
driven by convolutions to find hot pixels. For each slice it extracts the centroids found in the windows and
fits a polynomial to compute the curvature and the deviation from center. The same polynomial can be used to draw
the lines in the frame. The final centroids returned by the pipeline are averaged over the last N frames to smooth
the result.
"""
FAIL_CODES = {
1: 'Lane distance out of range',
2: 'Lane distance deviates from mean',
3: 'Lane distance deviates from previous frame',
4: 'Low left lane confidence',
5: 'Low right lane confidence',
9: 'Low lanes confidence'
}
def __init__(self, window_width = 30, window_height = 80, margin = 35, smooth_frames = 15, xm = 3.7/700, ym = 3/110):
"""
Initializes the class with the given parameters for the windows. Note that if smooth_frames is zero, no smoothing is
performed between frames.
Parameters
window_width: The width of the sliding window
window_height: The height of the sliding window
margin: Left/right margin that is used by the sliding window in subsequent layers
smooth_frames: The number of frames to use for smoothing the result of the detection
xm: The number of meters per pixel on the horizontal axis
ym: The number of meters per pixel on the vertical axis
"""
# [(left, right, y)]
self.centroids_buffer = deque(maxlen = smooth_frames)
self.last_lanes_distance = None
self.window_width = window_width
self.window_height = window_height
self.margin = margin
self.first_window_height = .75 # The height for the first window (for the start of the lane at the bottom)
self.min_points_fit = 4 # Number of points already found before trying to fit a line when no center is detected
self.min_confidence = 0.16 # Min confidence to keep a detected lane
self.dist_thresh = (510, 890) # Lanes distance threshold
self.max_dist_diff = 60 # Max lanes distance difference between frames
self.max_dist_mean_dev = 80 # Max lanes distance deviation from mean
self.xm = xm
self.ym = ym
self.min_conv_signal = 1000 # Min conv signal to avoid noise
self.max_window_signal = None # Cache for the max amount of signal in a window to compute confidence
def compute_window_max_signal(self, window, width, height, max_value = 255):
"""
Returns the maximum amount of signal in a window with the given dimension, given the value for each pixel
"""
window_sum = np.sum(np.ones((height, width)) * max_value, axis = 0)
conv_signal = np.convolve(window, window_sum)
return np.max(conv_signal)
def detect_lanes(self, img):
"""
Detection pipeline: starts by detecting the bottom of the lanes using a bigger window for the convolution. The
centroids found at this stage are used as the base for the next layer (searching around the margin). For each layer it estimates
the correctness of the detected centroids and tries to detect failures based on the confidence (given by the amount of
signal in each window) and the distance between lanes (and the mean of the previous lanes if smoothing is enabled).
Parameters
img: The input image, must be a processed image from the ImageProcessor
Returns
lanes_centroids: The centroids for the detected lanes
(left_fit, right_fit): The left and right polynomial coefficients from the lanes_centroids
(left_curvature, right_curvature): The curvature in meters
deviation: The deviation from the center of the lane
fail_code: 0 if the lanes could be detected from this frame, otherwise a code that can be mapped in the FAIL_CODES dictionary
Note that if the detection was not successful the lanes_centroids and the fits are the ones from the previous frame
"""
lanes_centroids = []
centroids_confidence = []
window = np.ones(self.window_width)
if self.max_window_signal is None:
self.max_window_signal = self.compute_window_max_signal(window, self.window_width, self.window_height)
left_center, left_confidence, right_center, right_confidence, center_y = self.estimate_start_centroids(img, window)
# Add what we found for the first layer
lanes_centroids.append((left_center, right_center, center_y))
centroids_confidence.append((left_confidence, right_confidence))
# Go through each layer looking for max pixel locations
for level in range(1, (int)(img.shape[0] / self.window_height)):
left_center, left_confidence, right_center, right_confidence, center_y = self.estimate_centroids(img, window, level, left_center, right_center, lanes_centroids)
lanes_centroids.append((left_center, right_center, center_y))
centroids_confidence.append((left_confidence, right_confidence))
lanes_centroids = np.array(lanes_centroids)
centroids_confidence = np.array(centroids_confidence)
fail_code = self.detect_failure(lanes_centroids, centroids_confidence)
# If the lane detection failed and we have buffered frames, use the last one
if fail_code > 0 and len(self.centroids_buffer) > 0:
lanes_centroids = self.centroids_buffer[-1]
self.centroids_buffer.append(lanes_centroids)
if len(self.centroids_buffer) > 0:
self.last_lanes_distance = self.compute_mean_distance(lanes_centroids[:,0], lanes_centroids[:,1])
# Average frames for smoothing
lanes_centroids = np.average(self.centroids_buffer, axis = 0)
left_fit, right_fit = self.lanes_fit(lanes_centroids)
left_fit_scaled, right_fit_scaled = self.lanes_fit(lanes_centroids, ym = self.ym, xm = self.xm)
curvature = self.compute_curvature(left_fit_scaled, right_fit_scaled, np.max(lanes_centroids[:,:2]) * self.ym)
deviation = self.compute_deviation(left_fit_scaled, right_fit_scaled, img.shape[0] * self.ym, img.shape[1] * self.xm)
return lanes_centroids, (left_fit, right_fit), curvature, deviation, fail_code
def estimate_start_centroids(self, img, window):
"""
Estimates the centroids at the bottom of the image; if some frames are buffered, uses the previous frames
to define a search boundary.
Parameters
img: Input image, must be processed from the ImageProcessor
window: The base window used in the convolutions within a frame
"""
if len(self.centroids_buffer) > 0:
# If a "good" start was found already, limit the search within the previous
# frame start boundaries
prev_centroids = np.array(self.centroids_buffer)
prev_left_centroids = prev_centroids[:,:,0]
prev_right_centroids = prev_centroids[:,:,1]
left_min_index = int(max(np.min(prev_left_centroids) - self.margin, 0))
left_max_index = int(min(np.max(prev_left_centroids) + self.margin, img.shape[1]))
right_min_index = int(max(np.min(prev_right_centroids) - self.margin, 0))
right_max_index = int(min(np.max(prev_right_centroids) + self.margin, img.shape[1]))
else:
left_min_index = 0
left_max_index = int(img.shape[1] / 2)
right_min_index = int(img.shape[1] / 2)
right_max_index = img.shape[1]
window_top = int(img.shape[0] * self.first_window_height)
window_y = int(img.shape[0] - self.window_height / 2)
left_sum = np.sum(img[window_top:, left_min_index:left_max_index], axis=0)
left_signal = np.convolve(window, left_sum)
left_center, left_confidence = self.get_conv_center(left_signal, left_min_index, max_signal = None)
right_sum = np.sum(img[window_top:, right_min_index:right_max_index], axis=0)
right_signal = np.convolve(window, right_sum)
right_center, right_confidence = self.get_conv_center(right_signal, right_min_index, max_signal = None)
return left_center, left_confidence, right_center, right_confidence, window_y
def get_conv_center(self, conv_signal, offset, max_signal = None):
"""
Computes the center from the given convolution signal assuming the given offset
Parameters
conv_signal: The result of the convolution of a window
offset: The offset used for the convolution (so that the center is relative to the image and not the window)
max_signal: The maximum amount of signal in the convolution, used to compute the confidence; if supplied, a threshold
is applied for the minimum amount of signal considered valid
Returns
center: The center x, None if not enough signal
confidence: The ratio between the signal and the max amount of signal
"""
max_conv_signal = np.max(conv_signal)  # dataset completion; target API: numpy.max
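# --- Illustrative sketch (added; not part of the original snippet) ---
# How a window convolution locates a lane center: convolve a flat window with a
# column sum of hot pixels, take the argmax, and shift back by half the window
# width. All values below are synthetic.
import numpy as np

demo_window_width = 30
demo_window = np.ones(demo_window_width)
demo_col_sum = np.zeros(200)
demo_col_sum[80:110] = 255                        # synthetic "hot" lane pixels
demo_conv = np.convolve(demo_window, demo_col_sum)
demo_center = np.argmax(demo_conv) - demo_window_width / 2       # back to image coords
demo_confidence = np.max(demo_conv) / (demo_window_width * 255)  # 1.0 at full overlap
# demo_center lands near the middle of the bright band (~94 for the band above).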
"""PCA tests."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
from scipy import signal
from spikedetekt2.processing import compute_pcs, project_pcs
# -----------------------------------------------------------------------------
# PCA tests
# -----------------------------------------------------------------------------
def test_compute_pcs():
"""Test PCA on a 2D array."""
# Horizontal ellipsoid.
x = np.random.randn(20000, 2) * np.array([[10., 1.]])
# Rotate the points by pi/4.
a = 1./np.sqrt(2.)
rot = np.array([[a, -a], [a, a]])
x = np.dot(x, rot)
# Compute the PCs.
pcs = compute_pcs(x)
assert pcs.ndim == 2
assert (np.abs(pcs) - a).max() < 1e-2
def test_compute_pcs_3d():
"""Test PCA on a 3D array."""
x1 = np.random.randn(20000, 2)  # dataset completion; target API: numpy.random.randn
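# --- Illustrative sketch (added; not part of the original snippet) ---
# compute_pcs is imported from spikedetekt2 and its body is not shown here; the
# stand-in below is only an assumption of the idea the test exercises: PCA via
# the eigendecomposition of the covariance matrix.
import numpy as np

def pca_sketch(x):
    """Return principal components as rows, largest variance first."""
    x_centered = x - x.mean(axis=0)
    cov = np.cov(x_centered, rowvar=False)
    eigvals, eigvecs = np.linalg.eigh(cov)   # eigh returns ascending eigenvalues
    return eigvecs[:, ::-1].T

demo_a = 1. / np.sqrt(2.)
demo_x = np.dot(np.random.randn(20000, 2) * np.array([[10., 1.]]),
                np.array([[demo_a, -demo_a], [demo_a, demo_a]]))
demo_pcs = pca_sketch(demo_x)
# For this rotated ellipsoid every PC entry has magnitude ~1/sqrt(2),
# matching the assertion in test_compute_pcs above.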
# Copyright (c) 2016-2018 The Regents of the University of Michigan
# This file is part of the General Simulation Data (GSD) project, released under the BSD 2-Clause License.
import gsd.fl
import gsd.pygsd
import tempfile
import numpy
import platform
from nose.tools import ok_, eq_, assert_raises
def test_create():
with tempfile.TemporaryDirectory() as d:
gsd.fl.create(name=d+"/test_create.gsd", application="test_create", schema="none", schema_version=[1,2]);
def test_dtypes():
for typ in [numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64, numpy.int8, numpy.int16, numpy.int32,
numpy.int64, numpy.float32, numpy.float64]:
yield check_dtype, typ
def check_dtype(typ):
data1d = numpy.array([1,2,3,4,5,10012], dtype=typ);
data2d = numpy.array([[10,20],[30,40],[50,80]], dtype=typ);
with tempfile.TemporaryDirectory() as d:
gsd.fl.create(name=d+"/test_dtype.gsd", application="test_dtype", schema="none", schema_version=[1,2]);
with gsd.fl.GSDFile(name=d+"/test_dtype.gsd", mode='wb') as f:
f.write_chunk(name='data1d', data=data1d);
f.write_chunk(name='data2d', data=data2d);
f.end_frame();
with gsd.fl.GSDFile(name=d+"/test_dtype.gsd", mode='rb') as f:
read_data1d = f.read_chunk(frame=0, name='data1d');
read_data2d = f.read_chunk(frame=0, name='data2d');
eq_(data1d.dtype, read_data1d.dtype);
numpy.testing.assert_array_equal(data1d, read_data1d);
eq_(data2d.dtype, read_data2d.dtype);
numpy.testing.assert_array_equal(data2d, read_data2d);
# test again with pygsd
with gsd.pygsd.GSDFile(file=open(d+"/test_dtype.gsd", mode='rb')) as f:
read_data1d = f.read_chunk(frame=0, name='data1d');
read_data2d = f.read_chunk(frame=0, name='data2d');
eq_(data1d.dtype, read_data1d.dtype);
numpy.testing.assert_array_equal(data1d, read_data1d);
eq_(data2d.dtype, read_data2d.dtype);
numpy.testing.assert_array_equal(data2d, read_data2d);
def test_metadata():
with tempfile.TemporaryDirectory() as d:
gsd.fl.create(name=d+'/test_metadata.gsd', application='test_metadata', schema='none', schema_version=[1,2]);
data = numpy.array([1,2,3,4,5,10012], dtype=numpy.int64);  # dataset completion; target API: numpy.array
import torch, os
import numpy as np
from MiniImagenet import MiniImagenet
import scipy.stats
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import random, sys, pickle
import argparse
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def cal_conf(result_array):
"""result_array: nxsteps"""
m = np.mean(result_array, 0)
std = np.std(result_array, 0)  # dataset completion; target API: numpy.std
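# --- Illustrative usage sketch (added; the accuracies below are simulated) ---
# 95% confidence interval over per-episode accuracies; nothing here depends on
# MiniImagenet or the Meta learner.
demo_accs = np.random.normal(loc=0.6, scale=0.05, size=600)
demo_m, demo_h = mean_confidence_interval(demo_accs)
print('accuracy: %.4f +/- %.4f' % (demo_m, demo_h))  # half-width from the Student-t quantile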
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from python_speech_features import mfcc, logfbank
import librosa
def plot_signals(signals):
fig, axes = plt.subplots(nrows=2, ncols=5, sharex=False,
sharey=True, figsize=(20,5))
fig.suptitle('Time Series', size=16)
i = 0
for x in range(2):
for y in range(5):
axes[x,y].set_title(list(signals.keys())[i])
axes[x,y].plot(list(signals.values())[i])
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
def plot_fft(fft):
fig, axes = plt.subplots(nrows=2, ncols=5, sharex=False,
sharey=True, figsize=(20,5))
fig.suptitle('Fourier Transforms', size=16)
i = 0
for x in range(2):
for y in range(5):
data = list(fft.values())[i]
Y, freq = data[0], data[1]
axes[x,y].set_title(list(fft.keys())[i])
axes[x,y].plot(freq, Y)
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
def plot_fbank(fbank):
fig, axes = plt.subplots(nrows=2, ncols=5, sharex=False,
sharey=True, figsize=(20,5))
fig.suptitle('Filter Bank Coefficients', size=16)
i = 0
for x in range(2):
for y in range(5):
axes[x,y].set_title(list(fbank.keys())[i])
axes[x,y].imshow(list(fbank.values())[i],
cmap='hot', interpolation='nearest')
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
def plot_mfccs(mfccs):
fig, axes = plt.subplots(nrows=2, ncols=5, sharex=False,
sharey=True, figsize=(20,5))
fig.suptitle('Mel Frequency Cepstrum Coefficients', size=16)
i = 0
for x in range(2):
for y in range(5):
axes[x,y].set_title(list(mfccs.keys())[i])
axes[x,y].imshow(list(mfccs.values())[i],
cmap='hot', interpolation='nearest')
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
def envelope(y, rate, threshold):
mask = []
y = pd.Series(y).apply(np.abs)
y_mean = y.rolling(window=int(rate/10), min_periods=1, center=True).mean()
for mean in y_mean:
if mean > threshold:
mask.append(True)
else:
mask.append(False)
return mask
def calc_fft(y, rate):
n = len(y)
freq = np.fft.rfftfreq(n, d=1/rate)
Y = abs(np.fft.rfft(y)/n)
return (Y, freq)
df = pd.read_csv('instruments.csv')
df.set_index('fname', inplace=True)
for f in df.index:
rate, signal = wavfile.read('wavfiles/'+f)
df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))  # dataset completion; target API: numpy.unique
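# --- Illustrative usage sketch (added; the signal below is synthetic) ---
# Trim low-energy regions with the rolling-mean envelope, then inspect the
# magnitude spectrum via calc_fft.
demo_rate = 16000
demo_signal = np.random.randn(demo_rate)             # stand-in for a real recording
demo_mask = envelope(demo_signal, demo_rate, threshold=0.5)
demo_clean = demo_signal[demo_mask]
demo_Y, demo_freq = calc_fft(demo_clean, demo_rate)  # plot with plt.plot(demo_freq, demo_Y)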
"""
Implementation of the mean-field Ising Influence Maximisation algorithm.
"""
#Import modules:
import numpy as np
import math
import scipy
from scipy import sparse
import networkx as nx
#Modules in this dir:
from . import projection_simplex as proj
def project_block_to_graph(block_sizes, block_level_vals):
"""
Projects a set of values at the level of
blocks to the nodes in the graph.
"""
full_graph_values = []
for k, n in enumerate(block_sizes):
current_block = []
for q in range(n):
current_block.append(block_level_vals[k])
full_graph_values = np.concatenate((full_graph_values, current_block))
return full_graph_values
def get_ascending_pairs(values) :
edges = np.concatenate(([0], np.cumsum(values)))
pairs = [(edges[i], edges[i + 1]) for i in range(len(edges) - 1)]
return pairs
def block_level_average(block_sizes,node_values) :
"""
Average a quantity defined at the level of nodes
to the level of blocks.
Parameters
-------------
node_values : list
Values of a list at the level of nodes
"""
block_level_average=[]
size_ascending_pairs = get_ascending_pairs(block_sizes)
for index_pairs in size_ascending_pairs :
block_level_average.append(np.mean(node_values[index_pairs[0]:index_pairs[1]]))
return block_level_average
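# --- Tiny round-trip check (added for illustration) ---
# Projecting block-level values onto nodes and averaging back recovers the
# original block values.
_demo_nodes = project_block_to_graph([2, 3], [0.1, 0.9])
assert np.allclose(_demo_nodes, [0.1, 0.1, 0.9, 0.9, 0.9])
assert np.allclose(block_level_average([2, 3], _demo_nodes), [0.1, 0.9])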
class mean_field_ising_system :
"""
Mean field Ising system on a specific graph
"""
def __init__(self,graph,background_field,block_sizes=None,notebook=False) :
"""
Initialises the mean-field Ising system class
on a graph
Parameters
----------
graph : networkx graph
Networkx graph encoding the interactions between spins
background_field : numpy array
Background fields for nodes - this array must be the same size as the
network. Set this to zero if no external fields are required.
block_sizes : list
List of block sizes. Assume that graph nodes are
ordered according to the respective blocks.
"""
self.graph=graph
self.background_field=background_field
self.block_sizes=block_sizes
#FP iteration parameters
"""
Gamma = Damping parameter for fixed point iteration. For standard mean-field
theory the dynamics will converge even with gamma=1.0.
(See: Aspelmeier, Timo, et al. "Free-energy landscapes, dynamics, and the edge of chaos in mean-field models of spin glasses." Physical Review B 74.18 (2006): 184411. )
Note in the paper above the damping parameter is alpha and gamma is used differently.
Damping can be introduced to provide a 'smoother' convergence.
"""
self.gamma=1.0
"""
tol : float
Tolerance parameter. The iterations are terminated when the difference
between successive magnetisation values goes below this tolerance.
"""
self.tol=1E-5
self.max_mf_fp_iterations=10000 #Sets a maximum to the number of iterations.
self.mf_fp_init_state='aligned' #This can also be set as an array.
self.mf_fp_noisy=True
#IIM parameters
self.max_mf_iim_iterations=1000
self.mf_iim_step_size=1.0
self.mf_iim_tolerance=1E-6
self.mf_iim_init_control='uniform'
self.mf_iim_noisy=True #If true, print the number of iterations and the current magnetisation at each step.
self.notebook = notebook
def mf_magnetization(self,h,beta,return_sequence=False) :
"""
Implements damped fixed point iteration
for solving the mean-field self consistency
equations on a weighted graph with general
external field.
Parameters
--------------
graph : networkx graph
Weighted networkx graph where the edge weights represent
the coupling strengths between nodes.
h : numpy array
control field acting on the N nodes
beta : float
Inverse temperature of the Ising system
return_sequence : bool (opt)
If true then we return the sequence of iterations of the
algorithm. This output can be used to visualize the extent
of convergence.
Returns
-----------
m : numpy array
Magnetizations for each of the nodes
"""
if self.notebook == True :
from tqdm import tqdm_notebook as tqdm
else :
from tqdm import tqdm
N = len(self.graph)
if self.mf_fp_init_state == 'aligned':
m = np.ones(N) #Initialize at the maximum magnetization.
else:
m = np.copy(self.mf_fp_init_state)
m_sequence = []
m_sequence.append(np.mean(m))
for t in (tqdm(range(self.max_mf_fp_iterations)) if self.mf_fp_noisy else range(self.max_mf_fp_iterations)): # tqdm progress bar used if 'noisy' is set.
old_m = np.mean(m)
for i in range(N):
neighbors_mags = [m[p] for p in list(dict(self.graph[i]).keys())]
#Networkx graphs may not contain edge weights. Have to check whether they exist.
edges_to_current_node = [[i, p] for p in list(dict(self.graph[i]).keys())]
if len(edges_to_current_node) > 0:
weights_to_current_node = [len(list(self.graph.get_edge_data(i, j).values())) for i, j in
zip(np.transpose(edges_to_current_node)[0],
np.transpose(edges_to_current_node)[1])]
else:
weights_to_current_node = [0.0]
if np.sum(weights_to_current_node)  # dataset completion (snippet truncates mid-statement); target API: numpy.sum
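# --- Vectorized sketch (added; an assumption derived from the docstring, not the
# class's own method) --- the damped fixed-point update the node loop above
# implements, written with a dense coupling matrix:
#     m <- (1 - gamma) * m + gamma * tanh(beta * (J @ m + h))
import numpy as np
import networkx as nx

def mf_fixed_point_sketch(graph, h, beta, gamma=1.0, tol=1e-5, max_iter=10000):
    J = nx.to_numpy_array(graph)     # couplings J_ij from the edge weights
    m = np.ones(len(graph))          # 'aligned' initialization
    for _ in range(max_iter):
        m_new = (1 - gamma) * m + gamma * np.tanh(beta * (J @ m + h))
        if np.abs(m_new - m).max() < tol:
            break
        m = m_new
    return m_new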
import argparse
import os
import zipfile
import cv2
import flwr as fl
import numpy as np
import sklearn
import tensorflow as tf
import wget
from tensorflow.keras.utils import to_categorical
# Make TensorFlow logs less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class_encoding = {'COVID': 0, 'Normal': 1, 'Viral Pneumonia': 2}
def download_dataset():
wget.download('https://storage.googleapis.com/fl-covid-data/train_valid.zip')
with zipfile.ZipFile('./train_valid.zip', 'r') as zip_ref:
zip_ref.extractall('.')
def get_samples_from(dir_path):
print('getting samples from', dir_path)
samples = []
for sample in os.listdir(dir_path):
samples.append(os.path.join(dir_path, sample))
return samples
def get_all_vals(class_dict):
x = []
y = []
for k, v in class_dict.items():
for sample in v:
x.append(sample)
y.append(k)
y = np.array(list(map(lambda label: class_encoding[label], y)))
return np.array(x), y
def get_iid(id, class_to_samples, count):
x, y = get_all_vals(class_to_samples)
return transform_to_train(id, x, y, count)
def get_random(id, class_to_samples, count):
x, y = get_all_vals(class_to_samples)
x, y = sklearn.utils.shuffle(x, y)  # shuffle returns shuffled copies, so reassign
return transform_to_train(id, x, y, count)
def get_noniid(id, class_to_samples, count):
x, y = get_all_vals(class_to_samples)
n = len(x)
batch_size = n // count
x_train = np.array(list(map(lambda path: load_image(path), x[batch_size * id: batch_size * (id + 1)])))  # materialize the map before np.array
y_train = np.array(y[batch_size * id: batch_size * (id + 1)])
return x_train, y_train
def transform_to_train(id, x, y, count):
x_train, y_train = [], []
i = id
while i < len(x):
img = load_image(x[i])
x_train.append(img)
y_train.append(y[i])
i += count
x_train, y_train = np.array(x_train), np.array(y_train)  # dataset completion; target API: numpy.array
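# --- Tiny demonstration (added for illustration) ---
# transform_to_train strides through the sample list: client `id` takes every
# `count`-th sample starting at index `id` (round-robin partitioning).
demo_samples = list(range(10))
demo_count = 3                                   # three federated clients
demo_shards = [demo_samples[cid::demo_count] for cid in range(demo_count)]
# demo_shards == [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]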
import os
import numpy as np
from easydict import EasyDict as edict
__C = edict()
cfg = __C
# Dataset options
#
__C.DATASET = edict()
__C.DATASET.NUM_CLASSES = 0
__C.DATASET.DATAROOT = ''
__C.DATASET.SOURCE_NAME = ''
__C.DATASET.TARGET_NAME = ''
# Model options
__C.MODEL = edict()
__C.MODEL.FEATURE_EXTRACTOR = 'resnet101'
__C.MODEL.FC_HIDDEN_DIMS = ()
__C.MODEL.PRETRAINED = True
# data pre-processing options
#
__C.DATA_TRANSFORM = edict()
__C.DATA_TRANSFORM.RESIZE_OR_CROP = 'resize_and_crop'
__C.DATA_TRANSFORM.LOADSIZE = 256
__C.DATA_TRANSFORM.FINESIZE = 224
__C.DATA_TRANSFORM.FLIP = True
__C.DATA_TRANSFORM.WITH_FIVE_CROP = False
__C.DATA_TRANSFORM.NORMALIZE_MEAN = (0.485, 0.456, 0.406)
__C.DATA_TRANSFORM.NORMALIZE_STD = (0.229, 0.224, 0.225)
# Training options
#
__C.TRAIN = edict()
# batch size setting
__C.TRAIN.SOURCE_BATCH_SIZE = 30
__C.TRAIN.TARGET_BATCH_SIZE = 30
__C.TRAIN.TARGET_CLASS_BATCH_SIZE = 3
__C.TRAIN.SOURCE_CLASS_BATCH_SIZE = 3
__C.TRAIN.NUM_SELECTED_CLASSES = 10
# model setting
__C.TRAIN.STOP_GRAD = 'layer1'
__C.TRAIN.DROPOUT_RATIO = (0.0,)
# learning rate schedule
__C.TRAIN.BASE_LR = 0.001
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.LR_MULT = 10
__C.TRAIN.OPTIMIZER = 'SGD'
__C.TRAIN.WEIGHT_DECAY = 0.0005
__C.TRAIN.LR_SCHEDULE = 'inv'
__C.TRAIN.MAX_LOOP = 50
__C.TRAIN.STOP_THRESHOLDS = (0.001, 0.001, 0.001)
__C.TRAIN.MIN_SN_PER_CLASS = 3
__C.TRAIN.LOGGING = True
__C.TRAIN.TEST_INTERVAL = 1.0 # percentage of total iterations each loop
__C.TRAIN.SAVE_CKPT_INTERVAL = 1.0 # percentage of total iterations in each loop
__C.TRAIN.NUM_LOGGING_PER_LOOP = 6.0
__C.TRAIN.UPDATE_EPOCH_PERCENTAGE = 1.0
# optimizer options
__C.ADAM = edict()
__C.ADAM.BETA1 = 0.9
__C.ADAM.BETA2 = 0.999
__C.INV = edict()
__C.INV.ALPHA = 0.001
__C.INV.BETA = 0.75
__C.EXP = edict()
__C.EXP.LR_DECAY_RATE = 0.1
__C.EXP.LR_DECAY_STEP = 30
# Clustering options
__C.CLUSTERING = edict()
__C.CLUSTERING.TARGET_BATCH_SIZE = 100
__C.CLUSTERING.SOURCE_BATCH_SIZE = 100
__C.CLUSTERING.TARGET_DATASET_TYPE = 'SingleDatasetWithoutLabel'
__C.CLUSTERING.BUDGET = 1000
__C.CLUSTERING.EPS = 0.005
__C.CLUSTERING.FILTERING_THRESHOLD = 1.0
__C.CLUSTERING.FEAT_KEY = 'feat'
__C.CLUSTERING.HISTORY_LEN = 2
# CDD options
__C.CDD = edict()
__C.CDD.KERNEL_NUM = (5, 5)
__C.CDD.KERNEL_MUL = (2, 2)
__C.CDD.LOSS_WEIGHT = 0.3
__C.CDD.ALIGNMENT_FEAT_KEYS = ['feat', 'probs']
__C.CDD.INTRA_ONLY = False
# MMD/JMMD options
__C.MMD = edict()
__C.MMD.KERNEL_NUM = (5, 5)
__C.MMD.KERNEL_MUL = (2, 2)
__C.MMD.LOSS_WEIGHT = 0.3
__C.MMD.ALIGNMENT_FEAT_KEYS = ['feat', 'probs']
__C.MMD.JOINT = False
# Testing options
#
__C.TEST = edict()
__C.TEST.BATCH_SIZE = 30
__C.TEST.DATASET_TYPE = 'SingleDataset'
__C.TEST.DOMAIN = ''
# MISC
__C.WEIGHTS = ''
__C.RESUME = ''
__C.EVAL_METRIC = "accuracy" # "mean_accu" as alternative
__C.EXP_NAME = 'exp'
__C.SAVE_DIR = ''
__C.NUM_WORKERS = 3
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k in a:
# a must specify keys that are in b
v = a[k]
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)  # dataset completion; target API: numpy.array
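# --- Hedged usage sketch (added) ---
# _merge_a_into_b is truncated in this snippet; the full version recurses into
# nested edicts and finally assigns b[k] = v. Assuming that behaviour, merging a
# partial override into the defaults above would look like:
demo_override = edict({'TRAIN': {'BASE_LR': 0.01}, 'DATASET': {'NUM_CLASSES': 31}})
_merge_a_into_b(demo_override, __C)
# cfg.TRAIN.BASE_LR -> 0.01, cfg.DATASET.NUM_CLASSES -> 31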