prompt (stringlengths 15 - 655k) | completion (stringlengths 3 - 32.4k) | api (stringlengths 8 - 52)
---|---|---
import os
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
from PIL import Image
import matplotlib.pyplot as plt
from lib.datasets.utils import angle2class
from lib.datasets.utils import gaussian_radius
from lib.datasets.utils import draw_umich_gaussian
from lib.datasets.utils import get_angle_from_box3d,check_range
from lib.datasets.kitti_utils import get_objects_from_label
from lib.datasets.kitti_utils import Calibration
from lib.datasets.kitti_utils import get_affine_transform
from lib.datasets.kitti_utils import affine_transform
from lib.datasets.kitti_utils import compute_box_3d
import pdb
class KITTI(data.Dataset):
def __init__(self, root_dir, split, cfg):
# basic configuration
self.num_classes = 3
self.max_objs = 50
self.class_name = ['Pedestrian', 'Car', 'Cyclist']
self.cls2id = {'Pedestrian': 0, 'Car': 1, 'Cyclist': 2}
self.resolution = np.array([1280, 384]) # W * H
self.use_3d_center = cfg['use_3d_center']
self.writelist = cfg['writelist']
if cfg['class_merging']:
self.writelist.extend(['Van', 'Truck'])
if cfg['use_dontcare']:
self.writelist.extend(['DontCare'])
'''
['Car': np.array([3.88311640418,1.62856739989,1.52563191462]),
'Pedestrian': np.array([0.84422524,0.66068622,1.76255119]),
'Cyclist': np.array([1.76282397,0.59706367,1.73698127])]
'''
## mean size per class in (h, w, l) order; rows follow cls2id: Pedestrian, Car, Cyclist
self.cls_mean_size = np.array([[1.76255119 ,0.66068622 , 0.84422524 ],
[1.52563191462 ,1.62856739989, 3.88311640418],
[1.73698127 ,0.59706367 , 1.76282397 ]])
# data split loading
assert split in ['train', 'val', 'trainval', 'test']
self.split = split
split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')
self.idx_list = [x.strip() for x in open(split_dir).readlines()]
# path configuration
self.data_dir = os.path.join(root_dir, 'KITTI', 'testing' if split == 'test' else 'training')
self.image_dir = os.path.join(self.data_dir, 'image_2')
self.depth_dir = os.path.join(self.data_dir, 'depth')
self.calib_dir = os.path.join(self.data_dir, 'calib')
self.label_dir = os.path.join(self.data_dir, 'label_2')
# data augmentation configuration
self.data_augmentation = True if split in ['train', 'trainval'] else False
self.random_flip = cfg['random_flip']
self.random_crop = cfg['random_crop']
self.scale = cfg['scale']
self.shift = cfg['shift']
# statistics
self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
# others
self.downsample = 4
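# Hedged example of the cfg dict consumed by __init__ above (the keys are the ones
# looked up in this constructor; the values shown are only illustrative, not the
# repository's defaults):
#   cfg = {'use_3d_center': True, 'writelist': ['Car'],
#          'class_merging': False, 'use_dontcare': False,
#          'random_flip': 0.5, 'random_crop': 0.5, 'scale': 0.4, 'shift': 0.1}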
def get_image(self, idx):
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
return Image.open(img_file) # (H, W, 3) RGB mode
def get_label(self, idx):
label_file = os.path.join(self.label_dir, '%06d.txt' % idx)
assert os.path.exists(label_file)
return get_objects_from_label(label_file)
def get_calib(self, idx):
calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)
assert os.path.exists(calib_file)
return Calibration(calib_file)
def __len__(self):
return len(self.idx_list)
def __getitem__(self, item):
# ============================ get inputs ===========================
index = int(self.idx_list[item]) # index mapping, get real data id
# image loading
img = self.get_image(index)
img_size = np.array(img.size)
# data augmentation for image
center = np.array(img_size) / 2
crop_size = img_size
random_crop_flag, random_flip_flag = False, False
if self.data_augmentation:
if np.random.random() < self.random_flip:
random_flip_flag = True
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if
|
np.random.random()
|
numpy.random.random
|
import emcee
import numpy as np
from typing import List, Optional
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.samples.mcmc import MCMCSamples
from autofit.non_linear.mcmc.auto_correlations import AutoCorrelationsSettings, AutoCorrelations
from autofit.non_linear.samples import Sample
class EmceeSamples(MCMCSamples):
@classmethod
def from_results_internal(
cls,
results_internal: emcee.backends.HDFBackend,
model: AbstractPriorModel,
auto_correlation_settings: AutoCorrelationsSettings,
unconverged_sample_size: int = 100,
time: Optional[float] = None,
):
"""
The `Samples` classes in **PyAutoFit** provide an interface between the results of a `NonLinearSearch` (e.g.
as files on your hard-disk) and Python.
To create a `Samples` object after an `emcee` model-fit the results must be converted from the
native format used by `emcee` (which is a HDFBackend) to lists of values, the format used by the **PyAutoFit**
`Samples` objects.
This classmethod performs this conversion before creating a `EmceeSamples` object.
Parameters
----------
results_internal
The MCMC results in their native internal format from which the samples are computed.
model
Maps input vectors of unit parameter values to physical values and model instances via priors.
auto_correlation_settings
Customizes and performs auto correlation calculations performed during and after the search.
unconverged_sample_size
If the samples are for a search that has yet to converge, a reduced set of samples is used to provide
a rough estimate of the parameters. The number of samples is set by this parameter.
time
The time taken to perform the model-fit, which is passed around `Samples` objects for outputting
information on the overall fit.
"""
parameter_lists = results_internal.get_chain(flat=True).tolist()
log_prior_list = model.log_prior_list_from(parameter_lists=parameter_lists)
log_posterior_list = results_internal.get_log_prob(flat=True).tolist()
log_likelihood_list = [
log_posterior - log_prior for
log_posterior, log_prior in
zip(log_posterior_list, log_prior_list)
]
weight_list = len(log_likelihood_list) * [1.0]
sample_list = Sample.from_lists(
model=model,
parameter_lists=parameter_lists,
log_likelihood_list=log_likelihood_list,
log_prior_list=log_prior_list,
weight_list=weight_list
)
return EmceeSamples(
model=model,
sample_list=sample_list,
auto_correlation_settings=auto_correlation_settings,
unconverged_sample_size=unconverged_sample_size,
time=time,
results_internal=results_internal,
)
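# Hedged usage sketch (illustrative names, not taken from the PyAutoFit docs): given an
# emcee backend saved to disk, an EmceeSamples object could be built roughly as
#   backend = emcee.backends.HDFBackend("search.hdf")
#   samples = EmceeSamples.from_results_internal(
#       results_internal=backend,
#       model=model,
#       auto_correlation_settings=auto_correlation_settings,
#   )
# Note that get_chain(flat=True) / get_log_prob(flat=True) flatten over walkers and
# steps, so the lists built above have n_walkers * n_steps entries.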
@property
def backend(self) -> emcee.backends.HDFBackend:
"""
Makes internal results accessible as `self.backend` for consistency with Emcee API.
"""
return self.results_internal
@property
def samples_after_burn_in(self) -> np.ndarray:
"""
The emcee samples with the initial burn-in samples removed.
The burn-in period is estimated using the auto-correlation times of the parameters.
"""
discard = int(3.0 *
|
np.max(self.auto_correlations.times)
|
numpy.max
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:22, 11/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseMA(Optimizer):
"""
The original version of: Memetic Algorithm (MA)
(On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms)
Link:
Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA)
http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html
"""
ID_BIT = 2
def __init__(self, problem, epoch=10000, pop_size=100, pc=0.85, pm=0.15,
p_local=0.5, max_local_gens=20, bits_per_param=16, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
pc (float): cross-over probability, default = 0.85
pm (float): mutation probability, default = 0.15
p_local (float): Probability of local search for each agent, default=0.5
max_local_gens (int): Maximum number of local-search generations applied to a selected agent, default=20
bits_per_param (int): Number of bits used to encode each real-valued parameter as a 0-1 bitstring, default=16
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.pc = pc
self.pm = pm
self.p_local = p_local
self.max_local_gens = max_local_gens
self.bits_per_param = bits_per_param
self.bits_total = self.problem.n_dims * self.bits_per_param
def create_solution(self):
"""
Returns:
A new solution, a list whose elements are the position, the fitness wrapper and the bitstring
The general format: [position, [target, [obj1, obj2, ...]], bitstring]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
bitstring = ''.join(["1" if np.random.uniform() < 0.5 else "0" for _ in range(0, self.bits_total)])
return [position, fitness, bitstring]
def _decode(self, bitstring=None):
"""
Decode a bitstring into a vector of real numbers
Args:
bitstring (str): e.g. "11000000100101000101010" - with bits_per_param = 16, 32 bits encode 2 variables, e.g. x1 and x2
Returns:
list of real number (vector)
"""
vector = np.ones(self.problem.n_dims)
for idx in range(0, self.problem.n_dims):
param = bitstring[idx * self.bits_per_param: (idx + 1) * self.bits_per_param] # Select 16 bit every time
vector[idx] = self.problem.lb[idx] + ((self.problem.ub[idx] - self.problem.lb[idx]) / ((2.0 ** self.bits_per_param) - 1)) * int(param, 2)
return vector
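# Illustrative worked example of the decoding above (not executed): with lb = [-5],
# ub = [5] and bits_per_param = 4, the step size is (5 - (-5)) / (2**4 - 1) = 10/15,
# so "0000" decodes to -5.0, "1000" (= 8) decodes to -5 + 8 * 10/15 ~= 0.333 and
# "1111" (= 15) decodes to 5.0.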
def _crossover(self, dad=None, mom=None):
if np.random.uniform() >= self.pc:
temp = deepcopy([dad])
return temp[0]
else:
child = ""
for idx in range(0, self.bits_total):
if np.random.uniform() < 0.5:
child += dad[idx]
else:
child += mom[idx]
return child
def _point_mutation(self, bitstring=None):
child = ""
for bit in bitstring:
if
|
np.random.uniform()
|
numpy.random.uniform
|
"""
This will be used to run the kaggle competition version of this solver.
Everything should be modular enough that it is easy to hot-swap this starter and visualizer with the web-based
version of this app.
"""
from src.models.problem_fetcher import ProblemFetcher
from src.models.SingleProblemSolver import SingleProblemSolver
from src.models.ZoltansColorAndCounting.ColorAndCountingModuloQ import Recolor, Create
from src.models.XGBoostBullshit import do_bullshit
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import math
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
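# Illustrative sketch (not part of the original solver): flattener() produces the
# Kaggle ARC submission string, where each grid row becomes a run of digits delimited
# by '|'. The helper name below is made up for the demo.
def _flattener_demo():
    example_grid = [[0, 1], [2, 3]]
    assert flattener(example_grid) == '|01|23|'
    return flattener(example_grid)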
def plot_one(ax, title, input_matrix):
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
ax.imshow(input_matrix, cmap=cmap, norm=norm)
ax.grid(True, which='both', color='lightgrey', linewidth=0.5)
ax.set_yticks([x - 0.5 for x in range(1 + len(input_matrix))])
ax.set_xticks([x - 0.5 for x in range(1 + len(input_matrix[0]))])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_title(title)
def plot_problem_with_gridlines(f_name, task, attempted_solutions):
"""
Plots the first train, test pairs of a specified task, and the attempted solutions to that task
using the same color scheme as the ARC app
:param f_name: The file name of the problem
:param task: The task as it is defined in json in the file defined by f_name
:param attempted_solutions: A list of attempted solutions by various algorithms.
:return: Nothing; just plots the results
"""
print(f_name)
num_train = len(task['train'])
num_test = len(task['test'])
num_solutions = len(attempted_solutions) * len(attempted_solutions[0][1])
fig, axs = plt.subplots(2, num_train + num_test + num_solutions, figsize=(4 * num_train, 3 * 2))
for i in range(num_train):
plot_one(axs[0, i], 'train input', task['train'][i]['input'])
plot_one(axs[1, i], 'train output', task['train'][i]['output'])
# plt.tight_layout()
# plt.show()
# fig, axs = plt.subplots(2, num_test, figsize=(3 * num_test, 3 * 2))
if num_test == 1:
plot_one(axs[0, num_train], 'test input', task['test'][0]['input'])
if 'output' in task['test'][0]:
plot_one(axs[1, num_train], 'test output', task['test'][0]['output'])
else:
for i in range(num_test):
plot_one(axs[0, num_train + i], 'test input', task['test'][i]['input'])
if 'output' in task['test'][0]:
plot_one(axs[1, num_train + i], 'test output', task['test'][i]['output'])
# fig, axs = plt.subplots(2, num_solutions, figsize=(3 * num_test, 3 * 2))
if num_solutions == 1:
plot_one(axs[0, num_train + num_test],
attempted_solutions[0][0], attempted_solutions[0][1][0])
else:
for i in range(len(attempted_solutions)):
for j in range(len(attempted_solutions[i][1])):
plot_one(axs[i % 2, math.floor(num_train + num_test + i + j)],
attempted_solutions[i][0], attempted_solutions[i][1][j])
plt.tight_layout()
plt.show()
def percentage_correct(expected, calculated):
expected =
|
np.array(expected)
|
numpy.array
|
# Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
For discrimination finetuning (e.g. saying whether or not the generation is human/grover)
"""
import json
import os
import sys
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from lm.dataloader import classification_convert_examples_to_features, classification_input_fn_builder, classification_input_dataset, classification_convert_examples_to_features_new
from lm.modeling import classification_model_fn_builder, GroverConfig, GroverModelTF2
from lm.utils import _save_np
from lm.optimization_adafactor import CustomSchedule, loss_function
from sample.encoder import get_encoder
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"config_file", '../lm/configs/base.json',
"The config json file corresponding to the pre-trained news model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_data", 'gs://yanxu98/tinydata/realnews_tiny.jsonl',
"The input data dir. Should contain the .tsv files (or other data files) for the task.")
flags.DEFINE_string(
"additional_data", None,
"Should we provide additional input data? maybe.")
flags.DEFINE_string(
"output_dir", 'gs://yanxu98/tinydata_out',
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", 'gs://grover-models/discrimination/generator=base~discriminator=grover~discsize=base~dataset=p=0.96/model.ckpt-1562',
"Initial checkpoint (usually from a pre-trained model).")
flags.DEFINE_integer(
"max_seq_length", 1024,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer("iterations_per_loop", 32,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("batch_size", 32, "Batch size used")
flags.DEFINE_integer("max_training_examples", -1, "if you wanna limit the number")
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("predict_val", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"predict_test", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_bool(
"require_labels", True,
"Whether require labels when running eval/test"
)
flags.DEFINE_float("num_train_epochs", 100.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_bool("adafactor", False, "Whether to run adafactor")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_tpu", True, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", "grover",
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def _flatten_and_tokenize_metadata(encoder, item):
"""
Turn the article into tokens
:param item: Contains things that need to be tokenized
fields are ['domain', 'date', 'authors', 'title', 'article', 'summary']
:return: list of tokens for the metadata fields
"""
metadata = []
for key in ['domain', 'date', 'authors', 'title', 'article']:
val = item.get(key, None)
if val is not None:
metadata.append(encoder.__dict__[f'begin_{key}'])
metadata.extend(encoder.encode(val))
metadata.append(encoder.__dict__[f'end_{key}'])
return metadata
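# Minimal sketch (not from the Grover codebase) of the token layout produced by
# _flatten_and_tokenize_metadata: each present field is wrapped between its
# begin_<field> and end_<field> special tokens, in a fixed field order. The stub
# encoder below is hypothetical and only mirrors the interface assumed above
# (begin_*/end_* attributes plus an encode() method).
class _StubMetadataEncoder:
    def __init__(self):
        for key in ['domain', 'date', 'authors', 'title', 'article']:
            setattr(self, f'begin_{key}', f'<begin_{key}>')
            setattr(self, f'end_{key}', f'<end_{key}>')
    def encode(self, text):
        return text.split()
# _flatten_and_tokenize_metadata(_StubMetadataEncoder(), {'title': 'hello world'})
# -> ['<begin_title>', 'hello', 'world', '<end_title>']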
def main(_):
LABEL_LIST = ['machine', 'human']
LABEL_INV_MAP = {label: i for i, label in enumerate(LABEL_LIST)}
# These lines of code are just to check if we've already saved something into the directory
if tf.io.gfile.exists(FLAGS.output_dir):
print(f"The output directory {FLAGS.output_dir} exists!")
#if FLAGS.do_train:
# print("EXITING BECAUSE DO_TRAIN is true", flush=True)
# return
#for split in ['val', 'test']:
# if tf.gfile.Exists(os.path.join(FLAGS.output_dir, f'{split}-probs.npy')) and getattr(FLAGS,
# f'predict_{split}'):
# print(f"EXITING BECAUSE {split}-probs.npy exists", flush=True)
# return
# Double check to see if it has trained!
#if not tf.gfile.Exists(os.path.join(FLAGS.output_dir, 'checkpoint')):
# print("EXITING BECAUSE NO CHECKPOINT.", flush=True)
# return
#stuff = {}
#with tf.gfile.Open(os.path.join(FLAGS.output_dir, 'checkpoint'), 'r') as f:
# # model_checkpoint_path: "model.ckpt-0"
# # all_model_checkpoint_paths: "model.ckpt-0"
# for l in f:
# key, val = l.strip().split(': ', 1)
# stuff[key] = val.strip('"')
#if stuff['model_checkpoint_path'] == 'model.ckpt-0':
# print("EXITING BECAUSE IT LOOKS LIKE NOTHING TRAINED", flush=True)
# return
#elif not FLAGS.do_train:
# print("EXITING BECAUSE DO_TRAIN IS FALSE AND PATH DOESNT EXIST")
# return
else:
tf.io.gfile.makedirs(FLAGS.output_dir)
news_config = GroverConfig.from_json_file(FLAGS.config_file)
# TODO might have to change this
encoder = get_encoder()
examples = {'train': [], 'val': [], 'test': []}
|
np.random.seed(123456)
|
numpy.random.seed
|
from pathlib import Path
import unittest
import numpy as np
import pickle
import copy
from iblutil.util import Bunch
import brainbox.behavior.wheel as wheel
import brainbox.behavior.training as train
import brainbox.behavior.pyschofit as psy
class TestWheel(unittest.TestCase):
def setUp(self):
""" Load pickled test data
Test data is in the form ((inputs), (outputs)) where inputs is a tuple containing a
numpy array of timestamps and one of positions; outputs is a tuple of outputs from
the function under test, i.e. wheel.movements
The first set - test_data[0] - comes from Rigbox MATLAB and contains around 200
seconds of (reasonably) evenly sampled wheel data from a 1024 ppr device with X4
encoding, in raw samples. test_data[0] = ((t, pos), (onsets, offsets, amps, peak_vel))
The second set - test_data[1] - comes from ibllib FPGA and contains around 180 seconds
of unevenly sampled wheel data from a 1024 ppr device with X2 encoding, in linear cm units.
test_data[1] = ((t, pos), (onsets, offsets, amps, peak_vel))
"""
pickle_file = Path(__file__).parent.joinpath('fixtures', 'wheel_test.p')
if not pickle_file.exists():
self.test_data = None
else:
with open(pickle_file, 'rb') as f:
self.test_data = pickle.load(f)
# Trial timestamps for trial_data[0]
self.trials = {
'stimOn_times': np.array([0.2, 75, 100, 120, 164]),
'feedback_times': np.array([60.2, 85, 103, 130, 188]),
'intervals': np.array([[0, 62], [63, 90], [95, 110], [115, 135], [140, 200]])
}
def test_derivative(self):
if self.test_data is None:
return
t = np.array([0, .5, 1., 1.5, 2, 3, 4, 4.5, 5, 5.5])
p = np.arange(len(t))
v = wheel.velocity(t, p)
self.assertTrue(len(v) == len(t))
self.assertTrue(np.all(v[0:4] == 2) and v[5] == 1 and np.all(v[7:] == 2))
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(t[:-1] + np.diff(t) / 2, np.diff(p) / np.diff(t), '*-')
# plt.plot(t, v, '-*')
def test_movements(self):
# These test data are the same as those used in the MATLAB code
inputs = self.test_data[0][0]
expected = self.test_data[0][1]
on, off, amp, peak_vel = wheel.movements(
*inputs, freq=1000, pos_thresh=8, pos_thresh_onset=1.5)
self.assertTrue(np.array_equal(on, expected[0]), msg='Unexpected onsets')
self.assertTrue(np.array_equal(off, expected[1]), msg='Unexpected offsets')
self.assertTrue(np.array_equal(amp, expected[2]), msg='Unexpected move amps')
# Differences due to convolution algorithm
all_close = np.allclose(peak_vel, expected[3], atol=1.e-2)
self.assertTrue(all_close, msg='Unexpected peak velocities')
def test_movements_FPGA(self):
# These test data are the same as those used in the MATLAB code. Test data are from
# extracted FPGA wheel data
pos, t = wheel.interpolate_position(*self.test_data[1][0], freq=1000)
expected = self.test_data[1][1]
thresholds = wheel.samples_to_cm(np.array([8, 1.5]))
on, off, amp, peak_vel = wheel.movements(
t, pos, freq=1000, pos_thresh=thresholds[0], pos_thresh_onset=thresholds[1])
self.assertTrue(np.allclose(on, expected[0], atol=1.e-5), msg='Unexpected onsets')
self.assertTrue(np.allclose(off, expected[1], atol=1.e-5), msg='Unexpected offsets')
self.assertTrue(np.allclose(amp, expected[2], atol=1.e-5), msg='Unexpected move amps')
self.assertTrue(np.allclose(peak_vel, expected[3], atol=1.e-2),
msg='Unexpected peak velocities')
def test_traces_by_trial(self):
t, pos = self.test_data[0][0]
start = self.trials['stimOn_times']
end = self.trials['feedback_times']
traces = wheel.traces_by_trial(t, pos, start=start, end=end)
# Check correct number of tuples returned
self.assertEqual(len(traces), start.size)
expected_ids = (
[144, 60143],
[74944, 84943],
[99944, 102943],
[119944, 129943],
[163944, 187943]
)
for trace, ind in zip(traces, expected_ids):
trace_t, trace_pos = trace
np.testing.assert_array_equal(trace_t[[0, -1]], t[ind])
|
np.testing.assert_array_equal(trace_pos[[0, -1]], pos[ind])
|
numpy.testing.assert_array_equal
|
"""Methods of masking tissue and background."""
from abc import ABC, abstractmethod
import cv2
import numpy as np
from skimage.filters import threshold_otsu
from tiatoolbox.utils.misc import objective_power2mpp
class TissueMasker(ABC):
"""Base class for tissue maskers.
Takes an image as input and outputs a mask.
"""
def __init__(self) -> None:
super().__init__()
self.fitted = False
@abstractmethod
def fit(self, images: np.ndarray, masks=None) -> None:
"""Fit the masker to the images and parameters.
Args:
images (:class:`numpy.ndarray`):
List of images, usually WSI thumbnails. Expected shape is
NHWC (number images, height, width, channels).
masks (:class:`numpy.ndarray`):
Target/ground-truth masks. Expected shape is NHW (n
images, height, width).
"""
@abstractmethod
def transform(self, images: np.ndarray) -> np.ndarray:
"""Create and return a tissue mask.
Args:
images (:class:`numpy.ndarray`):
RGB image, usually a WSI thumbnail.
Returns:
:class:`numpy.ndarray`:
Map of semantic classes spatially over the WSI
e.g. regions of tissue vs background.
"""
if not self.fitted:
raise Exception("Fit must be called before transform.")
def fit_transform(self, images: np.ndarray, **kwargs) -> np.ndarray:
"""Perform :func:`fit` then :func:`transform`.
Sometimes it can be more optimal to perform both at the same
time for a single sample. In this case the base implementation
of :func:`fit` followed by :func:`transform` can be overridden.
Args:
images (:class:`numpy.ndarray`):
Image to create mask from.
**kwargs (dict):
Other key word arguments passed to fit.
"""
self.fit(images, **kwargs)
return self.transform(images)
class OtsuTissueMasker(TissueMasker):
Tissue masker which uses Otsu's method to determine the background.
Examples:
>>> from tiatoolbox.tools.tissuemask import OtsuTissueMasker
>>> masker = OtsuTissueMasker()
>>> masker.fit([thumbnail])
>>> masks = masker.transform([thumbnail])
>>> from tiatoolbox.tools.tissuemask import OtsuTissueMasker
>>> masker = OtsuTissueMasker()
>>> masks = masker.fit_transform([thumbnail])
"""
def __init__(self) -> None:
super().__init__()
self.threshold = None
def fit(self, images: np.ndarray, masks=None) -> None:
"""Find a binary threshold using Otsu's method.
Args:
images (:class:`numpy.ndarray`):
List of images with a length 4 shape (N, height, width,
channels).
masks (:class:`numpy.ndarray`):
Unused here, for API consistency.
"""
images_shape = np.shape(images)
if len(images_shape) != 4:
raise ValueError(
"Expected 4 dimensional input shape (N, height, width, 3)"
f" but received shape of {images_shape}."
)
# Convert RGB images to greyscale
grey_images = [x[..., 0] for x in images]
if images_shape[-1] == 3:
grey_images = np.zeros(images_shape[:-1], dtype=np.uint8)
for n, image in enumerate(images):
grey_images[n] = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
pixels = np.concatenate([np.array(grey).flatten() for grey in grey_images])
# Find Otsu's threshold for all pixels
self.threshold = threshold_otsu(pixels)
self.fitted = True
def transform(self, images: np.ndarray) -> np.ndarray:
"""Create masks using the threshold found during :func:`fit`.
Args:
images (:class:`numpy.ndarray`):
List of images with a length 4 shape (N, height, width,
channels).
Returns:
:class:`numpy.ndarray`:
List of binary tissue masks, one per input image, each of
shape (height, width).
"""
super().transform(images)
masks = []
for image in images:
grey = image[..., 0]
if len(image.shape) == 3 and image.shape[-1] == 3:
grey = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
mask = (grey < self.threshold).astype(bool)
masks.append(mask)
return masks
class MorphologicalMasker(OtsuTissueMasker):
"""Tissue masker which uses a threshold and simple morphological operations.
This method applies Otsu's threshold before a simple small region
removal, followed by a morphological dilation. The kernel for the
dilation is an ellipse of radius 64/mpp unless a value is given for
kernel_size. MPP is estimated from objective power via
:func:`tiatoolbox.utils.misc.objective_power2mpp` if a power argument
is given instead of mpp to the initialiser.
For small region removal, the minimum area size defaults to the area
of the kernel. If no mpp, objective power, or kernel_size arguments
are given then the kernel defaults to a size of 1x1.
The scale of the morphological operations can also be manually
specified with the `kernel_size` argument, for example if the
automatic scale from mpp or objective power is too large or small.
Examples:
>>> from tiatoolbox.tools.tissuemask import MorphologicalMasker
>>> from tiatoolbox.wsicore.wsireader import get_wsireader
>>> wsi = get_wsireader("slide.svs")
>>> thumbnail = wsi.slide_thumbnail(32, "mpp")
>>> masker = MorphologicalMasker(mpp=32)
>>> masks = masker.fit_transform([thumbnail])
An example reading a thumbnail from a file where the objective power
is known:
>>> from tiatoolbox.tools.tissuemask import MorphologicalMasker
>>> from tiatoolbox.utils.misc import imread
>>> thumbnail = imread("thumbnail.png")
>>> masker = MorphologicalMasker(power=1.25)
>>> masks = masker.fit_transform([thumbnail])
"""
def __init__(
self, *, mpp=None, power=None, kernel_size=None, min_region_size=None
) -> None:
"""Initialise a morphological masker.
Args:
mpp (float or tuple(float)):
The microns per pixel of the image to be masked. Used to
calculate kernel_size as 64/mpp, optional.
power (float or tuple(float)):
The objective power of the image to be masked. Used to
calculate kernel_size as 64/objective_power2mpp(power),
optional.
kernel_size (int or tuple(int)):
Size of elliptical kernel in x and y, optional.
min_region_size (int):
Minimum region size in pixels to consider as foreground.
Defaults to area of the kernel.
"""
super().__init__()
self.min_region_size = min_region_size
self.threshold = None
# Check for conflicting arguments
if sum(arg is not None for arg in [mpp, power, kernel_size]) > 1:
raise ValueError("Only one of mpp, power, kernel_size can be given.")
# Default to kernel_size of (1, 1) if no arguments given
if all(arg is None for arg in [mpp, power, kernel_size]):
kernel_size =
|
np.array([1, 1])
|
numpy.array
|
import numpy as np
import scipy as sp
from scipy import stats
import chippr
from chippr import defaults as d
from chippr import utils as u
def mean(population):
"""
Calculates the mean of a population
Parameters
----------
population: np.array, float
population over which to calculate the mean
Returns
-------
mean: np.array, float
mean value over population
"""
shape = np.shape(population)
flat = population.reshape(np.prod(shape[:-1]), shape[-1])
mean = np.mean(flat, axis=0)
return mean
def norm_fit(population):
"""
Calculates the mean and standard deviation of a population
Parameters
----------
population: np.array, float
population over which to calculate the mean
Returns
-------
norm_stats: tuple, list, float
mean and standard deviation over population
"""
shape = np.shape(population)
flat = population.reshape(np.prod(shape[:-1]), shape[-1]).T
locs, scales = [], []
for k in range(shape[-1]):
(loc, scale) = sp.stats.norm.fit_loc_scale(flat[k])
locs.append(loc)
scales.append(scale)
locs = np.array(locs)
scales = np.array(scales)
norm_stats = (locs, scales)
return norm_stats
def calculate_kld(pe, qe, vb=True):
"""
Calculates the Kullback-Leibler Divergence between two PDFs.
Parameters
----------
pe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance from `q`
will be calculated.
qe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance to `p` will
be calculated.
vb: boolean
report on progress to stdout?
Returns
-------
Dpq: float
the value of the Kullback-Leibler Divergence from `q` to `p`
"""
# Normalize the evaluations, so that the integrals can be done
# (very approximately!) by simple summation:
pn = pe / np.sum(pe)
qn = qe / np.sum(qe)
# Compute the log of the normalized PDFs
logp = u.safe_log(pn)
logq = u.safe_log(qn)
# Calculate the KLD from q to p
Dpq = np.sum(pn * (logp - logq))
return Dpq
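# Quick sanity-check sketch (illustrative, not part of chippr): the KLD of a
# distribution with itself is zero and it is non-negative for two distinct,
# strictly positive evaluations.
def _kld_sanity_check():
    pe = np.array([0.1, 0.4, 0.3, 0.2])
    qe = np.array([0.25, 0.25, 0.25, 0.25])
    assert np.isclose(calculate_kld(pe, pe), 0.0)
    assert calculate_kld(pe, qe) >= 0.0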
def calculate_rms(pe, qe, vb=True):
"""
Calculates the Root Mean Square Error between two PDFs.
Parameters
----------
pe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance _from_ `q`
will be calculated.
qe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance _to_ `p`
will be calculated.
vb: boolean
report on progress to stdout?
Returns
-------
rms: float
the value of the RMS error between `q` and `p`
"""
npoints = len(pe)
assert len(pe) == len(qe)
# Calculate the RMS between p and q
rms = np.sqrt(np.sum((pe - qe) ** 2) / npoints)
return rms
def single_parameter_gr_stat(chain):
"""
Calculates the Gelman-Rubin test statistic of convergence of an MCMC chain
over one parameter
Parameters
----------
chain: numpy.ndarray, float
single-parameter chain
Returns
-------
R_hat: float
potential scale reduction factor
"""
ssq = np.var(chain, axis=1, ddof=1)
W = np.mean(ssq, axis=0)
xb = np.mean(chain, axis=1)
xbb = np.mean(xb, axis=0)
m = chain.shape[0]
n = chain.shape[1]
B = n / (m - 1.) * np.sum((xbb - xb)**2., axis=0)
var_x = (n - 1.) / n * W + 1. / n * B
R_hat = np.sqrt(var_x / W)
return R_hat
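# Illustrative sketch (not part of chippr): for several well-mixed chains drawn from
# the same distribution, the potential scale reduction factor computed above,
# R_hat = sqrt(((n - 1)/n * W + B/n) / W), should be close to 1.
def _gr_stat_demo(seed=0):
    rng = np.random.RandomState(seed)
    chains = rng.normal(size=(4, 1000))  # 4 walkers, 1000 iterations each
    return single_parameter_gr_stat(chains)  # expected to be approximately 1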
def multi_parameter_gr_stat(sample):
"""
Calculates the Gelman-Rubin test statistic of convergence of an MCMC chain
over multiple parameters
Parameters
----------
sample: numpy.ndarray, float
multi-parameter chain output
Returns
-------
Rs: numpy.ndarray, float
vector of the potential scale reduction factors
"""
dims = np.shape(sample)
(n_walkers, n_iterations, n_params) = dims
n_burn_ins = n_iterations // 2
chain_ensemble = np.swapaxes(sample, 0, 1)
chain_ensemble = chain_ensemble[n_burn_ins:, :]
Rs = np.zeros((n_params))
for i in range(n_params):
chains = chain_ensemble[:, :, i].T
Rs[i] = single_parameter_gr_stat(chains)
return Rs
def gr_test(sample, threshold=d.gr_threshold):
"""
Performs the Gelman-Rubin test of convergence of an MCMC chain
Parameters
----------
sample: numpy.ndarray, float
chain output
threshold: float, optional
Gelman-Rubin test statistic criterion (usually around 1)
Returns
-------
test_result: boolean
True if burning in, False if post-burn in
"""
gr = multi_parameter_gr_stat(sample)
print('Gelman-Rubin test statistic = '+str(gr))
test_result = np.max(gr) > threshold
return test_result
def cft(xtimes, lag):#xtimes has ntimes elements
"""
Helper function to calculate autocorrelation time for chain of MCMC samples
Parameters
----------
xtimes: numpy.ndarray, float
single parameter values for a single walker over all iterations
lag: int
maximum lag time in number of iterations
Returns
-------
ans: numpy.ndarray, float
autocorrelation time for one time lag for one parameter of one walker
"""
lent = len(xtimes) - lag
allt = range(lent)
ans =
|
np.array([xtimes[t+lag] * xtimes[t] for t in allt])
|
numpy.array
|
import os
import numpy as np
import scipy.constants as ct
from astropy.io import fits
from .tools import *
from .load_quantities import *
from .load_arithmetic_quantities import *
from .bifrost import Rhoeetab
from . import document_vars
class MuramAtmos:
"""
Class to read MURaM atmosphere
Parameters
----------
fdir : str, optional
Directory with snapshots.
template : str, optional
Template for snapshot number.
verbose : bool, optional
If True, will print more information.
dtype : str or numpy.dtype, optional
Datatype of read data.
big_endian : bool, optional
Endianness of output file. Default is False (little endian).
prim : bool, optional
Set to True if moments are written instead of velocities.
"""
def __init__(self, fdir='.', template=".020000", verbose=True, dtype='f4',
sel_units='cgs', big_endian=False, prim=False, iz0=None, inttostring=(lambda x: '{0:07d}'.format(x))):
self.prim = prim
self.fdir = fdir
self.verbose = verbose
self.sel_units = sel_units
self.iz0 = iz0
# endianness and data type
if big_endian:
self.dtype = '>' + dtype
else:
self.dtype = '<' + dtype
self.uni = Muram_units()
self.read_header("%s/Header%s" % (fdir, template))
#self.read_atmos(fdir, template)
# Snapshot number
self.snap = int(template[1:])
self.filename=''
self.inttostring=inttostring
self.siter = template
self.file_root = template
self.transunits = False
self.cstagop = False # do not use cstagger from Bifrost when loading
self.hion = False # do not use HION from Bifrost when loading
tabfile = os.path.join(self.fdir, 'tabparam.in')
if os.access(tabfile, os.R_OK):
self.rhoee = Rhoeetab(tabfile=tabfile,fdir=fdir,radtab=False)
self.genvar(order=self.order)
document_vars.create_vardict(self)
document_vars.set_vardocs(self)
def read_header(self, headerfile):
tmp = np.loadtxt(headerfile)
#self.dims_orig = tmp[:3].astype("i")
dims = tmp[:3].astype("i")
deltas = tmp[3:6]
#if len(tmp) == 10: # Old version of MURaM, deltas stored in km
# self.uni.uni['l'] = 1e5 # JMS What is this for?
self.time= tmp[6]
layout = np.loadtxt('layout.order')
self.order = layout[0:3].astype(int)
#if len(self.order) == 0:
# self.order = np.array([0,2,1]).astype(int)
#self.order = tmp[-3:].astype(int)
# dims = [1,2,0] 0=z,
#dims = np.array((self.dims_orig[self.order[2]],self.dims_orig[self.order[0]],self.dims_orig[self.order[1]]))
#deltas = np.array((deltas[self.order[2]],deltas[self.order[0]],deltas[self.order[1]])).astype('float32')
deltas = deltas[self.order]
dims = dims[self.order]
if self.sel_units=='cgs':
deltas *= self.uni.uni['l']
self.x = np.arange(dims[0])*deltas[0]
self.y = np.arange(dims[1])*deltas[1]
self.z = np.arange(dims[2])*deltas[2]
if self.iz0 is not None:
self.z = self.z - self.z[self.iz0]
self.dx, self.dy, self.dz = deltas[0], deltas[1], deltas[2]
self.nx, self.ny, self.nz = dims[0], dims[1], dims[2]
if self.nx > 1:
self.dx1d = np.gradient(self.x)
else:
self.dx1d = np.zeros(self.nx)
if self.ny > 1:
self.dy1d = np.gradient(self.y)
else:
self.dy1d = np.zeros(self.ny)
if self.nz > 1:
self.dz1d = np.gradient(self.z)
else:
self.dz1d = np.zeros(self.nz)
def read_atmos(self, fdir, template):
ashape = (self.nx, self.nz, self.ny)
file_T = "%s/eosT%s" % (fdir, template)
#When 0-th dimension is vertical, 1st is x, 2nd is y
# when 1st dimension is vertical, 0th is x.
# remember to swap names
bfact = np.sqrt(4 * np.pi)
if os.path.isfile(file_T):
self.tg = np.memmap(file_T, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_press = "%s/eosP%s" % (fdir, template)
if os.path.isfile(file_press):
self.pressure = np.memmap(file_press, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_rho = "%s/result_prim_0%s" % (fdir, template)
if os.path.isfile(file_rho):
self.rho = np.memmap(file_rho, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_vx = "%s/result_prim_1%s" % (fdir, template)
if os.path.isfile(file_vx):
self.vx = np.memmap(file_vx, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_vz = "%s/result_prim_2%s" % (fdir, template)
if os.path.isfile(file_vz):
self.vz = np.memmap(file_vz, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_vy = "%s/result_prim_3%s" % (fdir, template)
if os.path.isfile(file_vy):
self.vy = np.memmap(file_vy, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_ei = "%s/result_prim_4%s" % (fdir, template)
if os.path.isfile(file_ei):
self.ei = np.memmap(file_ei, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_Bx = "%s/result_prim_5%s" % (fdir, template)
if os.path.isfile(file_Bx):
self.bx = np.memmap(file_Bx, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
self.bx = self.bx * bfact
file_Bz = "%s/result_prim_6%s" % (fdir, template)
if os.path.isfile(file_Bz):
self.bz = np.memmap(file_Bz, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
self.bz = self.bz * bfact
file_By = "%s/result_prim_7%s" % (fdir, template)
if os.path.isfile(file_By):
self.by = np.memmap(file_By, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
self.by = self.by * bfact
file_tau = "%s/tau%s" % (fdir, template)
if os.path.isfile(file_tau):
self.tau = np.memmap(file_tau, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
file_Qtot = "%s/Qtot%s" % (fdir, template)
if os.path.isfile(file_Qtot):
self.qtot = np.memmap(file_Qtot, mode="r", shape=ashape,
dtype=self.dtype,
order="F")
# from moments to velocities
#if self.prim:
# if hasattr(self,'rho'):
# if hasattr(self,'vx'):
# self.vx /= self.rho
# if hasattr(self,'vy'):
# self.vy /= self.rho
# if hasattr(self,'vz'):
# self.vz /= self.rho
def read_Iout(self):
tmp = np.fromfile(self.fdir+'I_out.'+self.siter)
size = tmp[1:3].astype(int)
time = tmp[3]
return tmp[4:].reshape([size[1],size[0]]).swapaxes(0,1),size,time
def read_slice(self,var,depth):
tmp = np.fromfile(self.fdir+var+'_slice_'+depth+'.'+self.siter)
nslices = tmp[0].astype(int)
size = tmp[1:3].astype(int)
time = tmp[3]
return tmp[4:].reshape([nslices,size[1],size[0]]).swapaxes(1,2),nslices,size,time
def read_dem(self,path,max_bins=None):
tmp = np.fromfile(path+'corona_emission_adj_dem_'+self.fdir+'.'+self.siter)
bins = tmp[0].astype(int)
size = tmp[1:3].astype(int)
time = tmp[3]
lgTmin = tmp[4]
dellgT = tmp[5]
dem = tmp[6:].reshape([bins,size[1],size[0]]).transpose(2,1,0)
taxis = lgTmin+dellgT*np.arange(0,bins+1)
X_H = 0.7
dem = dem*X_H*0.5*(1+X_H)*3.6e19
if max_bins is not None:
if bins > max_bins :
dem = dem[:,:,0:max_bins]
else :
tmp=dem
dem=
|
np.zeros([size[0],size[1],max_bins])
|
numpy.zeros
|
"""
Generate a large batch of samples from a super resolution model, given a batch
of samples from a regular model from image_sample.py.
"""
import argparse
import os
import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist
from improved_diffusion import dist_util, logger
from improved_diffusion.script_util import (
sr_model_and_diffusion_defaults,
sr_create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
load_config_to_args
)
from improved_diffusion.image_datasets import load_superres_data, load_tokenizer, tokenize
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
config_path = args.config_path
have_config_path = config_path != ""
using_config = have_config_path and os.path.exists(config_path)
if using_config:
args, _ = load_config_to_args(config_path, args)
using_ground_truth = args.base_data_dir != "" and os.path.exists(args.base_data_dir)
tokenizer = None
if True: # using_ground_truth:
tokenizer = load_tokenizer(max_seq_len=args.max_seq_len, char_level=args.char_level)
logger.log("creating model...")
model_diffusion_args = args_to_dict(args, sr_model_and_diffusion_defaults().keys())
model_diffusion_args['tokenizer'] = tokenizer
model, diffusion = sr_create_model_and_diffusion(
**model_diffusion_args
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
model.eval()
logger.log("loading data...")
print(f"args.base_data_dir: {repr(args.base_data_dir)} | using_ground_truth: {using_ground_truth} | colorize: {args.colorize}")
n_texts = args.num_samples // args.batch_size
if n_texts > 1:
raise ValueError("num_samples != bs TODO")
if using_ground_truth:
data = load_superres_data(
args.base_data_dir,
batch_size=n_texts,
large_size=args.large_size,
small_size=args.small_size,
class_cond=args.class_cond,
txt=args.txt,
monochrome=args.monochrome,
deterministic=True,
offset=args.base_data_offset,
colorize=args.colorize
)
data = (model_kwargs for _, model_kwargs in data)
else:
data = load_data_for_worker(args.base_samples, args.batch_size, args.class_cond, args.txt, colorize=args.colorize)
logger.log("creating samples...")
if args.seed > -1:
print(f"setting seed to {args.seed}")
th.manual_seed(args.seed)
all_images = []
image_channels = 1 if args.monochrome else 3
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = next(data)
if using_ground_truth:
print(f"text: {repr(model_kwargs['txt'])}")
if args.txt_override != "":
model_kwargs['txt'] = [args.txt_override for _ in model_kwargs['txt']]
print(f"overridden with: {repr(model_kwargs['txt'])}")
txt = tokenize(tokenizer, model_kwargs["txt"])
txt = th.as_tensor(txt).to(dist_util.dev())
model_kwargs["txt"] = txt
for k, v in model_kwargs.items():
print((k, v.shape))
model_kwargs['low_res'] = th.cat([model_kwargs['low_res'] for _ in range(args.batch_size)])
model_kwargs['txt'] = th.cat([model_kwargs['txt'] for _ in range(args.batch_size)])
for k, v in model_kwargs.items():
print((k, v.shape))
model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
if args.clf_free_guidance:
txt_uncon = args.batch_size * tokenize(tokenizer, [args.txt_drop_string])
txt_uncon = th.as_tensor(txt_uncon).to(dist_util.dev())
model_kwargs["guidance_scale"] = args.guidance_scale
model_kwargs["unconditional_model_kwargs"] = {
"txt": txt_uncon,
"low_res": model_kwargs["low_res"]
}
sample = diffusion.p_sample_loop(
model,
(args.batch_size, image_channels, args.large_size, args.large_size),
clip_denoised=args.clip_denoised,
model_kwargs=model_kwargs,
)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
all_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
dist.all_gather(all_samples, sample) # gather not supported with NCCL
for sample in all_samples:
all_images.append(sample.cpu().numpy())
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
if dist.get_rank() == 0:
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
logger.log(f"saving to {out_path}")
np.savez(out_path, arr)
dist.barrier()
logger.log("sampling complete")
def load_data_for_worker(base_samples, batch_size, class_cond, txt, colorize=False):
with bf.BlobFile(base_samples, "rb") as f:
obj = np.load(f)
image_arr = obj["arr_0"]
if class_cond or txt:
label_arr = obj["arr_1"]
rank = dist.get_rank()
num_ranks = dist.get_world_size()
buffer = []
label_buffer = []
while True:
for i in range(rank, len(image_arr), num_ranks):
buffer.append(image_arr[i])
if class_cond or txt:
label_buffer.append(label_arr[i])
if len(buffer) == batch_size:
batch = th.from_numpy(np.stack(buffer)).float()
batch = batch / 127.5 - 1.0
batch = batch.permute(0, 3, 1, 2)
if colorize:
batch = batch.mean(dim=1, keepdim=True)
res = dict(low_res=batch)
if class_cond or txt:
key = "txt" if txt else "y"
res[key] = th.from_numpy(
|
np.stack(label_buffer)
|
numpy.stack
|
import numpy as np
from fit_ell import fit_ellipse
from fit_ell import ellipse_var
from cv2 import cv2
import math
x = [316, 17, 335, 637]
y = [454, 251, 91, 298]
def transform(x, y, img):
xs = np.array([0, 1, 1, 0])
ys = np.array([0, 0, 1, 1])
As =
|
np.array([[xs[0], xs[1], xs[2]], [ys[0], ys[1], ys[2]], [1, 1, 1]])
|
numpy.array
|
import numpy as np
import operator as op
from functools import reduce
def ncr(n, r):
r = min(r, n-r)
numerator = reduce(op.mul, range(n, n-r, -1), 1)
denominator = reduce(op.mul, range(1, r+1), 1)
return numerator / denominator
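# Illustrative check (not part of the original module): ncr(n, r) is the binomial
# coefficient C(n, r); note the true division above makes it return a float.
def _ncr_example():
    assert ncr(5, 2) == 10  # C(5, 2) = 10, returned as 10.0
    return ncr(5, 2)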
def scale_mean(x, q, w, scalar):
mean = np.mean(x, axis=0, keepdims=True)
x -= mean
q -= mean
scale = np.max(np.abs(x)) / scalar
x /= scale
q /= scale
w /= scale
return x, q, w
def simple_lsh(x, q):
nx, d = np.shape(x)
nq, _ = np.shape(q)
x_ = np.empty(shape=(nx, d + 1))
q_ = np.empty(shape=(nq, d + 1))
x_[:, :d] = x
q_[:, :d] = q
norms = np.linalg.norm(x, axis=1)
m =
|
np.max(norms)
|
numpy.max
|
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is setup using script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
            if polyname in ('sum', 'man', 'phi', 'sam', 'sco', 'mak', 'jap'):
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
        'unc': np.float64, 'etype': str, 'ID': np.int64, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
        if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
                s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lat <= 21)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
    # Set default uncertainties for each event type
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining interp outboard trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
    if slab not in ('hal', 'him', 'pam', 'hin', 'sol', 'sum', 'cas'):
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = pd.DataFrame()
loncuts,latcuts,elistcuts = s2f.getlatloncutoffs(lons,lats,eventlist,printtest)
''' ------ (15) Initialize arrays for Section 2 ------ '''
# Creates list of events that were used for the model based on ID
used_all = np.zeros((1, 2))
used_TO = np.zeros((1, 2))
warnings.filterwarnings('ignore', 'Mean of empty slice.')
pd.options.mode.chained_assignment = None
'''Section 2: First loop
This Accomplishes:
1) Calculate error for each used tomography model.
This is accomplished by determining the difference between measured
depths for tomography and earthquake data, which will be used
outside of the loop.
2) Identify data to constrain depth/coordinate of center of Benioff Zone.
2a) Identify local strike, dip, and depth of Slab1.0.
If Slab 1.0 does not exist, acquire strike from closest trench
location with a strike oriented perpendicularly to this lon/lat.
If extending beyond Slab1.0 depths perpendicularly, find nearest and
most perpendicular point on Slab1.0, and define depth to
search from based on dip and depth of that point on Slab1.0. The
dip is defined as the dip of the local Slab1.0 point.
If extending along strike from Slab1.0, define depth to search from
based on mean depth of data within defined radius of node. The
dip of the node is defined as 0.
2b) Filter by ellipsoid oriented perpendicularly to Slab1.0.
If the local dip is less than mindip, orient ellipsoid vertically
and along strike found in (2a).
If the local dip is greater than mindip, orient ellipsoid
perpendicular to strike/dip found in (2a).
The long axis of the ellipse is defined as radius1, the short axis
is defined as radius2.
The shallow extent of the ellipsoid is defined as sdr at depths
above seismo_thick, and is tapered to 3*sdr at depths greater
than seismo_thick.
The deep extent of the ellipsoid is defined as sdr at depths above
seismo_thick, and is tapered to ddr at depths greater than
seismo_thick.
2c) Nodes outboard of the trench are only constrained by bathymetry.
Nodes inboard of the trench are constrained by all but bathymetry.
        2d) Conditionally add average active source/average receiver functions.
If within the distance of the longest AS profile from the trench
identify the average AS profile depth at that distance from
trench. If there is no active source point within the search
ellipsoid defined in (2b), add an average active source data
point to the set of data to constrain the depth at this node.
            Receiver functions in cam and alu are utilized similarly, with
            defined distances from the trench and distances along strike from
            key profiles that are used in the absence of seismicity.
2e) If information other than tomography is available above 300 km
depth, all tomography is filtered at that node.
        2f) If fewer than two data points are available to constrain a node, no
depth is resolved at that node.
        2g) If |strike of Slab1.0 at node - strike of Slab1.0 at farthest data|
            > minstk, filter data at ends until < minstk.
If this node is outside of Slab1.0, reduce long axis of search
ellipsoid prior to starting filters.
    The output of this loop is two numpy arrays and a list of nodes with data:
used_TO: local difference between tomography and earthquake depths and
a tomography dataset identifier
used_all: indices for the data used and their associated nodes
This one is created to prevent the need for re-filtering
in later loops
'''
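# A minimal sketch (not the actual logic inside loops.loop1) of the ellipsoid
# depth-extent taper described in (2b) above: the shallow and deep extents equal
# sdr above seismo_thick, then taper toward 3*sdr and ddr respectively. The
# linear taper and its width (one seismo_thick) are assumptions for
# illustration only.
def _sketch_ellipsoid_extents(depth, seismo_thick, sdr, ddr):
    if depth <= seismo_thick:
        return sdr, sdr
    frac = min((depth - seismo_thick) / seismo_thick, 1.0)
    shallow = sdr + frac * (3.0 * sdr - sdr)   # tapered toward 3*sdr
    deep = sdr + frac * (ddr - sdr)            # tapered toward ddr
    return shallow, deep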
print("Start Section 2 of 7: First loop")
lons1 = (np.ones(len(lons))*-9999).astype(np.float64)
lats1 = (np.ones(len(lons))*-9999).astype(np.float64)
deps1 = (np.ones(len(lons))*-9999).astype(np.float64)
strs1 = (np.ones(len(lons))*-9999).astype(np.float64)
dips1 = (np.ones(len(lons))*-9999).astype(np.float64)
nIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
aleng = (np.ones(len(lons))*-9999).astype(np.float64)
bleng = (np.ones(len(lons))*-9999).astype(np.float64)
cleng = (np.ones(len(lons))*-9999).astype(np.float64)
sleng = (np.ones(len(lons))*-9999).astype(np.float64)
dleng = (np.ones(len(lons))*-9999).astype(np.float64)
elons1 = (np.ones(len(lons))*-9999).astype(np.float64)
elats1 = (np.ones(len(lons))*-9999).astype(np.float64)
edeps1 = (np.ones(len(lons))*-9999).astype(np.float64)
estrs1 = (np.ones(len(lons))*-9999).astype(np.float64)
edips1 = (np.ones(len(lons))*-9999).astype(np.float64)
enIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
ealeng = (np.ones(len(lons))*-9999).astype(np.float64)
ebleng = (np.ones(len(lons))*-9999).astype(np.float64)
ecleng = (np.ones(len(lons))*-9999).astype(np.float64)
esleng = (np.ones(len(lons))*-9999).astype(np.float64)
edleng = (np.ones(len(lons))*-9999).astype(np.float64)
if args.nCores is not None:
if args.nCores > 1 and args.nCores < 8:
pooling = True
elif args.nCores == 1:
pooling = False
else:
pooling = False
else:
pooling = False
cutcount = 1
allnewnodes = None
for cut in range(len(loncuts)):
theselats = latcuts[cut]
theselons = loncuts[cut]
theseevents = elistcuts[cut]
indices = range(len(theselats))
if cut == 0:
i2 = 0
cutcount+=1
if pooling:
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, theseevents,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices) #$$#
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
lons1[i2] = thisnode[0]
lats1[i2] = thisnode[1]
deps1[i2] = thisnode[2]
strs1[i2] = thisnode[3]
dips1[i2] = thisnode[4]
nIDs1[i2] = thisnode[5]
aleng[i2] = thisnode[6]
bleng[i2] = thisnode[7]
cleng[i2] = thisnode[8]
sleng[i2] = thisnode[14]
dleng[i2] = thisnode[15]
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
newnodes = thisnode[12]
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not thisnode[13] and np.isfinite(thisnode[2]):
elons1[i2] = thisnode[0]
elats1[i2] = thisnode[1]
edeps1[i2] = thisnode[2]
estrs1[i2] = thisnode[3]
edips1[i2] = thisnode[4]
enIDs1[i2] = thisnode[5]
ealeng[i2] = thisnode[6]
ebleng[i2] = thisnode[7]
ecleng[i2] = thisnode[8]
esleng[i2] = thisnode[14]
edleng[i2] = thisnode[15]
i2 += 1
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aaleng, ableng, acleng, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, asleng, adleng = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, theseevents, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
lons1[i2] = alon
lats1[i2] = alat
deps1[i2] = alocdep
strs1[i2] = alocstr
dips1[i2] = alocdip
nIDs1[i2] = anID
aleng[i2] = aaleng
bleng[i2] = ableng
cleng[i2] = acleng
sleng[i2] = asleng
dleng[i2] = adleng
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not anydata and np.isfinite(alocdep):
elons1[i2] = alon
elats1[i2] = alat
edeps1[i2] = alocdep
estrs1[i2] = alocstr
edips1[i2] = alocdip
enIDs1[i2] = anID
ealeng[i2] = aaleng
ebleng[i2] = ableng
ecleng[i2] = acleng
esleng[i2] = asleng
edleng[i2] = adleng
i2 += 1
lons1 = lons1[lons1>-999]
lats1 = lats1[lats1>-999]
deps1 = deps1[(deps1>-999)|np.isnan(deps1)]
strs1 = strs1[strs1>-999]
dips1 = dips1[dips1>-999]
nIDs1 = nIDs1[nIDs1>-999]
aleng = aleng[aleng>-999]
bleng = bleng[bleng>-999]
cleng = cleng[cleng>-999]
sleng = sleng[sleng>-999]
dleng = dleng[dleng>-999]
elons1 = elons1[edleng>-999]
elats1 = elats1[edleng>-999]
edeps1 = edeps1[(edeps1>-999)|np.isnan(edeps1)]
estrs1 = estrs1[edleng>-999]
edips1 = edips1[edleng>-999]
enIDs1 = enIDs1[edleng>-999]
ealeng = ealeng[edleng>-999]
ebleng = ebleng[edleng>-999]
ecleng = ecleng[edleng>-999]
esleng = esleng[edleng>-999]
edleng = edleng[edleng>-999]
testdf = pd.DataFrame({'lon':lons1,'lat':lats1,'depth':deps1,'strike':strs1,'dip':dips1,'id':nIDs1,'alen':aleng,'blen':bleng,'clen':cleng,'slen':sleng,'dlen':dleng})
testdf.to_csv('firstloop.csv',header=True,index=False,na_rep=np.nan)
if allnewnodes is not None:
theseIDs = []
for i in range(len(allnewnodes)):
if allnewnodes[i,1]>0:
thisnID = int('%i%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*10))
else:
thisnID = int('%i0%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*-10))
theseIDs.append(thisnID)
newlonsdf1 = pd.DataFrame({'lon':allnewnodes[:,0],'lat':allnewnodes[:,1],'nID':theseIDs})
newlonsdf = newlonsdf1.drop_duplicates(['nID'])
theselons = newlonsdf['lon'].values
theselats = newlonsdf['lat'].values
if grid == 0.2:
grid2 = 0.1
elif grid == 0.1:
grid2 = 0.05
else:
grid2 = grid
slab1guide,slab1query = s2f.makeReference(slab1data,theselons,theselats,grid2,printtest,slab)
newlats = []
newlons = []
newdeps = []
newstrs = []
newdips = []
newnIDs = []
newalen = []
newblen = []
newclen = []
newslen = []
newdlen = []
enewlats = []
enewlons = []
enewdeps = []
enewstrs = []
enewdips = []
enewnIDs = []
enewalen = []
enewblen = []
enewclen = []
enewslen = []
enewdlen = []
if pooling:
indices = range(len(theselons))
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, eventlist,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices)
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
newlons.append(thisnode[0])
newlats.append(thisnode[1])
newdeps.append(thisnode[2])
newstrs.append(thisnode[3])
newdips.append(thisnode[4])
newnIDs.append(thisnode[5])
newalen.append(thisnode[6])
newblen.append(thisnode[7])
newclen.append(thisnode[8])
newslen.append(thisnode[14])
newdlen.append(thisnode[15])
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not thisnode[13] and np.isfinite(thisnode[2]):
enewlons.append(thisnode[0])
enewlats.append(thisnode[1])
enewdeps.append(thisnode[2])
enewstrs.append(thisnode[3])
enewdips.append(thisnode[4])
enewnIDs.append(thisnode[5])
enewalen.append(thisnode[6])
enewblen.append(thisnode[7])
enewclen.append(thisnode[8])
enewslen.append(thisnode[14])
enewdlen.append(thisnode[15])
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aalen, ablen, aclen, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, aslen, adlen = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, eventlist, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
newlons.append(alon)
newlats.append(alat)
newdeps.append(alocdep)
newstrs.append(alocstr)
newdips.append(alocdip)
newnIDs.append(anID)
newalen.append(aalen)
newblen.append(ablen)
newclen.append(aclen)
newslen.append(aslen)
newdlen.append(adlen)
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not anydata and np.isfinite(alocdep):
enewlons.append(alon)
enewlats.append(alat)
enewdeps.append(alocdep)
enewstrs.append(alocstr)
enewdips.append(alocdip)
enewnIDs.append(anID)
enewalen.append(aalen)
enewblen.append(ablen)
enewclen.append(aclen)
enewslen.append(aslen)
enewdlen.append(adlen)
#np.savetxt('%s_diptest.csv'%slab, allnewnodes, header='lon,lat,depth,strike,dip',fmt='%.2f', delimiter=',',comments='')
if printtest:
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(131)
con = ax1.scatter(lons1,lats1,c=dips1,s=10,edgecolors='none',cmap='plasma')
ax1.set_ylabel('Latitude')
ax1.axis('equal')
plt.grid()
title = 'Diptest'
ax1.set_title(title)
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax2 = fig.add_subplot(132)
con = ax2.scatter(allnewnodes[:,0], allnewnodes[:,1],c=allnewnodes[:,1],s=10,edgecolors='none',cmap='plasma')
ax2.set_xlabel('Longitude')
ax2.set_ylabel('Latitude')
ax2.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax3 = fig.add_subplot(133)
con = ax3.scatter(newlons, newlats,c=newdips,s=10,edgecolors='none',cmap='plasma')
ax3.set_xlabel('Longitude')
ax3.set_ylabel('Latitude')
ax3.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
figtitle = 'diptest.png'
fig.savefig(figtitle)
plt.close()
lons1 = np.append(lons1, [newlons])
lats1 = np.append(lats1, [newlats])
deps1 = np.append(deps1, [newdeps])
strs1 = np.append(strs1, [newstrs])
dips1 = np.append(dips1, [newdips])
nIDs1 = np.append(nIDs1, [newnIDs])
aleng = np.append(aleng, [newalen])
bleng = np.append(bleng, [newblen])
cleng = np.append(cleng, [newclen])
sleng = np.append(sleng, [newslen])
dleng = np.append(dleng, [newdlen])
elons1 = np.append(elons1, [enewlons])
elats1 = np.append(elats1, [enewlats])
edeps1 = np.append(edeps1, [enewdeps])
estrs1 = np.append(estrs1, [enewstrs])
edips1 = np.append(edips1, [enewdips])
enIDs1 = np.append(enIDs1, [enewnIDs])
ealeng = np.append(ealeng, [enewalen])
ebleng = np.append(ebleng, [enewblen])
ecleng = np.append(ecleng, [enewclen])
esleng = np.append(esleng, [enewslen])
edleng = np.append(edleng, [enewdlen])
#print ('lon',len(elons1),'lat',len(elats1),'ogdep',len(edeps1),'ogstr',len(estrs1),'ogdip',len(edips1),'nID',len(enIDs1),'alen',len(ealeng),'blen',len(ebleng),'clen',len(ecleng),'slen',len(esleng),'dlen',len(edleng))
emptynodes = pd.DataFrame({'lon':elons1,'lat':elats1,'ogdep':edeps1,'ogstr':estrs1,'ogdip':edips1,'nID':enIDs1,'alen':ealeng,'blen':ebleng,'clen':ecleng,'slen':esleng,'dlen':edleng})
#emptynodes.to_csv('emptynodes.csv',header=True,index=False)
refdeps = pd.DataFrame({'lon':lons1, 'lat':lats1, 'ogdep':deps1})
if global_average:
''' # need to fix this after adjusting based on BA depth at trench
AA_global['depthtest'] = (AA_global['depth'].values*100).astype(int)
for index, row in elistAA.iterrows():
depthAA = row['depth']
depthtestAA = int(100*row['depth'])
thisdepth = AA_global[AA_global.depthtest == depthtestAA]
uncAA = thisdepth['unc'].values[0]
elistAA.loc[elistAA.depth == depthAA, 'unc'] = uncAA*2
'''
elistAA['unc'] = 10.0
elistcuts.append(elistAA)
eventlist2 = pd.concat(elistcuts,sort=True)
eventlist = eventlist2.reset_index(drop=True)
del eventlist2
eventlist = eventlist.drop_duplicates(['ID'])
eventlist = eventlist.reset_index(drop=True)
# Remove first line of zeros
used_TO = used_TO[~np.all(used_TO ==0, axis=1)]
used_all = used_all[~np.all(used_all ==0, axis=1)]
'''Section 3: Calculate tomography uncertainties
Here we use the output from the first loop to calculate tomography uncertainties.
For each tomography dataset, we calculate the standard deviation of the distribution of "differences".
We apply this standard deviation as the uncertainty value for each tomography datum from that dataset.
'''
print("Start Section 3 of 7: Assigning tomography uncertainties")
if tomo:
for idx, src in enumerate(tomo_sets):
tomog = used_TO[:][used_TO[:, 1] == idx]
tmp_std = np.std(tomog[:, 0])
if tmp_std > 40.:
tmp_std = 40.
elif tmp_std < 15.:
tmp_std = 15.
elif np.isnan(tmp_std):
tmp_std = 40
            eventlist.loc[eventlist['src'] == src, 'unc'] = tmp_std
'''Section 4: Second loop
The purpose of this loop is to determine a set of "pre-shifted" slab points that do not utilize receiver function data.
This output dataset will represent a transition from slab surface at shallow depths to slab center at deeper depths.
The only output from this loop is an array of the form [ lat lon dep unc nodeID ]
'''
print("Start Section 4 of 7: Second loop")
bzlons, bzlats, bzdeps, stds2, nIDs2 = [], [], [], [], []
lats2, lons2, str2, dip2, centsurf = [], [], [], [], []
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
baleng, bbleng, bcleng, onlyto = [], [], [], []
rlist = pd.DataFrame()
if pooling:
pool2 = Pool(args.nCores)
npass = args.nCores
partial_loop2 = partial(loops.loop2, testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng)
indices = range(len(lats1))
pts2 = pool2.map(partial_loop2, indices)
pool2.close()
pool2.join()
for i in range(len(indices)):
thisnode = pts2[i]
if np.isfinite(thisnode[0]):
bzlons.append(thisnode[0])
bzlats.append(thisnode[1])
bzdeps.append(thisnode[2])
stds2.append(thisnode[3])
nIDs2.append(thisnode[4])
lats2.append(thisnode[5])
lons2.append(thisnode[6])
str2.append(thisnode[7])
dip2.append(thisnode[8])
centsurf.append(thisnode[9])
baleng.append(thisnode[20])
bbleng.append(thisnode[21])
bcleng.append(thisnode[22])
onlyto.append(thisnode[23])
if np.isfinite(thisnode[10]):
bilats.append(thisnode[10])
bilons.append(thisnode[11])
binods.append(thisnode[12])
bistds.append(thisnode[13])
biindx.append(thisnode[14])
bistrs.append(thisnode[15])
bidips.append(thisnode[16])
bideps.append(thisnode[17])
rlist = thisnode[18]
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*thisnode[4]
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = thisnode[19]
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
del pts2
else:
npass = 1
for nodeno in range(len(lats1)):
bpeak_lon, bpeak_lat, bpeak_depth, bstd, bnID, blat, blon, bcstr, bcdip, bcentsurf, bbilats, bbilons, bbinods, bbistds, bbiindx, bbistrs, bbidips, bbideps, brlist, bpremulti, alen, blen, clen, onlyt = loops.loop2(testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng, nodeno)
if np.isfinite(bpeak_lon):
bzlons.append(bpeak_lon)
bzlats.append(bpeak_lat)
bzdeps.append(bpeak_depth)
stds2.append(bstd)
nIDs2.append(bnID)
lats2.append(blat)
lons2.append(blon)
str2.append(bcstr)
dip2.append(bcdip)
centsurf.append(bcentsurf)
baleng.append(alen)
bbleng.append(blen)
bcleng.append(clen)
onlyto.append(onlyt)
if np.isfinite(bbilats):
bilats.append(bbilats)
bilons.append(bbilons)
binods.append(bbinods)
bistds.append(bbistds)
biindx.append(bbiindx)
bistrs.append(bbistrs)
bidips.append(bbidips)
bideps.append(bbideps)
rlist = brlist
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*bnID
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = bpremulti
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
tmp_res = pd.DataFrame({'bzlon':bzlons,'bzlat':bzlats,'depth':bzdeps,'stdv':stds2,'nID':nIDs2,'lat':lats2,'lon':lons2,'ogstr':str2,'ogdip':dip2,'centsurf':centsurf,'alen':baleng,'blen':bbleng,'clen':bcleng,'onlyto':onlyto})
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, dip, alen, printtest)
tmp_res.loc[tmp_res.nID == nID, 'bzlon'] = peak_lon
tmp_res.loc[tmp_res.nID == nID, 'bzlat'] = peak_lat
tmp_res.loc[tmp_res.nID == nID, 'depth'] = peak_depth
tmp_res = s2f.addGuidePoints(tmp_res, slab)
if slab == 'sol':
tmp_res = tmp_res[(tmp_res.bzlon>142) & (tmp_res.bzlon<164)]
if slab == 'sul':
tmp_res = tmp_res[(tmp_res.bzlon<123.186518923) | (tmp_res.depth<100)]
tmp_res = tmp_res[(tmp_res.bzlon<122.186518923) | (tmp_res.depth<200)]
# Save data used to file
used_IDs = used_all[:, 1]
used_data = eventlist[eventlist['ID'].isin(used_IDs)]
used_data = used_data[['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', 'S1', 'D1', 'R1', 'S2', 'D2', 'R2', 'src']]
used_data = used_data.drop_duplicates(['ID'])
used_data.loc[used_data.lon < 0, 'lon']+=360
if slab == 'hel':
used_data.loc[used_data.etype == 'CP', 'etype']='RF'
used_data.to_csv(dataFile, header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
#tmp_res.to_csv('nodetest.csv', header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
'''Section 5: Calculate shifts
Here we use the output of the second loop to calculate shifting locations for non-RF results.
    A user-specified lithospheric thickness can be read in, or lithospheric thickness will be calculated from the nearest oceanic plate age.
    The taper and fracshift are set in the parameter file for each subduction zone; fracshift was determined by testing each individual
    subduction zone to match seismicity. Shift direction is determined by the strike and dip of a surface created using the output from the second loop.
A clipping mask is also created in this section using the shifted output data.
'''
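# A minimal geometric sketch (not s2f.slabShift_noGMT): moving a node a distance
# shift_km along the upward unit normal of a plane with the given strike and dip
# (right-hand rule, dip direction = strike + 90 degrees). The surface fit, sign
# convention, and the shift magnitude actually used (from fracshift and
# lithospheric thickness) are handled inside slabShift_noGMT and are not
# reproduced here; the 111.19 km/degree conversion is a rough assumption.
def _sketch_shift_node(lon, lat, depth_km, strike_deg, dip_deg, shift_km):
    phi, delta = math.radians(strike_deg), math.radians(dip_deg)
    # upward unit normal in (east, north, up) coordinates
    n_e = math.sin(delta) * math.cos(phi)
    n_n = -math.sin(delta) * math.sin(phi)
    n_u = math.cos(delta)
    km_per_deg = 111.19
    new_lon = lon + shift_km * n_e / (km_per_deg * math.cos(math.radians(lat)))
    new_lat = lat + shift_km * n_n / km_per_deg
    new_dep = depth_km - shift_km * n_u   # depth is positive down
    return new_lon, new_lat, new_dep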
print("Start Section 5 of 7: Calculate shifts")
# Calculate shift for each node
print(" Calculating shift...")
surfnode = 0.5
data0 = tmp_res[(tmp_res.stdv > -0.000001)&(tmp_res.stdv < 0.000001)]
tmp_res = tmp_res[(tmp_res.stdv < -0.000001)|(tmp_res.stdv > 0.000001)]
if use_box == 'yes':
if lonmin<0:
lonmin+=360
if lonmax<0:
lonmax+=360
TR_data = TR_data[(TR_data.lon<lonmax)&(TR_data.lon>lonmin)]
TR_data = TR_data[(TR_data.lat<latmax)&(TR_data.lat>latmin)]
TR_data = TR_data.reset_index(drop=True)
# Read in age grid files
ages = gmt.GMTGrid.load(agesFile)
ages_error = gmt.GMTGrid.load(ageerrorsFile)
shift_out, maxthickness = s2f.slabShift_noGMT(tmp_res, node, T, TR_data, seismo_thick, taper, ages, ages_error, filt, slab, maxthickness, grid, 'bzlon', 'bzlat', 'depth', fracS, npass, meanBA, printtest, kdeg, knot_no, rbfs, use_box)
del ages
del ages_error
tmp_res['pslon'] = tmp_res['lon'].values*1.0
tmp_res['pslat'] = tmp_res['lat'].values*1.0
tmp_res['psdepth'] = tmp_res['depth'].values*1.0
tmp_res = tmp_res[['pslon', 'pslat', 'bzlon', 'bzlat', 'psdepth', 'stdv', 'nID', 'ogstr', 'ogdip', 'centsurf', 'alen', 'blen', 'clen']]
shift_out = shift_out.merge(tmp_res)
shift_out.loc[shift_out.pslon < 0, 'pslon']+=360
shift_out['avstr'] = np.nan
shift_out['avdip'] = np.nan
shift_out['avrke'] = np.nan
'''Section 6: Third loop
The purpose of this loop is to produce the final location measurements for the slab.
Here we edit the input data by adding the shift to the depths, then calculate a PDF with receiver functions included.
    The only output from this loop is a 13-column array with all results necessary to build the output.
Output is of the format [ lat lon dep unc shift_mag shift_unc avg_str avg_dip avg_rak pre-shift_dep pre-shift_str pre-shift_dip nodeID ]
'''
print("Start Section 6 of 7: Third (final) loop")
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
if pooling:
pool3 = Pool(args.nCores)
partial_loop3 = partial(loops.loop3, shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper)
indices = shift_out['nID'].values
pts3 = pool3.map(partial_loop3, indices)
pool3.close()
pool3.join()
for i in range(len(indices)):
thisnode = pts3[i]
if np.isfinite(thisnode[0]):
nID = thisnode[13]
shift_out.loc[shift_out.nID == nID, 'depth'] = thisnode[0]
shift_out.loc[shift_out.nID == nID, 'stdv'] = thisnode[1]
shift_out.loc[shift_out.nID == nID, 'avstr'] = thisnode[2]
shift_out.loc[shift_out.nID == nID, 'avdip'] = thisnode[3]
shift_out.loc[shift_out.nID == nID, 'avrke'] = thisnode[4]
shift_out.loc[shift_out.nID == nID, 'lon'] = thisnode[15]
shift_out.loc[shift_out.nID == nID, 'lat'] = thisnode[16]
                if np.isfinite(thisnode[5]):

"""
Collects some simple classes for estimation in Python, including: Tobit.
"""
import os
import numpy as np
import pandas as pd
from scipy import stats
from scipy import optimize
# Plotting features require:
import matplotlib as mpl
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
plt.style.use('seaborn-whitegrid')
mpl.style.use('seaborn')
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
class Tobit():
"""
"""
def __init__(self,x=None,y=None,**kwargs):
self.x = x
self.y = y
self.base_par()
self.upd_par(kwargs)
def base_par(self):
# Options to pass to minimizer
self.intercept=True
self.method = 'Nelder-Mead'
self.jac = None
self.hess = None
self.hessp = None
self.bounds=None
self.constraints=None
self.tol=None
self.callback=None
self.options=None
def upd_par(self,kwargs):
for key,value in kwargs.items():
setattr(self,key,value)
@staticmethod
def tobit_ll(y,x,theta,intercept=True):
if intercept:
X = np.append(np.ones([x.shape[0],1]),x,axis=1)
else:
X = x
return -sum((y==0) * np.log(1-stats.norm.cdf(np.matmul(X,theta[:-1])/theta[-1])) + (y>0) * np.log(stats.norm.pdf((y-np.matmul(X,theta[:-1]))/theta[-1])/theta[-1]))
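    # Illustrative usage (a sketch, not part of the class): the negative
    # log-likelihood above can be minimized directly with scipy.optimize on
    # synthetic left-censored data; all names below are hypothetical.
    #
    #   rng = np.random.default_rng(0)
    #   x = rng.normal(size=(500, 2))
    #   theta_true = np.array([0.5, 1.0, -1.0, 1.0])     # [b0, b1, b2, sigma]
    #   y_star = np.matmul(np.append(np.ones([500, 1]), x, axis=1), theta_true[:-1]) \
    #            + rng.normal(0, theta_true[-1], 500)
    #   y = np.maximum(y_star, 0)                        # censor at zero
    #   res = optimize.minimize(lambda th: Tobit.tobit_ll(y, x, th),
    #                           np.array([0., 0., 0., 1.]), method='Nelder-Mead')
    #   # res.x approximately recovers theta_true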
@staticmethod
def tobit_sim(theta,X=None,N=100):
        if X is not None:
            y = np.maximum(np.matmul(X,theta[:-1])+np.random.normal(0,theta[-1],X.shape[0]),0)
        else:
            # theta = [intercept, slopes..., sigma], so X needs theta.shape[0]-2 random columns plus a constant
            y = np.maximum(np.matmul(np.append(np.ones([N,1]), np.random.normal(0,1,[N,theta.shape[0]-2]),axis=1),theta[:-1])+np.random.normal(0,theta[-1],N),0)
        return y
@staticmethod
def tobit_novariance(theta,x,intercept=True):
if intercept:
return np.maximum(np.matmul(np.append(np.ones([x.shape[0],1]), x,axis=1), theta[:-1]),0)
else:
return np.maximum(np.matmul(x,theta[:-1]),0)
@staticmethod
def tobit_censor_prob(theta,x,intercept=True):
if intercept:
return 1-stats.norm.cdf(np.matmul(np.append(np.ones([x.shape[0],1]),x,axis=1),theta[:-1])/theta[-1])
else:
return 1-stats.norm.cdf(np.matmul(x,theta[:-1])/theta[-1])
@staticmethod
def tobit_cond_exp(theta,x,intercept=True):
if intercept:
X = np.append(np.ones([x.shape[0],1]),x,axis=1)
else:
X = x
        return np.matmul(X,theta[:-1])

"""Tests for `kerasadf.layers.convolutional`. """
from __future__ import absolute_import, division, print_function
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import kerasadf.layers
from .strategies import batched_float_array
# convolution layer tests
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.integers(min_value=1, max_value=64),
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=2, max_data_dims=2),
)
def test_convolution_1d(padding, filters, kernel_size, strides, x):
K.clear_session()
means, covariances, mode = x
strides = min(strides, means.shape[1])
kernel_size = min(kernel_size, means.shape[1])
im = Input(shape=means.shape[1:], dtype=means.dtype)
ic = Input(shape=covariances.shape[1:], dtype=covariances.dtype)
layer = kerasadf.layers.Conv1D(
filters, kernel_size, strides, padding, mode=mode
)
ms, cs = layer.compute_output_shape([im.shape, ic.shape])
om, oc = layer([im, ic])
model = Model([im, ic], [om, oc])
means_out, covariances_out = model.predict([means, covariances])
if padding == "same":
out_size = np.ceil(means.shape[1] / strides)
elif padding == "valid":
out_size = np.ceil((means.shape[1] - kernel_size + 1) / strides)
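    # e.g. with input length 10, kernel 3, stride 2 (hypothetical values):
    # "same" gives ceil(10/2) = 5, "valid" gives ceil((10-3+1)/2) = 4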
assert means.shape[0] == means_out.shape[0]
assert out_size == means_out.shape[1]
assert filters == means_out.shape[2]
assert ms.as_list() == om.shape.as_list()
if mode == "diag":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert filters == covariances_out.shape[2]
elif mode == "half":
assert covariances.shape[0] == covariances_out.shape[0]
assert covariances.shape[1] == covariances_out.shape[1]
assert out_size == covariances_out.shape[2]
assert filters == covariances_out.shape[3]
elif mode == "full":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert filters == covariances_out.shape[2]
assert out_size == covariances_out.shape[3]
assert filters == covariances_out.shape[4]
assert cs.as_list() == oc.shape.as_list()
# serialization and deserialization test
config = layer.get_config()
layer_from_config = kerasadf.layers.Conv1D.from_config(config)
layer_deserialized = kerasadf.layers.deserialize(
{"class_name": layer.__class__.__name__, "config": config}
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_from_config
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_deserialized
)
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.integers(min_value=1, max_value=64),
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=2, max_data_dims=2),
)
def test_dilated_convolution_1d(
padding, filters, kernel_size, dilation_rate, x
):
K.clear_session()
means, covariances, mode = x
kernel_size = min(kernel_size, means.shape[1])
if kernel_size > 1:
dilation_rate = min(
dilation_rate,
int(np.floor((means.shape[1] - 1) / (kernel_size - 1))),
)
else:
dilation_rate = min(dilation_rate, means.shape[1])
print("kernel", kernel_size)
print("dilation", dilation_rate)
print("input", means.shape[1])
im = Input(shape=means.shape[1:], dtype=means.dtype)
ic = Input(shape=covariances.shape[1:], dtype=covariances.dtype)
layer = kerasadf.layers.Conv1D(
filters,
kernel_size,
1,
padding,
dilation_rate=dilation_rate,
mode=mode,
)
ms, cs = layer.compute_output_shape([im.shape, ic.shape])
om, oc = layer([im, ic])
model = Model([im, ic], [om, oc])
means_out, covariances_out = model.predict([means, covariances])
if padding == "same":
out_size = means.shape[1]
elif padding == "valid":
out_size = np.ceil(
(means.shape[1] - (kernel_size - 1) * dilation_rate)
)
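    # e.g. with input length 10, kernel 3, dilation 2 (hypothetical values):
    # "valid" gives 10 - (3-1)*2 = 6, "same" keeps the full length 10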
assert means.shape[0] == means_out.shape[0]
assert out_size == means_out.shape[1]
assert filters == means_out.shape[2]
assert ms.as_list() == om.shape.as_list()
if mode == "diag":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert filters == covariances_out.shape[2]
elif mode == "half":
assert covariances.shape[0] == covariances_out.shape[0]
assert covariances.shape[1] == covariances_out.shape[1]
assert out_size == covariances_out.shape[2]
assert filters == covariances_out.shape[3]
elif mode == "full":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert filters == covariances_out.shape[2]
assert out_size == covariances_out.shape[3]
assert filters == covariances_out.shape[4]
assert cs.as_list() == oc.shape.as_list()
# serialization and deserialization test
config = layer.get_config()
layer_from_config = kerasadf.layers.Conv1D.from_config(config)
layer_deserialized = kerasadf.layers.deserialize(
{"class_name": layer.__class__.__name__, "config": config}
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_from_config
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_deserialized
)
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.integers(min_value=1, max_value=64),
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=3, max_data_dims=3),
)
def test_convolution_2d(padding, filters, kernel_size, strides, x):
K.clear_session()
means, covariances, mode = x
if isinstance(strides, tuple):
strides = np.minimum(strides, means.shape[1:3])
else:
strides = min(strides, min(means.shape[1], means.shape[2]))
if isinstance(kernel_size, tuple):
kernel_size = np.minimum(kernel_size, means.shape[1:3])
else:
kernel_size = min(kernel_size, min(means.shape[1], means.shape[2]))
im = Input(shape=means.shape[1:], dtype=means.dtype)
ic = Input(shape=covariances.shape[1:], dtype=covariances.dtype)
layer = kerasadf.layers.Conv2D(
filters, kernel_size, strides, padding, mode=mode
)
ms, cs = layer.compute_output_shape([im.shape, ic.shape])
om, oc = layer([im, ic])
model = Model([im, ic], [om, oc])
means_out, covariances_out = model.predict([means, covariances])
if padding == "same":
out_size = np.ceil(np.asarray(means.shape[1:3]) / strides)
elif padding == "valid":
out_size = np.ceil(
(np.asarray(means.shape[1:3]) - kernel_size + 1) / strides
)
assert means.shape[0] == means_out.shape[0]
assert out_size[0] == means_out.shape[1]
assert out_size[1] == means_out.shape[2]
assert filters == means_out.shape[3]
assert ms.as_list() == om.shape.as_list()
if mode == "diag":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size[0] == covariances_out.shape[1]
assert out_size[1] == covariances_out.shape[2]
assert filters == covariances_out.shape[3]
elif mode == "half":
assert covariances.shape[0] == covariances_out.shape[0]
assert covariances.shape[1] == covariances_out.shape[1]
assert out_size[0] == covariances_out.shape[2]
assert out_size[1] == covariances_out.shape[3]
assert filters == covariances_out.shape[4]
elif mode == "full":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size[0] == covariances_out.shape[1]
assert out_size[1] == covariances_out.shape[2]
assert filters == covariances_out.shape[3]
assert out_size[0] == covariances_out.shape[4]
assert out_size[1] == covariances_out.shape[5]
assert filters == covariances_out.shape[6]
assert cs.as_list() == oc.shape.as_list()
# serialization and deserialization test
config = layer.get_config()
layer_from_config = kerasadf.layers.Conv2D.from_config(config)
layer_deserialized = kerasadf.layers.deserialize(
{"class_name": layer.__class__.__name__, "config": config}
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_from_config
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_deserialized
)
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.integers(min_value=1, max_value=64),
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=3, max_data_dims=3),
)
def test_dilated_convolution_2d(
padding, filters, kernel_size, dilation_rate, x
):
K.clear_session()
means, covariances, mode = x
if isinstance(kernel_size, tuple):
        kernel_size = np.minimum(kernel_size, means.shape[1:3])

#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
==============================================================
Hierarchical clustering (:mod:`pyannote.core.utils.hierarchy`)
==============================================================
"""
from typing import Text, Callable, List, Tuple, Union
from collections import Counter
from inspect import signature
import numpy as np
import scipy.cluster.hierarchy
from .distance import to_condensed
from .distance import to_squared
from .distance import l2_normalize
from .distance import pdist
from .distance import cdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
def linkage(X, method="single", metric="euclidean", **kwargs):
"""Same as scipy.cluster.hierarchy.linkage with more metrics and methods
"""
if method == "pool":
return pool(X, metric=metric, **kwargs)
# corner case when using non-euclidean distances with methods
# designed for the euclidean distance
if metric != 'euclidean' and method in ['centroid', 'median', 'ward']:
# Those 3 methods only work with 'euclidean' distance.
        # Therefore, one has to unit-normalize embeddings before
# comparison in case they were optimized for 'cosine' (or 'angular')
# distance.
X = l2_normalize(X)
metric = 'euclidean'
distance = pdist(X, metric=metric)
return scipy.cluster.hierarchy.linkage(
distance, method=method, metric=metric, **kwargs
)
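# Illustrative usage (a sketch, not part of the module): a "pool" linkage matrix
# can be cut like any scipy linkage output, e.g. with fcluster. Shapes and
# constraint pairs below are hypothetical.
#
#   X = np.random.randn(20, 8)                       # 20 observations, dim 8
#   Z = linkage(X, method="pool", metric="cosine",
#               cannot_link=[(0, 1)], must_link=[(2, 3)])
#   from scipy.cluster.hierarchy import fcluster
#   labels = fcluster(Z, t=4, criterion="maxclust")  # cut into 4 clusters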
def _average_pooling_func(
u: int, v: int, S: np.ndarray = None, C: np.ndarray = None, **kwargs,
) -> np.ndarray:
"""Compute average of newly merged cluster
Parameters
----------
u : int
Cluster index.
v : int
Cluster index.
C : (2 x n_observations - 1, dimension) np.ndarray
Cluster average.
S : (2 x n_observations - 1, ) np.ndarray
Cluster size.
Returns
-------
Cuv : (dimension, ) np.ndarray
Average of newly formed cluster.
"""
return (C[u] * S[u] + C[v] * S[v]) / (S[u] + S[v])
def _centroid_pooling_func(
u: int,
v: int,
X: np.ndarray = None,
d: np.ndarray = None,
K: np.ndarray = None,
**kwargs,
) -> np.ndarray:
"""Compute centroid of newly merged cluster
Parameters
----------
u : int
Cluster index.
v : int
Cluster index.
X : (n_observations, dimension) np.ndarray
Observations.
    d : (n_observations, n_observations) np.ndarray
Distance between observations.
K : (n_observations, ) np.ndarray, optional
Cluster assignment.
Returns
-------
Cuv : (dimension, ) np.ndarray
Centroid of newly formed cluster.
"""
u_or_v = np.where(np.isin(K, [u, v]))[0]
i = np.argmin(np.mean(d[u_or_v][:, u_or_v], axis=0))
return X[u_or_v[i]]
def propagate_constraints(
cannot_link: List[Tuple[int, int]], must_link: List[Tuple[int, int]]
):
# expand list of "cannot link" constraints thanks to the following rule
# (u != v) & (v == w) ==> u != w
cannot_link = set(tuple(sorted(uv)) for uv in cannot_link)
while True:
new_cannot_link = list()
for x, y in must_link:
for u, v in cannot_link:
ij = tuple(sorted({u, v}.symmetric_difference({x, y})))
if not ij:
msg = (
f"Found a conflict between 'must_link' and "
f"'cannot_link' constraints for pair ({u}, {v})."
)
raise ValueError(msg)
if len(ij) == 2 and ij not in cannot_link:
new_cannot_link.append(ij)
if new_cannot_link:
cannot_link.update(new_cannot_link)
else:
break
return list(cannot_link)
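# e.g. propagate_constraints(cannot_link=[(0, 1)], must_link=[(1, 2)]) returns
# both (0, 1) and the inferred pair (0, 2), since 0 != 1 and 1 == 2 imply 0 != 2.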
def pool(
X: np.ndarray,
metric: Text = "euclidean",
pooling_func: Union[Text, Callable] = "average",
cannot_link: List[Tuple[int, int]] = None,
must_link: List[Tuple[int, int]] = None,
must_link_method: Text = "both",
):
"""'pool' linkage
Parameters
----------
X : np.ndarray
        (n_samples, dimension) observations.
metric : {"euclidean", "cosine", "angular"}, optional
Distance metric. Defaults to "euclidean"
pooling_func: callable, optional
Defaults to "average".
cannot_link : list of pairs, optional
Pairs of indices of observations that cannot be linked. For instance,
[(1, 2), (5, 6)] means that first and second observations cannot end up
        in the same cluster, as well as 5th and 6th observations.
must_link : list of pairs, optional
Pairs of indices of observations that must be linked. For instance,
[(1, 2), (5, 6)] means that first and second observations must end up
        in the same cluster, as well as 5th and 6th observations.
must_link_method : {"merge", "propagate", "both"}, optional
Method used for taking "must link" constraints into account.
* use "merge" to initialize clusters by merging "must link" observations
before any other regular clustering iterations.
* use "propagate" to infer additional "cannot link" constraints by
applying the following propagation rule:
if u and v cannot be linked and v and w must be linked,
then u and w cannot be linked.
* use "both" to apply both methods.
Defaults to "both".
"""
if pooling_func == "average":
pooling_func = _average_pooling_func
elif pooling_func == "centroid":
pooling_func = _centroid_pooling_func
elif isinstance(pooling_func, Text):
msg = (
f"{pooling_func} pooling is not supported. Choose between "
f"'average' and 'centroid', or provide your own function."
)
raise ValueError(msg)
if cannot_link is None:
cannot_link = []
if must_link is None:
must_link = []
# obtain number of original observations
n, dimension = X.shape
# compute similarity matrix
d = pdist(X, metric=metric)
# K[j] contains the index of the cluster to which
# the jth observation is currently assigned
K = np.arange(n)
# S[k] contains the current size of kth cluster
S = np.zeros(2 * n - 1, dtype=np.int16)
S[:n] = 1
# C[k] contains the centroid of kth cluster
C = np.zeros((2 * n - 1, dimension))
# at the beginning, each observation is assigned to its own cluster
C[:n, :] = X
# clustering tree (aka dendrogram)
# Z[i, 0] and Z[i, 1] are merged at ith iteration
# Z[i, 2] is the distance between Z[i, 0] and Z[i, 1]
    # Z[i, 3] is the total number of original observations in the newly formed cluster
Z = np.zeros((n - 1, 4))
    # convert condensed pdist matrix for the `n` original observations to a
# condensed pdist matrix for the `2n-1` clusters (including the `n`
# original observations) that will exist at some point during the process.
D = np.infty * np.ones((2 * n - 1) * (2 * n - 2) // 2)
D[to_condensed(2 * n - 1, *to_squared(n, np.arange(n * (n - 1) // 2)))] = d
if "d" in signature(pooling_func).parameters:
d = squareform(d)
def merge(u, v, iteration, constraint=False):
"""Merge two clusters
Parameters
----------
u, v : int
Indices of clusters to merge.
iteration : int
Current clustering iteration.
constraint : bool
Set to True to indicate that this merge is coming from a 'must_link'
constraint. This will artificially set Z[iteration, 2] to 0.0.
Returns
-------
uv : int
Indices of resulting cluster.
Raises
------
"ValueError" in case of conflict between "must_link" and "cannot_link"
constraints.
"""
k = to_condensed(2 * n - 1, u, v)
if constraint and D[k] == np.infty:
w = u if u < n else v
msg = f"Found a conflict between 'must_link' and 'cannot_link' constraints for observation {w}."
raise ValueError(msg)
# keep track of ...
# ... which clusters are merged at this iteration
        Z[iteration, 0] = v if S[v] > S[u] else u
        Z[iteration, 1] = u if Z[iteration, 0] == v else v
# ... their distance
Z[iteration, 2] = 0.0 if constraint else D[k]
# ... the size of the newly formed cluster
Z[iteration, 3] = S[u] + S[v]
S[n + iteration] = S[u] + S[v]
# compute "representation" of newly formed cluster
C[n + iteration] = pooling_func(u, v, X=X, d=d, K=K, S=S, C=C)
# merged clusters are now empty...
S[u] = 0
S[v] = 0
# move observations of merged clusters into the newly formed cluster
K[K == u] = n + iteration
K[K == v] = n + iteration
# compute distance to newly formed cluster
# (only for clusters that still exists, i.e. those that are not empty)
empty = S[: n + iteration] == 0
k = to_condensed(2 * n - 1, n + iteration, np.arange(n + iteration)[~empty])
D[k] = cdist(
C[np.newaxis, n + iteration, :],
C[: n + iteration, :][~empty, :],
metric=metric,
)
# condensed indices of all (u, _) and (v, _) pairs
_u = to_condensed(2 * n - 1, u, np.arange(u))
u_ = to_condensed(2 * n - 1, u, np.arange(u + 1, n + iteration))
_v = to_condensed(2 * n - 1, v, np.arange(v))
        v_ = to_condensed(2 * n - 1, v, np.arange(v + 1, n + iteration))

import itertools
import random
import csv
import math
import numpy as np
import pandas as pd
from joblib.memory import Memory
import scipy.integrate as integrate
from sklearn.preprocessing import LabelEncoder
from tfspn.SPN import SPN, Splitting
from tfspn.tfspn import ProductNode
from sympy.utilities.iterables import multiset_permutations
from IPython.display import display, Markdown
def query(spn, instance):
return np.exp(spn.root.eval(instance))
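# query() evaluates the SPN root in log space and exponentiates, i.e. it returns
# the modeled likelihood/density of each row in `instance`.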
def predict_proba(spn, feature, instances):
return spn_predict_proba(spn, feature, instances)
def predict(spn, feature, instances):
return np.argmax(predict_proba(spn, feature, instances), axis = 1)
def get_variance(node, numFeatures):
return node.moment(2, numFeatures) - node.moment(1, numFeatures) ** 2
def printmd(string=''):
display(Markdown(str(string)))
def save_dataset(dataset, file_location):
values = dataset.data
targets = dataset.target.reshape(np.size(dataset.target), 1)
whole = np.append(values, targets, axis=1)
np.savetxt(file_location, whole, delimiter=",")
def learn_spn(dataset="data/iris", precision=25, independence=0.1, header=0, date=None, isotonic=False, histogram=True, types=False):
skiprows = [1] if types else []
df = pd.read_csv(dataset, delimiter=",", header=header, parse_dates=date, skiprows=skiprows)
df = df.dropna(axis=0, how='any')
featureNames = df.columns.values.tolist() if header == 0 else ["X_{}".format(i) for i in range(len(df.columns))]
dtypes = df.dtypes
if types:
featureTypes = []
families = []
with open(dataset, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
csvreader.__next__()
_types = csvreader.__next__()
for featureType in _types:
print(featureType)
if featureType == 'cat':
featureTypes.append('categorical')
if histogram:
families.append('histogram')
elif isotonic:
families.append('isotonic')
else:
families.append('piecewise')
elif featureType == 'con':
featureTypes.append('continuous')
families.append('piecewise' if not isotonic else 'isotonic')
elif featureType == 'dis':
featureTypes.append('discrete')
families.append('piecewise' if not isotonic else 'isotonic')
else:
featureTypes.append('unknown')
families.append('piecewise' if not isotonic else 'isotonic')
def to_featureTypes(types):
featureTypes = []
families = []
for featureType in types:
if featureType.kind == 'O':
featureTypes.append('categorical')
if histogram:
families.append('histogram')
elif isotonic:
families.append('isotonic')
else:
families.append('piecewise')
elif featureType.kind == 'f':
featureTypes.append('continuous')
families.append('piecewise' if not isotonic else 'isotonic')
elif featureType.kind == 'i':
featureTypes.append('discrete')
families.append('piecewise' if not isotonic else 'isotonic')
else:
featureTypes.append('unknown')
families.append('piecewise' if not isotonic else 'isotonic')
return featureTypes, families
if not types:
featureTypes, families = to_featureTypes(dtypes)
data_dictionary = {
'features': [{"name": name, "family": family, "type": typ, 'pandas_type': dtypes[i]} for i, (name, family, typ) in enumerate(zip(featureNames, families, featureTypes))],
'num_entries': len(df)
}
# print(df.info())
idx = df.columns
for id, name in enumerate(idx):
if featureTypes[id] == 'categorical':
lb = LabelEncoder()
data_dictionary['features'][id]["encoder"] = lb
df[name] = df[name].astype('category')
df[name] = lb.fit_transform(df[name])
data_dictionary['features'][id]["values"] = lb.transform(lb.classes_)
if dtypes[id].kind == 'M':
df[name] = (df[name] - df[name].min()) / np.timedelta64(1,'D')
# print(df.head())
data = np.array(df)
# print(featureTypes)
spn = SPN.LearnStructure(data,
featureTypes = featureTypes,
featureNames = featureNames,
min_instances_slice=precision,
families=families,
row_split_method=Splitting.KmeansRDCRows(),
col_split_method=Splitting.RDCTest(threshold=independence))
spn.name = dataset
return spn, data_dictionary
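# Hedged usage sketch for `learn_spn` (assumes a comma-separated file with a header row
# at the default location "data/iris"; adjust the path and options to your data):
#
#   spn, data_dict = learn_spn(dataset="data/iris", precision=25, independence=0.1)
#   print(data_dict['num_entries'], [f['name'] for f in data_dict['features']])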
def learn_with_cross_valid(search_grid, dataset="data/iris", header=0, date=None, isotonic=True):
spn, d = learn_spn(dataset=dataset, header=header, date=date, isotonic=isotonic)
valid = load_dataset(dataset + '.valid', d)
precisions = np.linspace(search_grid['pre'][0], search_grid['pre'][1], search_grid['pre'][2])
independencies = np.linspace(search_grid['ind'][0], search_grid['ind'][1], search_grid['ind'][2])
spns = np.array([[learn_spn(dataset=dataset, precision=i, independence=j, header=header, date=date, isotonic=isotonic)[0] for i in precisions] for j in independencies])
log_likelihood = np.array([[np.sum(s.root.eval(valid)) for s in spn] for spn in spns])
max_idx = np.unravel_index(np.argmax(log_likelihood), log_likelihood.shape)
print(max_idx)
return spns[max_idx], d, log_likelihood[max_idx]
def spn_query_id(spn, index, value, query=None):
if not query:
query = np.array([[np.nan] * spn.numFeatures])
query[:,index] = value
return spn.root.eval(query)
def get_moment(spn, query_id, moment=1, evidence=None, detail=1000):
root = spn.root
# find the marginalization and conditioning
marg_ids = [query_id]
if evidence is not None:
query_ids = np.isfinite(evidence)
query_ids[:,query_id] = True
marg_ids = np.where(np.any(query_ids, axis=0))[0]
marg_spn = spn.marginalize(marg_ids)
spn.root = marg_spn
mean = integrate.quad(lambda x: np.exp(spn_query_id(spn, query_id, x)) * x, spn.domains[query_id][0], spn.domains[query_id][1])[0]
spn.root = root
if moment == 1:
return mean
return integrate.quad(lambda x: np.exp(spn_query_id(spn, query_id, x)) * (x - mean) ** moment, spn.domains[query_id][0], spn.domains[query_id][1])[0]
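# Standalone sanity check of the moment-by-quadrature idea used in `get_moment` above,
# run on a plain standard-normal density instead of an SPN (a sketch for illustration
# only; it does not touch any SPN object). `np` and `integrate` are already imported.
def _moment_quadrature_demo():
    pdf = lambda x: np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi)
    mean = integrate.quad(lambda x: pdf(x) * x, -10, 10)[0]               # ~0.0
    var = integrate.quad(lambda x: pdf(x) * (x - mean) ** 2, -10, 10)[0]  # ~1.0
    return mean, var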
def func_from_spn(spn, featureId):
size = spn.numFeatures
marg_spn = spn.marginalize([featureId])
query = np.zeros((1, size))
def func(x):
query[:,featureId] = x
return marg_spn.eval(query)
return func
def validate_feature(spn, featureId, precision=0.1):
if spn.families[featureId] == ('categorical'):
pass
else:
lw = spn.domains[featureId][0]
up = spn.domains[featureId][-1]
x_range = np.arange(lw, up-precision, precision)
z = np.array([])
for x in x_range:
z = np.append(z, precision * np.exp(func_from_spn(spn, featureId)(x)))
return np.sum(z)
def get_feature_entropy(spn, featureId, precision=0.1, numeric=False):
lw = spn.domains[featureId][0]
up = spn.domains[featureId][-1]
if numeric:
x_range = np.arange(lw, up-precision, precision)
H_x = 0.0
for x in x_range:
log_p = func_from_spn(spn, featureId)(x)
H_x = H_x + log_p * np.exp(log_p) * precision
return -1 * H_x
else:
return integrate.quad(lambda x: -1 * (func_from_spn(spn, featureId)(x) * np.exp(func_from_spn(spn, featureId)(x))), lw, up)
def get_mutual_information(spn, featureIdx, precision=0.1, numeric=False):
lw_0 = spn.domains[featureIdx[0]][0]
lw_1 = spn.domains[featureIdx[1]][0]
up_0 = spn.domains[featureIdx[0]][-1]
up_1 = spn.domains[featureIdx[1]][-1]
def joined(spn, featureIdx):
size = spn.numFeatures
marg_spn = spn.marginalize(featureIdx)
query = np.zeros((1, size))
def func(x, y):
query[:, featureIdx[0]] = x
query[:, featureIdx[1]] = y
return marg_spn.eval(query)
return func
p_x = func_from_spn(spn, featureIdx[0])
p_y = func_from_spn(spn, featureIdx[1])
p_x_y = joined(spn, featureIdx)
mi = lambda x, y: np.exp(p_x_y(x, y)) * np.log2(np.exp(1)) * (p_x_y(x, y) - p_x(x) - p_y(y))
if numeric:
x_range = np.arange(lw_0, up_0-precision, precision)
y_range = np.arange(lw_1, up_1-precision, precision)
z = np.array([])
for x in x_range:
for y in y_range:
z = np.append(z, mi(x, y)*(precision*precision))
return np.sum(z)
else:
return integrate.dblquad(lambda y, x: mi(x,y), lw_0, up_0, lambda x: lw_1, lambda x: up_1)
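# Small, self-contained check of the mutual-information formula used above, on a toy
# discrete joint distribution (illustration only; no SPN involved). Dependent variables
# give MI > 0, a product distribution gives MI == 0.
def _mutual_information_toy_check():
    p_xy = np.array([[0.4, 0.1],
                     [0.1, 0.4]])               # joint distribution of two binary variables
    p_x = p_xy.sum(axis=1, keepdims=True)       # marginal of x
    p_y = p_xy.sum(axis=0, keepdims=True)       # marginal of y
    return np.sum(p_xy * np.log2(p_xy / (p_x * p_y)))   # ~0.278 bits here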
def get_feature_decomposition(spn, feature, instance):
"""
spn: a valid tfspn.SPN
feature: the feature for which the influence should be computed
instance: a list of query instances over which the difference is to be computed
for each feature. The final reported weight for each feature is the
mean over all query instances
Computes the log information difference, based on Robnik et al.:
log(p(y|A)) - log(p(y|A \ a))
"""
marginalized_likelihood = [i for i in range(spn.numFeatures) if i != feature]
marg_likelihood = spn.marginalize(marginalized_likelihood)
influence = np.zeros((instance.shape[0], spn.numFeatures))
for i in range(spn.numFeatures):
if i != feature:
marginalize_decomposition = [j for j in range(spn.numFeatures) if j != i]
marginalize_all = [j for j in range(spn.numFeatures)
if j != i and j != feature]
marg_decomposition = spn.marginalize(marginalize_decomposition)
marg_all = spn.marginalize(marginalize_all)
influence[:,i] = spn.root.eval(instance) - marg_likelihood.eval(instance) - marg_decomposition.eval(instance) + marg_all.eval(instance)
return influence
def get_gradient(spn, instance, fixed_instances=[], step_size=0.0001):
"""
implements a quick and dirty approach to calculating gradients of the spn
"""
instances = np.repeat(instance, spn.numFeatures, axis=0)
h = np.identity(spn.numFeatures) * step_size
h[fixed_instances] = np.zeros(spn.numFeatures)
query_minus = instances - h
query_plus = instances + h
prob_minus = np.exp(spn.root.eval(query_minus))
prob_plus = np.exp(spn.root.eval(query_plus))
return (prob_plus - prob_minus) / (2 * step_size)
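# Minimal illustration of the central-difference approximation used by `get_gradient`,
# shown on a plain NumPy function rather than an SPN (illustration only).
def _central_difference_demo():
    f = np.sin
    x0, h = 1.0, 1e-4
    approx = (f(x0 + h) - f(x0 - h)) / (2 * h)   # central difference
    exact = np.cos(x0)                           # analytic derivative for comparison
    return approx, exact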
def get_spn_depth(node, depth=0):
if node.leaf:
return depth
else:
return max(get_spn_depth(child, depth=depth+1) for child in node.children)
def get_strongly_related_features(spn):
"""
spn: a valid tfspn.SPN object
Tries to see how strongly two features are connected by analyzing the level
of the SPN that generally separates them. The interconnection is measured
on [0, 1]: 0 means that they are separated as soon as possible and 1 means
that the two features are not separated until the final split.
"""
root = spn.root
root.validate()
features = root.scope
combinations = list(itertools.combinations(features, 2))
def recurse(node, feature1, feature2, depth, depth_list):
features_in_children = any(((feature1 not in c.scope) != (feature2 not in c.scope) for c in node.children))
if features_in_children:
depth_list.append(depth)
else:
for c in node.children:
recurse(c, feature1, feature2, depth+1, depth_list)
return depth_list
return {c: np.array(recurse(root, c[0], c[1], 0, [])).mean() for c in combinations}
def get_examples_from_nodes(spn):
query = np.array([[np.nan] * spn.numFeatures])
return [c.mpe_eval(query) for c in spn.root.children]
def get_category_examples(spn, categories):
samples = {}
for categoryId in categories:
name = categoryId
samples[name] = []
for example in categories[categoryId][1]:
query = np.array([[np.nan] * spn.numFeatures])
query[:,categoryId] = example
categorical_name = categories[categoryId][0].inverse_transform(example)
row = [categorical_name] + np.round(spn.root.mpe_eval(query)[1], 2).tolist()[0]
samples[name].append(row)
return samples
def get_covariance_matrix(spn):
size = spn.numFeatures
joined_means = spn.root.joined_mean(size)
means = joined_means.diagonal().reshape(1, size)
squared_means = means.T.dot(means)
covariance = joined_means - squared_means
diagonal_correlations = spn.root.moment(2, size) - spn.root.moment(1, size) ** 2
idx = np.diag_indices_from(covariance)
covariance[idx] = diagonal_correlations
return covariance
def get_correlation_matrix(spn):
size = spn.numFeatures
covariance = get_covariance_matrix(spn)
sigmas = np.sqrt(spn.root.moment(2, size) - spn.root.moment(1, size) ** 2).reshape(1,size)
sigma_matrix = sigmas.T.dot(sigmas)
correlations = covariance / sigma_matrix
return correlations
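# Hedged illustration of the covariance-to-correlation normalisation performed by
# `get_correlation_matrix` above (corr = cov / (sigma sigma^T)), using random data
# instead of SPN moments.
def _correlation_from_covariance_demo():
    rng = np.random.RandomState(0)
    data = rng.normal(size=(100, 3))
    cov = np.cov(data, rowvar=False)
    sigmas = np.sqrt(np.diag(cov)).reshape(1, -1)
    return cov / sigmas.T.dot(sigmas)            # unit diagonal, values in [-1, 1]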
def get_node_feature_probability(spn, featureIdx, instance):
root = spn.root
probabilities = np.array([])
for c in spn.root.children:
# print("x")
spn.root = c
# print(func_from_spn(spn, featureIdx)(instance))
probabilities = np.append(probabilities, np.exp(func_from_spn(spn, featureIdx)(instance)))
spn.root = root
return probabilities
def calculate_overlap(inv, invs, area=0.9):
overlap = []
for i in invs:
#print(overlap)
covered = 0
smaller = inv if inv[0] < i[0] else i
bigger = inv if smaller == i else i
if smaller[1] > bigger[0]:
covered += smaller[1] - bigger[0]
if smaller[1] > bigger[1]:
covered -= smaller[1] - bigger[1]
if covered/(i[1]-i[0]) >= area and covered/(inv[1]-inv[0]) >= area:
overlap.append(i)
return overlap
def get_node_description(spn, parent_node, size):
root = spn.root
parent_node.validate()
parent_type = type(parent_node).__name__
node_descriptions = dict()
node_descriptions['num'] = len(parent_node.children)
nodes = list()
for i, node in enumerate(parent_node.children):
spn.root = node
node_dir = dict()
node_dir['weight'] = parent_node.weights[i] if parent_type == 'SumNode' else 1
node_dir['size'] = node.size() - 1
node_dir['num_children'] = len(node.children) if not node.leaf else 0
node_dir['leaf'] = node.leaf
node_dir['type'] = type(node).__name__
node_dir['split_features'] = [list(c.scope) for c in node.children] if not node.leaf else node.scope
node_dir['split_features'].sort(key=lambda x: len(x))
node_dir['depth'] = get_spn_depth(node)
node_dir['child_depths'] = [get_spn_depth(c) for c in node.children]
descriptor = node_dir['type']
if all((d == 0 for d in node_dir['child_depths'])):
descriptor = 'shallow ' + descriptor
node_dir['quick'] = 'shallow'
elif len([d for d in node_dir['child_depths'] if d == 0]) == 1:
node_dir['quick'] = 'split_one'
descriptor += ', which separates one feature'
else:
node_dir['quick'] = 'deep'
descriptor = 'deep ' + descriptor
descriptor = 'a ' + descriptor
node_dir['descriptor'] = descriptor
node_dir['short_descriptor'] = descriptor
node_dir['representative'] = node.mpe_eval(np.array([[np.nan] * size]))[1]
nodes.append(node_dir)
node_descriptions['shallow'] = len([d for d in nodes if d['quick'] == 'shallow'])
node_descriptions['split_one'] = len([d for d in nodes if d['quick'] == 'split_one'])
node_descriptions['deep'] = len([d for d in nodes if d['quick'] == 'deep'])
nodes.sort(key=lambda x: x['weight'])
nodes.reverse()
node_descriptions['nodes'] = nodes
spn.root = root
return node_descriptions
def spn_predict_proba(spn, feature, query):
from concurrent.futures import ThreadPoolExecutor
domain = np.linspace(spn.domains[feature][0], spn.domains[feature][-1], len(spn.domains[feature]))
proba = []
marg = spn.marginalize([i for i in range(spn.numFeatures) if i != feature])
def predict(q):
_query = np.copy(q).reshape((1, spn.numFeatures))
_query = np.repeat(_query, len(spn.domains[feature]), axis=0)
_query[:, feature] = domain
prediction = np.exp(spn.root.eval(_query))
prediction /= np.sum(prediction)
return prediction
with ThreadPoolExecutor(max_workers = 50) as thread_executor:
results = []
for q in query:
#all_pos = list(enumerate(pos))
#results = thread_executor.map(calculate_feature, all_pos)
results.append(thread_executor.submit(predict, q))
proba.append([r.result() for r in results])
return np.array(proba)[0]
def spn_predict_proba_single_class(spn, feature, instance, query):
proba = spn_predict_proba(spn, feature, query)
y_true = proba[:,instance]
return y_true
def load_dataset(data, dictionary, types=False, header=0, date=False):
skiprows = [1] if types else []
df = pd.read_csv(data, delimiter=",", header=header, parse_dates=date, skiprows=skiprows)
categoricals = (i for i, d in enumerate(dictionary['features'])
if d['type'] == 'categorical')
for i in categoricals:
df.iloc[:,i] = dictionary['features'][i]['encoder'].transform(df.iloc[:,i])
return df.values
def get_mean(spn):
return spn.root.moment(1, spn.numFeatures)
def get_var(spn):
return spn.root.moment(2, spn.numFeatures) - spn.root.moment(1, spn.numFeatures) ** 2
def shapley_variance_of_nodes(spn, sample_size):
from datetime import datetime
root = spn.root
startTime = datetime.now()
total_var = get_var(spn)
num_nodes = len(spn.root.children)
len_permutations = math.factorial(num_nodes)
node_permutations = np.array(list(itertools.permutations([i for i in range(num_nodes)])))
if sample_size < len_permutations:
sample = np.random.choice(len_permutations, sample_size)
node_permutations = node_permutations[sample]
node_permutations = np.array(node_permutations)
nodes = spn.root.children
weights = spn.root.weights
shapley_values = np.zeros((num_nodes, spn.numFeatures))
for i in range(num_nodes):
pos = np.where(node_permutations==i)[1]
query_sets = [node_permutations[n,:j] for n, j in enumerate(pos)]
for query_set in query_sets:
spn.root.children = np.array(nodes)[query_set]
spn.root.weights = np.array(weights)[query_set]
var_without = get_var(spn)
query_set = np.append(query_set, i)
spn.root.children = np.array(nodes)[query_set]
spn.root.weights = np.array(weights)[query_set]
var_with = get_var(spn)
shapley_values[i] += (var_with - var_without)/min(sample_size, len_permutations)
spn.root.children = nodes
spn.root.weights = weights
return shapley_values, datetime.now() - startTime
def feature_contribution(spn, query, categorical, sample_size = 10000):
from concurrent.futures import ThreadPoolExecutor
len_permutations = math.factorial(spn.numFeatures-1)
sets = np.array(list(itertools.permutations([i for i in range(spn.numFeatures) if i != categorical])))
if sample_size < len_permutations:
sample = np.random.randint(len_permutations, size=sample_size)
sets = sets[sample]
#sets = np.array(sets)
sample = len(sets)
weights = spn.root.weights
shapley_values = np.zeros((query.shape[0], spn.numFeatures))
def calculate_feature(i, n, j, cat):
s = sets[n,:j]
marg_ci = spn.marginalize(np.append(s, categorical)).eval(query)
marg_c = spn.marginalize(np.append(s[:j-1], categorical)).eval(query)
marg_i = spn.marginalize(s[:j]).eval(query)
if len(s[:j-1]) == 0:
marg = 0
else:
marg = spn.marginalize(s[:j-1]).eval(query)
return (np.exp(marg_ci - marg_i) - np.exp(marg_c - marg))
for i in range(spn.numFeatures):
pos = np.where(sets==i)[1] + 1
results = []
with ThreadPoolExecutor(max_workers = 50) as thread_executor:
for n,j in enumerate(pos):
results.append(thread_executor.submit(calculate_feature, i, n, j, categorical))
results = [r.result() for r in results]
shapley_values[:,i] = np.sum(np.array(results), axis=0)/len(pos)
return shapley_values
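# Toy check of the permutation-sampling Shapley estimate used by `feature_contribution`
# and `shapley_variance_of_nodes`: for an additive value function, the Shapley value of
# each player is exactly its own contribution (illustration only; no SPN involved).
def _shapley_sampling_toy(n_samples=200):
    contributions = np.array([1.0, 2.0, 3.0])
    value = lambda coalition: sum(contributions[p] for p in coalition)
    rng = np.random.RandomState(0)
    shapley = np.zeros(len(contributions))
    for _ in range(n_samples):
        members = set()
        for player in rng.permutation(len(contributions)):
            before = value(members)
            members.add(player)
            shapley[player] += (value(members) - before) / n_samples
    return shapley                               # ~= [1.0, 2.0, 3.0]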
def node_likelihood_contribution(spn, query):
children = spn.root.children
children_weights = spn.root.weights
children_log_weights = spn.root.log_weights
nodes = spn.root.children.copy()
weights = spn.root.weights.copy()
log_weights = spn.root.log_weights.copy()
weighted_nodes = list(zip(nodes, weights, log_weights))
weighted_nodes.sort(key=lambda x: x[1])
weighted_nodes.reverse()
nodes = [x[0] for x in weighted_nodes]
weights = [x[1] for x in weighted_nodes]
log_weights = [x[2] for x in weighted_nodes]
log_likelihood = []
for i, n in enumerate(nodes):
spn.root.children = nodes[:i+1]
spn.root.weights = weights[:i+1]
spn.root.log_weights = log_weights[:i+1]
log_likelihood.append(np.sum(spn.root.eval(query)))
spn.root.children = children
spn.root.weights = children_weights
return log_likelihood
def get_categoricals(spn):
return [i for i in range(spn.numFeatures) if spn.featureTypes[i] == 'categorical']
def categorical_nodes_description(spn):
#TODO: That threshold needs some evidence or theoretical grounding
root = spn.root
categoricals = get_categoricals(spn)
total_analysis = {}
for cat in categoricals:
marg_total = spn.marginalize([cat])
categorical_probabilities = []
for i, n in enumerate(root.children):
node_weight = root.log_weights[i]
node_probabilities = []
for cat_instance in spn.domains[cat]:
spn.root = n
marg = spn.marginalize([cat])
query = np.zeros((1, spn.numFeatures))
query[:,:] = np.nan
query[:,cat] = cat_instance
proba = np.exp(marg.eval(query)+node_weight-marg_total.eval(query))
node_probabilities.append(proba)
categorical_probabilities.append(node_probabilities)
total_analysis[cat] = np.sum(np.array(categorical_probabilities), axis=2)
spn.root = root
node_categoricals = {}
for cat in categoricals:
node_categoricals[cat] = {}
node_categoricals[cat]['contrib'] = []
node_categoricals[cat]['explained'] = []
domain_length = len(spn.domains[cat])
for cat_instance in spn.domains[cat]:
probs = total_analysis[cat]
contrib_nodes = np.where(probs[:,cat_instance]/(np.sum(probs, axis=1))>0.4)
explained_probs = np.sum(probs[contrib_nodes], axis=0)
node_categoricals[cat]['contrib'].append(contrib_nodes)
node_categoricals[cat]['explained'].append(explained_probs)
return node_categoricals, total_analysis
def get_marginal_arrays(spn):
vals = []
tps = []
for feature in range(spn.numFeatures):
if spn.featureTypes[feature] != 'categorical':
marg = spn.marginalize([feature])
_min = spn.domains[feature][0]
_max = spn.domains[feature][-1]
turning_points = np.concatenate([n.x_range for n in marg.children])
turning_points = np.unique(np.sort(np.array([p for p in turning_points if (p > _min) and (p < _max)])))
query = np.zeros((len(turning_points), spn.numFeatures))
query[:,:] = np.nan
query[:,feature] = turning_points
values = np.exp(marg.eval(query))
tps.append(turning_points)
vals.append(values)
return np.array(tps), np.array(vals)
class ProbabilityTree():
def __init__(self, value):
self.value = value
self.children = []
def append(self, child):
self.children.append(child)
def probability_tree(spn, query, categoricalId):
categorical_values = spn.domains[categoricalId]
_query = np.repeat(query, len(categorical_values), axis=0)
_query[:, categoricalId] = categorical_values
def recursive_feature_contribution(spn, query, categorical):
root = spn.root
marg = spn.marginalize([i for i in range(spn.numFeatures) if i != categorical])
proba = spn.root.eval(query) - marg.eval(query)
norm = marg.eval(query)
results = []
node_probas = []
for i, node in enumerate(spn.root.children):
spn.root = node
results.append(np.exp(root.log_weights[i] + spn.root.eval(query) - norm))
proba_query = np.zeros((len(spn.domains[categorical]), spn.numFeatures))
proba_query[:,categorical] = spn.domains[categorical]
node_probas.append(np.exp(spn.marginalize([categorical]).eval(proba_query)))
spn.root = root
return np.array(results), np.array(node_probas)
def get_explanation_vector(spn, data, categorical):
marg = spn.marginalize([i for i in range(spn.numFeatures) if i != categorical])
results = []
for d in data:
d.shape = (1,-1)
gradients_xy = get_gradient(spn, d, categorical)
root = spn.root
spn.root = marg
gradient_x = get_gradient(spn, d, categorical)
spn.root = root
result_xy = np.exp(spn.root.eval(d)).reshape((-1,1))
result_x = np.exp(marg.eval(d)).reshape((-1,1))
results.append((gradients_xy * result_x - result_xy * gradient_x)/(result_x ** 2))
return np.array(results).reshape(len(data),spn.numFeatures)
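# Both `get_explanation_vector` above and `gradient` below apply the quotient rule to
# p(y|x) = p(x, y) / p(x):
#   d/dx p(y|x) = (p'(x, y) * p(x) - p(x, y) * p'(x)) / p(x)**2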
def gradient(spn, data, categorical):
marg = spn.marginalize([i for i in range(spn.numFeatures) if i != categorical])
gradients_xy = spn.root.gradient(data)
gradient_x = marg.gradient(data)
result_xy = np.exp(spn.root.eval(data)).reshape((-1,1))
result_x = np.exp(marg.eval(data)).reshape((-1,1))
return np.delete((gradients_xy * result_x - result_xy * gradient_x)/(result_x ** 2), categorical, axis = 1)
def prediction_nodes(spn, query, categorical):
root = spn.root
norm = spn.marginalize(
[i for i in range(spn.numFeatures) if i != categorical]).eval(query)
result = []
for i, n in enumerate(spn.root.children):
spn.root = n
prob = spn.marginalize(
[i for i in range(spn.numFeatures) if i != categorical]).eval(query)
result.append(np.exp(root.log_weights[i] + prob - norm))
spn.root = root
return np.array(result)
def bin_gradient_data(data, gradients, bins):
bin_borders = np.linspace(-1, 1, num=bins+1)
query_list = [np.where((gradients >= bin_borders[i]) & (gradients < bin_borders[i+1]))
for i in range(bins)]
return query_list
import numpy as np
from .regression import ee_regression
from delicatessen.utilities import logit, inverse_logit, identity
#################################################################
# Causal Inference (ATE) Estimating Equations
def ee_gformula(theta, y, X, X1, X0=None, force_continuous=False):
r"""Default stacked estimating equation for parametric g-computation in the time-fixed setting. The parameter of
interest can either be the mean under a single intervention or plan on an action, or the mean difference between
two interventions or plans on an action. This is accomplished by providing the estimating equation the observed
data (``X``, ``y``), and the same data under the actions (``X1`` and optionally ``X0``).
For continuous Y, the linear regression estimating equation is
.. math::
\sum_i^n \psi_m(Y_i, X_i, \theta) = \sum_i^n (Y_i - X_i^T \theta) X_i = 0
and for logistic regression, the estimating equation is
.. math::
\sum_i^n \psi_m(Y_i, X_i, \beta) = \sum_i^n (Y_i - expit(X_i^T \beta)) X_i = 0
By default, `ee_gformula` detects whether `y` is all binary (zero or one), and applies logistic regression if that
is evaluated to be true. See the parameters for further details.
There are two variations on the parameter of interest. The first could be the mean under a plan, where the plan sets
the values of action :math:`A` (e.g., exposure, treatment, vaccination, etc.). The estimating equation for this
causal mean is
.. math::
\sum_i^n \psi_1(Y_i, X_i, \theta_1) = \sum_i^n g(\hat{Y}_i) - \theta_1 = 0
Here, the function :math:`g(.)` is a generic function. If linear regression was used, :math:`g(.)` is the identity
function. If logistic regression was used, :math:`g(.)` is the expit or inverse-logit function.
Note
----
This variation includes :math:`1+b` parameters, where the first parameter is the causal mean, and the remainder are
the parameters for the regression model.
The alternative parameter of interest could be the mean difference between two plans. A common example of this would
be the average causal effect, where the plans are all-action-one versus all-action-zero. Therefore, the estimating
equations consist of the following three equations
.. math::
\sum_i^n \psi_d(Y_i, X_i, \theta_0) = \sum_i^n (\theta_1 - \theta_2) - \theta_0 = 0
\sum_i^n \psi_1(Y_i, X_i, \theta_1) = \sum_i^n g(\hat{Y}_i) - \theta_1 = 0
\sum_i^n \psi_0(Y_i, X_i, \theta_2) = \sum_i^n g(\hat{Y}_i) - \theta_2 = 0
Note
----
This variation includes :math:`3+b` parameters, where the first parameter is the causal mean difference, the second
is the causal mean under plan 1, the third is the causal mean under plan 0, and the remainder are the parameters
for the regression model.
The parameter of interest is designated by the user via whether the optional argument ``X0`` is left as ``None``
(which estimates the causal mean) or is given an array (which estimates the causal mean difference and the
corresponding causal means).
Note
----
All provided estimating equations are meant to be wrapped inside a user-specified function. Throughout, these
user-defined functions are defined as ``psi``.
See the examples below for how action plans are specified.
Parameters
----------
theta : ndarray, list, vector
Array of parameters to estimate. Here, theta corresponds to the causal mean(s) followed by the regression model coefficients
y : ndarray, list, vector
1-dimensional vector of n observed values. The outcome can be binary or continuous. No missing data should be
included (missing data may cause unexpected behavior).
X : ndarray, list, vector
2-dimensional vector of n observed values for b variables. No missing data should be included (missing data
may cause unexpected behavior).
X1 : ndarray, list, vector
2-dimensional vector of n observed values for b variables under the action plan. If the action is indicated by
``A``, then ``X1`` will take the original data ``X`` and update the values of ``A`` to follow the deterministic
plan. No missing data should be included (missing data may cause unexpected behavior).
X0 : ndarray, list, vector, None, optional
2-dimensional vector of n observed values for b variables under the action plan. This second argument is
optional and should be specified if a causal mean difference between two action plans is of interest. If the
action is indicated by ``A``, then ``X0`` will take the original data ``X`` and update the values of ``A`` to
follow the deterministic reference plan. No missing data should be included (missing data may cause unexpected
behavior).
force_continuous : bool, optional
Option to force the use of linear regression despite detection of a binary variable.
Returns
-------
array :
Returns a (1+b)-by-n NumPy array if ``X0=None``, or returns a (3+b)-by-n NumPy array if ``X0!=None``
Examples
--------
Construction of the estimating equation(s) with ``ee_gformula`` should be done similarly to the following
>>> import numpy as np
>>> import pandas as pd
>>> from delicatessen import MEstimator
>>> from delicatessen.estimating_equations import ee_gformula
Some generic confounded data
>>> n = 200
>>> d = pd.DataFrame()
>>> d['W'] = np.random.binomial(1, p=0.5, size=n)
>>> d['A'] = np.random.binomial(1, p=(0.25 + 0.5*d['W']), size=n)
>>> d['Ya0'] = np.random.binomial(1, p=(0.75 - 0.5*d['W']), size=n)
>>> d['Ya1'] = np.random.binomial(1, p=(0.75 - 0.5*d['W'] - 0.1*1), size=n)
>>> d['Y'] = (1-d['A'])*d['Ya0'] + d['A']*d['Ya1']
>>> d['C'] = 1
In the first example, we will estimate the causal mean had everyone been set to ``A=1``. Therefore, the optional
argument ``X0`` is left as ``None``. Before creating the estimating equation, we need to do some data prep. First,
we will create an interaction term between ``A`` and ``W`` in the original data. Then we will generate a copy of
the data and update the values of ``A`` to be all ``1``.
>>> d['AW'] = d['A']*d['W']
>>> d1 = d.copy()
>>> d1['A'] = 1
>>> d1['AW'] = d1['A']*d1['W']
Having setup our data, we can now define the psi function.
>>> def psi(theta):
>>> return ee_gformula(theta,
>>> y=d['Y'],
>>> X=d[['C', 'A', 'W', 'AW']],
>>> X1=d1[['C', 'A', 'W', 'AW']])
Notice that ``y`` corresponds to the observed outcomes, ``X`` corresponds to the observed covariate data, and ``X1``
corresponds to the covariate data *under the action plan*.
Now we can call the M-Estimation procedure. Since we are estimating the causal mean, and the regression parameters,
the length of the initial values needs to correspond with this. Our regression model consists of 4
coefficients, so we need 1+4=5 initial values. When the outcome is binary (like it is in this example), we can be
nice to the optimizer and give it a starting value of 0.5 for the causal mean (since 0.5 is in the middle of that
distribution). Below is the call to ``MEstimator``
>>> estr = MEstimator(psi, init=[0.5, 0., 0., 0., 0.])
>>> estr.estimate(solver='lm')
Inspecting the parameter estimates, variance, and 95% confidence intervals
>>> estr.theta
>>> estr.variance
>>> estr.confidence_intervals()
More specifically, the causal mean is
>>> estr.theta[0]
Continuing from the previous example, let's say we wanted to estimate the average causal effect. Therefore, we want
to contrast two plans (all ``A=1`` versus all ``A=0``). As before, we need to create the reference data for ``X0``
>>> d0 = d.copy()
>>> d0['A'] = 0
>>> d0['AW'] = d0['A']*d0['W']
Having setup our data, we can now define the psi function.
>>> def psi(theta):
>>> return ee_gformula(theta,
>>> y=d['Y'],
>>> X=d[['C', 'A', 'W', 'AW']],
>>> X1=d1[['C', 'A', 'W', 'AW']],
>>> X0=d0[['C', 'A', 'W', 'AW']], )
Notice that ``y`` corresponds to the observed outcomes, ``X`` corresponds to the observed covariate data, ``X1``
corresponds to the covariate data under ``A=1``, and ``X0`` corresponds to the covariate data under ``A=0``. Here,
we need 3+4=7 starting values, since there are two additional parameters from the previous example. For the
difference, a starting value of 0 is generally a good choice. Since ``Y`` is binary, we again provide 0.5 as
starting values for the causal means
>>> estr = MEstimator(psi, init=[0., 0.5, 0.5, 0., 0., 0., 0.])
>>> estr.estimate(solver='lm')
Inspecting the parameter estimates
>>> estr.theta[0] # causal mean difference of 1 versus 0
>>> estr.theta[1] # causal mean under X1
>>> estr.theta[2] # causal mean under X0
>>> estr.theta[3:] # logistic regression coefficients
References
----------
<NAME>, <NAME>, & <NAME>. (2011). Implementation of G-computation on a simulated data set: demonstration
of a causal inference technique. *American Journal of Epidemiology*, 173(7), 731-738.
<NAME>, & <NAME>. (2006). Estimating causal effects from epidemiological data.
*Journal of Epidemiology & Community Health*, 60(7), 578-586.
"""
# Ensuring correct typing
X = np.asarray(X) # Convert to NumPy array
y = np.asarray(y) # Convert to NumPy array
X1 = np.asarray(X1) # Convert to NumPy array
# Error checking for misaligned shapes
if X.shape != X1.shape:
raise ValueError("The dimensions of X and X1 must be the same.")
# Processing data depending on if two plans were specified
if X0 is None: # If no reference was specified
mu1 = theta[0] # ... only a single mean
beta = theta[1:] # ... immediately followed by the regression parameters
else: # Otherwise difference and both plans are to be returned
X0 = np.asarray(X0) # ... reference data to NumPy array
if X.shape != X0.shape: # ... error checking for misaligned shapes
raise ValueError("The dimensions of X and X0 must be the same.")
mud = theta[0] # ... first parameter is mean difference
mu1 = theta[1] # ... second parameter is mean under X1
mu0 = theta[2] # ... third parameter is mean under X0
beta = theta[3:] # ... remainder are for the regression model
# Checking outcome variable type
if np.isin(y, [0, 1]).all() and not force_continuous:
model = 'logistic' # Use a logistic regression model
transform = inverse_logit # ... and need to inverse-logit transformation
else:
model = 'linear' # Use a linear regression model
transform = identity # ... and need to apply the identity (no) transformation
# Estimating regression parameters
preds_reg = ee_regression(theta=beta, # beta coefficients
X=X, y=y, # ... along with observed X and observed y
model=model) # ... and specified model type
# Calculating mean under X1
ya1 = transform(np.dot(X1, beta)) - mu1 # mean under X1
if X0 is None: # if no X0, then nothing left to do
# Output (1+b)-by-n stacked array
return np.vstack((ya1[None, :], # theta[0] is the mean under X1
preds_reg)) # theta[1:] is the regression coefficients
else: # if X0, then need to predict mean under X0 and difference
# Calculating mean under X0
ya0 = transform(np.dot(X0, beta)) - mu0
# Calculating mean difference between X1 and X0
ace = np.ones(y.shape[0])*(mu1 - mu0) - mud
# Output (3+b)-by-n stacked array
return np.vstack((ace, # theta[0] is the mean difference between X1 and X0
ya1[None, :], # theta[1] is the mean under X1
ya0[None, :], # theta[2] is the mean under X0
preds_reg)) # theta[3:] is for the regression coefficients
def ee_ipw(theta, y, A, W, truncate=None):
r"""Default stacked estimating equation for inverse probability weighting in the time-fixed setting. The
parameter of interest is the average causal effect. For estimation of the weights (or propensity scores), a
logistic model is used.
Note
----
Unlike ``ee_gformula``, ``ee_ipw`` only provides the average causal effect (and the causal means for ``A=1`` and
``A=0``). In other words, the implementation of IPW does not support generic action plans off-the-shelf,
unlike ``ee_gformula``.
The first estimating equation for the logistic regression model is
.. math::
\sum_i^n \psi_g(A_i, W_i, \alpha) = \sum_i^n (A_i - expit(W_i^T \alpha)) W_i = 0
where A is the treatment and W is the set of confounders.
For the implementation of the inverse probability weighting estimator, stacked estimating equations are used
for the mean had everyone been set to ``A=1``, the mean had everyone been set to ``A=0``, and the mean difference
between the two causal means. The estimating equations are
.. math::
\sum_i^n \psi_d(Y_i, A_i, \pi_i, \theta_0) = \sum_i^n (\theta_1 - \theta_2) - \theta_0 = 0
\sum_i^n \psi_1(Y_i, A_i, \pi_i, \theta_1) = \sum_i^n \frac{A_i \times Y_i}{\pi_i} - \theta_1 = 0
\sum_i^n \psi_0(Y_i, A_i, \pi_i, \theta_2) = \sum_i^n \frac{(1-A_i) \times Y_i}{1-\pi_i} - \theta_2 = 0
Due to these 3 extra values, the length of the theta vector is 3+b, where b is the number of parameters in the
regression model.
Note
----
All provided estimating equations are meant to be wrapped inside a user-specified function. Throughout, these
user-defined functions are defined as ``psi``.
Here, theta corresponds to a variety of different quantities. The *first* value in theta vector is the mean
difference (or average causal effect), the *second* is the mean had everyone been set to ``A=1``, the *third* is the
mean had everyone been set to ``A=0``. The remainder of the parameters correspond to the logistic regression model
coefficients.
Parameters
----------
theta : ndarray, list, vector
Array of parameters to estimate. Here, theta corresponds to the mean difference, the two causal means, and the logistic model coefficients
y : ndarray, list, vector
1-dimensional vector of n observed values. No missing data should be included (missing data may cause
unexpected behavior).
A : ndarray, list, vector
1-dimensional vector of n observed values. The A values should all be 0 or 1. No missing data should be
included (missing data may cause unexpected behavior).
W : ndarray, list, vector
2-dimensional vector of n observed values for b variables to model the probability of ``A`` with. No missing
data should be included (missing data may cause unexpected behavior).
truncate : None, list, set, ndarray, optional
Bounds to truncate the estimated probabilities of ``A`` at. For example, estimated probabilities above 0.99 or
below 0.01 can be set to 0.99 or 0.01, respectively. This is done by specifying ``truncate=(0.01, 0.99)``. Note
this step is done via ``numpy.clip(.., a_min=truncate[0], a_max=truncate[1])``, so order is important. Default
is None, which applies no truncation.
Returns
-------
array :
Returns a (3+b)-by-n NumPy array evaluated for the input theta and y
Examples
--------
Construction of the estimating equation(s) with ``ee_ipw`` should be done similarly to the following
>>> import numpy as np
>>> import pandas as pd
>>> from delicatessen import MEstimator
>>> from delicatessen.estimating_equations import ee_ipw
Some generic causal data
>>> n = 200
>>> d = pd.DataFrame()
>>> d['W'] = np.random.binomial(1, p=0.5, size=n)
>>> d['A'] = np.random.binomial(1, p=(0.25 + 0.5*d['W']), size=n)
>>> d['Ya0'] = np.random.binomial(1, p=(0.75 - 0.5*d['W']), size=n)
>>> d['Ya1'] = np.random.binomial(1, p=(0.75 - 0.5*d['W'] - 0.1*1), size=n)
>>> d['Y'] = (1-d['A'])*d['Ya0'] + d['A']*d['Ya1']
>>> d['C'] = 1
Defining psi, or the stacked estimating equations. Note that 'A' is the action.
>>> def psi(theta):
>>> return ee_ipw(theta, y=d['Y'], A=d['A'],
>>> W=d[['C', 'W']])
Calling the M-estimation procedure. Since ``W`` has 2 columns here and IPW has 3 additional parameters, the initial
values should be of length 3+2=5. In general, it will be best to start with [0., 0.5, 0.5, ...] as the initials when
``Y`` is binary. Otherwise, starting with all 0. as initials is reasonable.
>>> estr = MEstimator(stacked_equations=psi, init=[0., 0.5, 0.5, 0., 0.])
>>> estr.estimate(solver='lm')
Inspecting the parameter estimates, variance, and 95% confidence intervals
>>> estr.theta
>>> estr.variance
>>> estr.confidence_intervals()
More specifically, the corresponding parameters are
>>> estr.theta[0] # causal mean difference of 1 versus 0
>>> estr.theta[1] # causal mean under A=1
>>> estr.theta[2] # causal mean under A=0
>>> estr.theta[3:] # logistic regression coefficients
If you want to see how truncating the probabilities works, try repeating the above code but specifying
``truncate=(0.1, 0.9)`` as an optional argument in ``ee_ipw``.
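For instance, the ``psi`` function above could be adapted as follows (illustrative only)
>>> def psi_trunc(theta):
>>>     return ee_ipw(theta, y=d['Y'], A=d['A'],
>>>                   W=d[['C', 'W']],
>>>                   truncate=(0.1, 0.9))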
References
----------
<NAME>, & <NAME>. (2006). Estimating causal effects from epidemiological data.
*Journal of Epidemiology & Community Health*, 60(7), 578-586.
<NAME>, & <NAME>. (2008). Constructing inverse probability weights for marginal structural models.
*American Journal of Epidemiology*, 168(6), 656-664.
"""
# Ensuring correct typing
W = np.asarray(W) # Convert to NumPy array
A = np.asarray(A) # Convert to NumPy array
y = np.asarray(y) # Convert to NumPy array
beta = theta[3:] # Extracting out theta's for the regression model
# Estimating propensity score
preds_reg = ee_regression(theta=beta, # Using logistic regression
X=W, # ... plug-in covariates for X
y=A, # ... plug-in treatment for Y
model='logistic') # ... use a logistic model
# Estimating weights
pi = inverse_logit(np.dot(W, beta)) # Getting Pr(A|W) from model
if truncate is not None: # Truncating Pr(A|W) when requested
if truncate[0] > truncate[1]:
raise ValueError("truncate values must be specified in ascending order")
pi = np.clip(pi, a_min=truncate[0], a_max=truncate[1])
# Calculating Y(a=1)
ya1 = (A * y) / pi - theta[1] # i's contribution is (AY) / \pi
# Calculating Y(a=0)
ya0 = ((1-A) * y) / (1-pi) - theta[2] # i's contribution is ((1-A)Y) / (1-\pi)
# Calculating Y(a=1) - Y(a=0)
ate = np.ones(y.shape[0]) * (theta[1] - theta[2]) - theta[0]
# Output (3+b)-by-n stacked array
return np.vstack((ate, # theta[0] is for the ATE
ya1[None, :], # theta[1] is for R1
ya0[None, :], # theta[2] is for R0
preds_reg)) # theta[3:] is for the regression coefficients
def ee_aipw(theta, y, A, W, X, X1, X0, truncate=None, force_continuous=False):
r"""Default stacked estimating equation for augmented inverse probability weighting (AIPW) in the time-fixed
setting. The parameter of interest is the average causal effect.
Note
----
Unlike ``ee_gformula``, ``ee_aipw`` only provides the average causal effect (and the causal means for ``A=1`` and
``A=0``). In other words, this implementation of AIPW does not support generic action plans off-the-shelf,
unlike ``ee_gformula``.
AIPW consists of two nuisance models (the propensity score model and the outcome model). For estimation of the
propensity scores, a logistic model is used.
.. math::
\sum_i^n \psi_g(A_i, W_i, \alpha) = \sum_i^n (A_i - expit(W_i^T \alpha)) W_i = 0
where ``A`` is the treatment and ``W`` is the set of confounders.
Next, an outcome model is specified. For continuous Y, the linear regression estimating equation is
.. math::
\sum_i^n \psi_m(Y_i, X_i, \beta) = \sum_i^n (Y_i - X_i^T \beta) X_i = 0
and for logistic regression, the estimating equation is
.. math::
\sum_i^n \psi_m(Y_i, X_i, \beta) = \sum_i^n (Y_i - expit(X_i^T \beta)) X_i = 0
By default, `ee_aipw` detects whether `y` is all binary (zero or one), and applies logistic regression if that
is the case. See the parameters for more details. Notice that ``X`` here should consist of both ``A`` and ``W`` (with
possible interaction terms or other differences in functional forms from the propensity score model).
For the implementation of the AIPW estimator, stacked estimating equations further include the mean had everyone
been set to ``A=1``, the mean had everyone been set to ``A=0``, and the mean difference. Those estimating equations
look like
.. math::
\sum_i^n \psi_d(Y_i, A_i, \pi_i, \theta_0) = \sum_i^n (\theta_1 - \theta_2) - \theta_0 = 0
\sum_i^n \psi_1(Y_i, A_i, W_i, \pi_i, \theta_1) = \sum_i^n (\frac{A_i \times Y_i}{\pi_i} -
\frac{\hat{Y^1}(A_i-\pi_i)}{\pi_i}) - \theta_1 = 0
\sum_i^n \psi_0(Y_i, A_i, \pi_i, \theta_2) = \sum_i^n (\frac{(1-A_i) \times Y_i}{1-\pi_i} +
\frac{\hat{Y^0}(A_i-\pi_i)}{1-\pi_i}) - \theta_2 = 0
where :math:`Y^a` is the predicted values of :math:`Y` from the outcome model under action
assignment :math:`A=a`.
Due to these 3 extra values and two nuisance models, the length of the theta vector is 3+b+c, where b is the number
of columns in ``W``, and c is the number of columns in ``X``.
Note
----
All provided estimating equations are meant to be wrapped inside a user-specified function. Throughout, these
user-defined functions are defined as ``psi``.
Here, theta corresponds to a variety of different quantities. The *first* value in theta vector is mean
difference (or average causal effect), the *second* is the mean had everyone been given ``A=1``, the *third* is the
mean had everyone been given ``A=0``. The remainder of the parameters correspond to the regression model
coefficients, in the order input. The first 'chunk' of coefficients correspond to the propensity score model
and the last 'chunk' correspond to the outcome model.
Parameters
----------
theta : ndarray, list, vector
Array of parameters to estimate. Here, theta corresponds to the mean difference, the two causal means, and the coefficients of the two nuisance models
y : ndarray, list, vector
1-dimensional vector of n observed values. No missing data should be included (missing data may cause
unexpected behavior).
A : ndarray, list, vector
1-dimensional vector of n observed values. The A values should all be 0 or 1. No missing data should be
included (missing data may cause unexpected behavior).
W : ndarray, list, vector
2-dimensional vector of n observed values for b variables to model the probability of ``A`` with. No missing
data should be included (missing data may cause unexpected behavior).
X : ndarray, list, vector
2-dimensional vector of n observed values for c variables to model the outcome ``y``. No missing data should
be included (missing data may cause unexpected behavior).
X1 : ndarray, list, vector
2-dimensional vector of n observed values for c variables under the action plan. If the action is indicated by
``A``, then ``X1`` will take the original data ``X`` and update the values of ``A`` to follow the deterministic
plan where ``A=1`` for all observations. No missing data should be included (missing data may cause unexpected
behavior).
X0 : ndarray, list, vector
2-dimensional vector of n observed values for c variables under the action plan. If the action is indicated by
``A``, then ``X0`` will take the original data ``X`` and update the values of ``A`` to follow the deterministic
plan where ``A=0`` for all observations. No missing data should be included (missing
data may cause unexpected behavior).
truncate : None, list, set, ndarray, optional
Bounds to truncate the estimated probabilities of ``A`` at. For example, estimated probabilities above 0.99 or
below 0.01 can be set to 0.99 or 0.01, respectively. This is done by specifying ``truncate=(0.01, 0.99)``. Note
this step is done via ``numpy.clip(.., a_min=truncate[0], a_max=truncate[1])``, so order is important. Default
is None, which applies no truncation.
force_continuous : bool, optional
Option to force the use of linear regression despite detection of a binary variable.
Returns
-------
array :
Returns a (3+b+c)-by-n NumPy array evaluated for the input theta and y
Examples
--------
Construction of the estimating equation(s) with ``ee_aipw`` should be done similarly to the following
>>> import numpy as np
>>> import pandas as pd
>>> from delicatessen import MEstimator
>>> from delicatessen.estimating_equations import ee_aipw
Some generic causal data
>>> n = 200
>>> d = pd.DataFrame()
>>> d['W'] = np.random.binomial(1, p=0.5, size=n)
>>> d['A'] = np.random.binomial(1, p=(0.25 + 0.5*d['W']), size=n)
>>> d['Ya0'] = np.random.binomial(1, p=(0.75 - 0.5*d['W']), size=n)
>>> d['Ya1'] = np.random.binomial(1, p=(0.75 - 0.5*d['W'] - 0.1*1), size=n)
>>> d['Y'] = (1-d['A'])*d['Ya0'] + d['A']*d['Ya1']
>>> d['C'] = 1
Defining psi, or the stacked estimating equations. Note that ``A`` is the action of interest. First, we will apply
some necessary data processing. We will create an interaction term between ``A`` and ``W`` in the original data.
Then we will generate a copy of the data and update the values of ``A=1``, and then generate another
copy but set ``A=0`` in that copy.
>>> d['AW'] = d['A']*d['W']
>>> d1 = d.copy() # Copy where all A=1
>>> d1['A'] = 1
>>> d1['AW'] = d1['A']*d1['W']
>>> d0 = d.copy() # Copy where all A=0
>>> d0['A'] = 0
>>> d0['AW'] = d0['A']*d0['W']
Having setup our data, we can now define the psi function.
>>> def psi(theta):
>>> return ee_aipw(theta,
>>> y=d['Y'],
>>> A=d['A'],
>>> W=d[['C', 'W']],
>>> X=d[['C', 'A', 'W', 'AW']],
>>> X1=d1[['C', 'A', 'W', 'AW']],
>>> X0=d0[['C', 'A', 'W', 'AW']])
Calling the M-estimation procedure. AIPW has 3 parameters, with 2 coefficients in the propensity score model and
4 coefficients in the outcome model, so the total number of initial values should be 3+2+4=9. When Y is binary, it
will be best to start with ``[0., 0.5, 0.5, ...]`` followed by all ``0.`` for the initial values. Otherwise,
starting with all 0. as initials is reasonable.
>>> estr = MEstimator(psi,
>>> init=[0., 0.5, 0.5, 0., 0., 0., 0., 0., 0.])
>>> estr.estimate(solver='lm')
Inspecting the parameter estimates, variance, and 95% confidence intervals
>>> estr.theta
>>> estr.variance
>>> estr.confidence_intervals()
More specifically, the corresponding parameters are
>>> estr.theta[0] # causal mean difference of 1 versus 0
>>> estr.theta[1] # causal mean under A=1
>>> estr.theta[2] # causal mean under A=0
>>> estr.theta[3:5] # propensity score regression coefficients
>>> estr.theta[5:] # outcome regression coefficients
References
----------
<NAME>, & <NAME>. (2006). Estimating causal effects from epidemiological data.
*Journal of Epidemiology & Community Health*, 60(7), 578-586.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>. (2011). Doubly robust estimation of causal
effects. *American Journal of Epidemiology*, 173(7), 761-767.
<NAME>. (2006). Semiparametric theory and missing data. Springer, New York, NY.
"""
# Ensuring correct typing
y = np.asarray(y)
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import numpy as np
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import os
import sys
from pkg_resources import resource_filename
from .modtranGenerator import ModtranGenerator
from .fgcmAtmosphereTable import FgcmAtmosphereTable
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
from .fgcmLogger import FgcmLogger
class FgcmLUTMaker(object):
"""
Class to make a look-up table.
parameters
----------
lutConfig: dict
Dictionary with LUT config variables
makeSeds: bool, default=False
Make a SED-table in the look-up table (experimental)
"""
def __init__(self,lutConfig,makeSeds=False):
self._checkLUTConfig(lutConfig)
self.magConstant = 2.5/np.log(10)
self._setThroughput = False
self.makeSeds = makeSeds
try:
self.stellarTemplateFile = resource_filename(__name__,'data/templates/stellar_templates_master.fits')
except:
raise IOError("Could not find stellar template file")
if (not os.path.isfile(self.stellarTemplateFile)):
raise IOError("Could not find stellar template file")
if 'logger' in lutConfig:
self.fgcmLog = lutConfig['logger']
else:
self.fgcmLog = FgcmLogger('dummy.log', 'INFO', printLogger=True)
def _checkLUTConfig(self,lutConfig):
"""
Internal method to check the lutConfig dictionary
parameters
----------
lutConfig: dict
"""
self.runModtran = False
requiredKeys = ['filterNames', 'stdFilterNames', 'nCCD']
# first: check if there is a tableName here!
if 'atmosphereTableName' in lutConfig:
# Can we find this table?
# load parameters from it and stuff into config dict
self.atmosphereTable = FgcmAtmosphereTable.initWithTableName(lutConfig['atmosphereTableName'])
# look for consistency between configs?
# Note that we're assuming if somebody asked for a table they wanted
# what was in the table
for key in self.atmosphereTable.atmConfig:
if key in lutConfig:
if 'Range' in key:
if (not np.isclose(lutConfig[key][0], self.atmosphereTable.atmConfig[key][0]) or
not np.isclose(lutConfig[key][1], self.atmosphereTable.atmConfig[key][1])):
print("Warning: input config %s is %.5f-%.5f but precomputed table is %.5f-%.5f" %
(key, lutConfig[key][0], lutConfig[key][1],
self.atmosphereTable.atmConfig[key][0],
self.atmosphereTable.atmConfig[key][1]))
else:
if not np.isclose(lutConfig[key], self.atmosphereTable.atmConfig[key]):
print("Warning: input config %s is %.5f but precomputed table is %.5f" %
(key, lutConfig[key], self.atmosphereTable.atmConfig[key]))
else:
# regular config with parameters
self.runModtran = True
self.atmosphereTable = FgcmAtmosphereTable(lutConfig)
for key in requiredKeys:
if (key not in lutConfig):
raise ValueError("required %s not in lutConfig" % (key))
if ('Range' in key):
if (len(lutConfig[key]) != 2):
raise ValueError("%s must have 2 elements" % (key))
self.lutConfig = lutConfig
self.filterNames = self.lutConfig['filterNames']
self.stdFilterNames = self.lutConfig['stdFilterNames']
if len(self.filterNames) != len(self.stdFilterNames):
raise ValueError("Length of filterNames must be same as stdFilterNames")
for stdFilterName in self.stdFilterNames:
if stdFilterName not in self.filterNames:
raise ValueError("stdFilterName %s not in list of filterNames" % (stdFilterName))
self.nCCD = self.lutConfig['nCCD']
self.nCCDStep = self.nCCD+1
# and record the standard values out of the config
# (these will also come out of the save file)
self.pmbStd = self.atmosphereTable.atmConfig['pmbStd']
self.pwvStd = self.atmosphereTable.atmConfig['pwvStd']
self.o3Std = self.atmosphereTable.atmConfig['o3Std']
self.tauStd = self.atmosphereTable.atmConfig['tauStd']
self.lnTauStd = np.log(self.tauStd)
self.alphaStd = self.atmosphereTable.atmConfig['alphaStd']
self.secZenithStd = self.atmosphereTable.atmConfig['airmassStd']
self.zenithStd = np.arccos(1./self.secZenithStd)*180./np.pi
if ('lambdaRange' in self.atmosphereTable.atmConfig):
self.lambdaRange = np.array(self.atmosphereTable.atmConfig['lambdaRange'])
else:
self.lambdaRange = np.array([3000.0,11000.0])
if ('lambdaStep' in self.atmosphereTable.atmConfig):
self.lambdaStep = self.atmosphereTable.atmConfig['lambdaStep']
else:
self.lambdaStep = 0.5
if ('lambdaNorm' in self.atmosphereTable.atmConfig):
self.lambdaNorm = self.atmosphereTable.atmConfig['lambdaNorm']
else:
self.lambdaNorm = 7750.0
def setThroughputs(self, throughputDict):
"""
Set the throughputs per CCD
parameters
----------
throughputDict: dict
Dict with throughput information
The throughput dict should have one entry for each filterName.
Each of these elements should be a dictionary with the following keys:
'LAMBDA': numpy float array with wavelength values
ccd_index: numpy float array with throughput values for the ccd_index
There should be one entry for each CCD.
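An illustrative layout (assuming 2 CCDs and a single filter 'g'; the numbers are made up) could be
lam = np.arange(3000.0, 11000.0, 0.5)
throughputDict = {'g': {'LAMBDA': lam,
0: 0.5 * np.ones_like(lam),
1: 0.5 * np.ones_like(lam)}}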
"""
self.inThroughputs=[]
for filterName in self.filterNames:
try:
lam = throughputDict[filterName]['LAMBDA']
except:
raise ValueError("Wavelength LAMBDA not found for filter %s in throughputDict!" % (filterName))
tput = np.zeros(lam.size, dtype=[('LAMBDA','f4'),
('THROUGHPUT_AVG','f4'),
('THROUGHPUT_CCD','f4',self.nCCD)])
tput['LAMBDA'][:] = lam
for ccdIndex in xrange(self.nCCD):
try:
tput['THROUGHPUT_CCD'][:,ccdIndex] = throughputDict[filterName][ccdIndex]
except:
raise ValueError("CCD Index %d not found for filter %s in throughputDict!" % (ccdIndex,filterName))
# check if the average is there, if not compute it
if ('AVG' in throughputDict[filterName]):
tput['THROUGHPUT_AVG'][:] = throughputDict[filterName]['AVG']
else:
self.fgcmLog.info("Average throughput not found in throughputDict for filter %s. Computing now..." % (filterName))
for i in xrange(lam.size):
use,=np.where(tput['THROUGHPUT_CCD'][i,:] > 0.0)
if (use.size > 0):
tput['THROUGHPUT_AVG'][i] = np.mean(tput['THROUGHPUT_CCD'][i,use])
self.inThroughputs.append(tput)
self._setThroughput = True
def makeLUT(self):
"""
Make the look-up table. This can either be saved with saveLUT or accessed via
attributes.
parameters
----------
None
output attributes
-----------------
pmb: float array
Pressure (millibars)
pmbFactor: float array
Pressure factor
pmbElevation: float
Standard PMB at elevation
pwv: float array
Water vapor array
o3: float array
Ozone array
tau: float array
Aerosol optical depth array
lambdaNorm: float
Aerosol normalization wavelength
alpha: float array
Aerosol slope array
zenith: float array
Zenith angle array
nccd: int
Number of CCDs in table
pmbStd: float
Standard PMB
pwvStd: float
Standard PWV
o3Std: float
Standard O3
tauStd: float
Standard tau
alphaStd: float
Standard alpha
zenithStd: float
Standard zenith angle (deg)
lambdaRange: numpy float array
Wavelength range (A)
lambdaStep: float
Wavelength step (A)
lambdaStd: numpy float array
Standard wavelength for each filterName (A) at each standard band
lambdaStdRaw: numpy float array
Standard wavelength for each filterName (A)
I0Std: numpy float array
Standard value for I0 for each filterName
I1Std: numpy float array
Standard value for I1 for each filterName
I10Std: numpy float array
Standard value for I10 for each filterName
lambdaB: numpy float array
Standard wavelength for each filterName (A) assuming no atmosphere
atmLambda: numpy float array
Standard atmosphere wavelength array (A)
atmStdTrans: numpy float array
Standard atmosphere transmission array
lut: Look-up table recarray
lut['I0']: I0 LUT (multi-dimensional)
lut['I1']: I1 LUT (multi-dimensional)
lutDeriv: Derivative recarray
lutDeriv['D_PMB']: I0 PMB derivative
lutDeriv['D_PWV']: I0 PWV derivative
lutDeriv['D_O3']: I0 O3 derivative
lutDeriv['D_LNTAU']: I0 log(tau) derivative
lutDeriv['D_ALPHA']: I0 alpha derivative
lutDeriv['D_SECZENITH']: I0 sec(zenith) derivative
lutDeriv['D_PMB_I1']: I1 PMB derivative
lutDeriv['D_PWV_I1']: I1 PWV derivative
lutDeriv['D_O3_I1']: I1 O3 derivative
lutDeriv['D_LNTAU_I1']: I1 log(tau) derivative
lutDeriv['D_ALPHA_I1']: I1 alpha derivative
lutDeriv['D_SECZENITH_I1']: I1 sec(zenith) derivative
"""
if not self._setThroughput:
raise ValueError("Must set the throughput before running makeLUT")
if self.runModtran:
# need to build the table
self.atmosphereTable.generateTable()
else:
# load from data
self.atmosphereTable.loadTable()
# and grab from the table
self.atmLambda = self.atmosphereTable.atmLambda
self.atmStdTrans = self.atmosphereTable.atmStdTrans
self.pmbElevation = self.atmosphereTable.pmbElevation
self.pmb = self.atmosphereTable.pmb
self.pmbDelta = self.atmosphereTable.pmbDelta
pmbPlus = np.append(self.pmb, self.pmb[-1] + self.pmbDelta)
self.pwv = self.atmosphereTable.pwv
self.pwvDelta = self.atmosphereTable.pwvDelta
pwvPlus = np.append(self.pwv, self.pwv[-1] + self.pwvDelta)
self.o3 = self.atmosphereTable.o3
self.o3Delta = self.atmosphereTable.o3Delta
o3Plus = np.append(self.o3, self.o3[-1] + self.o3Delta)
self.lnTau = self.atmosphereTable.lnTau
self.lnTauDelta = self.atmosphereTable.lnTauDelta
self.tau = np.exp(self.lnTau)
lnTauPlus = np.append(self.lnTau, self.lnTau[-1] + self.lnTauDelta)
tauPlus = np.exp(lnTauPlus)
self.alpha = self.atmosphereTable.alpha
self.alphaDelta = self.atmosphereTable.alphaDelta
alphaPlus = np.append(self.alpha, self.alpha[-1] + self.alphaDelta)
self.secZenith = self.atmosphereTable.secZenith
self.secZenithDelta = self.atmosphereTable.secZenithDelta
self.zenith = np.arccos(1./self.secZenith)*180./np.pi
secZenithPlus = np.append(self.secZenith, self.secZenith[-1] + self.secZenithDelta)
zenithPlus = np.arccos(1./secZenithPlus)*180./np.pi
# and compute the proper airmass...
self.airmass = self.secZenith - 0.0018167*(self.secZenith-1.0) - 0.002875*(self.secZenith-1.0)**2.0 - 0.0008083*(self.secZenith-1.0)**3.0
airmassPlus = secZenithPlus - 0.0018167*(secZenithPlus-1.0) - 0.002875*(secZenithPlus-1.0)**2.0 - 0.0008083*(secZenithPlus-1.0)**3.0
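# Editor's worked example of the airmass polynomial above (a Hardie-style correction
# to the plane-parallel sec(z) approximation); the number is just an illustration:
#   for sec(z) = 1.5, X = 1.5 - 0.0018167*0.5 - 0.002875*0.25 - 0.0008083*0.125 ~= 1.4983,
# i.e. slightly smaller than sec(z) itself.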
pwvAtmTable = self.atmosphereTable.pwvAtmTable
o3AtmTable = self.atmosphereTable.o3AtmTable
o2AtmTable = self.atmosphereTable.o2AtmTable
rayleighAtmTable = self.atmosphereTable.rayleighAtmTable
# get the filters over the same lambda ranges...
self.fgcmLog.info("\nInterpolating filters...")
self.throughputs = []
for i in xrange(len(self.filterNames)):
inLam = self.inThroughputs[i]['LAMBDA']
tput = np.zeros(self.atmLambda.size, dtype=[('LAMBDA','f4'),
('THROUGHPUT_AVG','f4'),
('THROUGHPUT_CCD','f4',self.nCCD)])
tput['LAMBDA'][:] = self.atmLambda
for ccdIndex in xrange(self.nCCD):
ifunc = interpolate.interp1d(inLam, self.inThroughputs[i]['THROUGHPUT_CCD'][:,ccdIndex])
tput['THROUGHPUT_CCD'][:,ccdIndex] = np.clip(ifunc(self.atmLambda),
0.0,
1e100)
ifunc = interpolate.interp1d(inLam, self.inThroughputs[i]['THROUGHPUT_AVG'])
tput['THROUGHPUT_AVG'][:] = np.clip(ifunc(self.atmLambda), 0.0, 1e100)
self.throughputs.append(tput)
# and now we can get the standard atmosphere and lambda_b
self.fgcmLog.info("Computing lambdaB")
self.lambdaB = np.zeros(len(self.filterNames))
for i in xrange(len(self.filterNames)):
num = integrate.simps(self.atmLambda * self.throughputs[i]['THROUGHPUT_AVG'] / self.atmLambda, self.atmLambda)
denom = integrate.simps(self.throughputs[i]['THROUGHPUT_AVG'] / self.atmLambda, self.atmLambda)
self.lambdaB[i] = num / denom
self.fgcmLog.info("Filter: %s, lambdaB = %.3f" % (self.filterNames[i], self.lambdaB[i]))
self.fgcmLog.info("Computing lambdaStdFilter")
self.lambdaStdFilter = np.zeros(len(self.filterNames))
for i in xrange(len(self.filterNames)):
num = integrate.simps(self.atmLambda * self.throughputs[i]['THROUGHPUT_AVG'] * self.atmStdTrans / self.atmLambda, self.atmLambda)
denom = integrate.simps(self.throughputs[i]['THROUGHPUT_AVG'] * self.atmStdTrans / self.atmLambda, self.atmLambda)
self.lambdaStdFilter[i] = num / denom
self.fgcmLog.info("Filter: %s, lambdaStdFilter = %.3f" % (self.filterNames[i],self.lambdaStdFilter[i]))
# now compute lambdaStd based on the desired standards...
self.fgcmLog.info("Calculating lambdaStd")
self.lambdaStd = np.zeros(len(self.filterNames))
for i, filterName in enumerate(self.filterNames):
ind = self.filterNames.index(self.stdFilterNames[i])
#ind, = np.where(self.filterNames == self.stdFilterNames[i])
#self.lambdaStd[i] = self.lambdaStdFilter[ind[0]]
self.lambdaStd[i] = self.lambdaStdFilter[ind]
self.fgcmLog.info("Filter: %s (from %s) lambdaStd = %.3f" %
(filterName, self.stdFilterNames[i], self.lambdaStd[i]))
self.fgcmLog.info("Computing I0Std/I1Std")
self.I0Std = np.zeros(len(self.filterNames))
self.I1Std = np.zeros(len(self.filterNames))
for i in xrange(len(self.filterNames)):
self.I0Std[i] = integrate.simps(self.throughputs[i]['THROUGHPUT_AVG'] * self.atmStdTrans / self.atmLambda, self.atmLambda)
self.I1Std[i] = integrate.simps(self.throughputs[i]['THROUGHPUT_AVG'] * self.atmStdTrans * (self.atmLambda - self.lambdaStd[i]) / self.atmLambda, self.atmLambda)
self.I10Std = self.I1Std / self.I0Std
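# Editor's note, writing out the integrals computed just above:
#   I0  = int S(lambda) * T_std(lambda) / lambda dlambda
#   I1  = int S(lambda) * T_std(lambda) * (lambda - lambdaStd) / lambda dlambda
#   I10 = I1 / I0
# where S is the average instrumental throughput and T_std the standard atmosphere
# transmission (atmStdTrans).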
#################################
## Make the I0/I1 LUT
#################################
self.fgcmLog.info("Building look-up table...")
lutPlus = np.zeros((len(self.filterNames),
pwvPlus.size,
o3Plus.size,
tauPlus.size,
alphaPlus.size,
zenithPlus.size,
self.nCCDStep),
dtype=[('I0','f4'),
('I1','f4')])
# pre-compute pmb factors
pmbMolecularScattering = np.exp(-(pmbPlus - self.pmbElevation)/self.pmbElevation)
pmbMolecularAbsorption = pmbMolecularScattering ** 0.6
pmbFactorPlus = pmbMolecularScattering * pmbMolecularAbsorption
self.pmbFactor = pmbFactorPlus[:-1]
# this set of nested for loops could probably be vectorized in some way
for i in xrange(len(self.filterNames)):
self.fgcmLog.info("Working on filter %s" % (self.filterNames[i]))
for j in xrange(pwvPlus.size):
self.fgcmLog.info(" and on pwv #%d" % (j))
for k in xrange(o3Plus.size):
self.fgcmLog.info(" and on o3 #%d" % (k))
for m in xrange(tauPlus.size):
for n in xrange(alphaPlus.size):
for o in xrange(zenithPlus.size):
aerosolTauLambda = np.exp(-1.0*tauPlus[m]*airmassPlus[o]*(self.atmLambda/self.lambdaNorm)**(-alphaPlus[n]))
for p in xrange(self.nCCDStep):
if (p == self.nCCD):
Sb = self.throughputs[i]['THROUGHPUT_AVG'] * o2AtmTable[o,:] * rayleighAtmTable[o,:] * pwvAtmTable[j,o,:] * o3AtmTable[k,o,:] * aerosolTauLambda
else:
Sb = self.throughputs[i]['THROUGHPUT_CCD'][:,p] * o2AtmTable[o,:] * rayleighAtmTable[o,:] * pwvAtmTable[j,o,:] * o3AtmTable[k,o,:] * aerosolTauLambda
lutPlus['I0'][i,j,k,m,n,o,p] = integrate.simps(Sb / self.atmLambda, self.atmLambda)
lutPlus['I1'][i,j,k,m,n,o,p] = integrate.simps(Sb * (self.atmLambda - self.lambdaStd[i]) / self.atmLambda, self.atmLambda)
# and create the LUT (not plus)
self.lut = np.zeros((len(self.filterNames),
self.pwv.size,
self.o3.size,
self.tau.size,
self.alpha.size,
self.zenith.size,
self.nCCDStep),
dtype=lutPlus.dtype)
temp = np.delete(lutPlus['I0'], self.pwv.size, axis=1)
temp = np.delete(temp, self.o3.size, axis=2)
temp = np.delete(temp, self.tau.size, axis=3)
temp = np.delete(temp, self.alpha.size, axis=4)
temp = np.delete(temp, self.zenith.size, axis=5)
self.lut['I0'] = temp
temp = np.delete(lutPlus['I1'], self.pwv.size, axis=1)
temp = np.delete(temp, self.o3.size, axis=2)
temp = np.delete(temp, self.tau.size, axis=3)
temp = np.delete(temp, self.alpha.size, axis=4)
temp = np.delete(temp, self.zenith.size, axis=5)
self.lut['I1'] = temp
#################################
## Make the I0/I1 derivative LUTs
#################################
# This is *not* done plus-size
self.lutDeriv = np.zeros((len(self.filterNames),
self.pwv.size,
self.o3.size,
self.tau.size,
self.alpha.size,
self.zenith.size,
self.nCCDStep),
dtype=[('D_PMB','f4'),
('D_PWV','f4'),
('D_O3','f4'),
('D_LNTAU','f4'),
('D_ALPHA','f4'),
('D_SECZENITH','f4'),
('D_PMB_I1','f4'),
('D_PWV_I1','f4'),
('D_O3_I1','f4'),
('D_LNTAU_I1','f4'),
('D_ALPHA_I1','f4'),
('D_SECZENITH_I1','f4')])
self.fgcmLog.info("Computing derivatives...")
## FIXME: figure out PMB derivative?
for i in xrange(len(self.filterNames)):
self.fgcmLog.info("Working on filter %s" % (self.filterNames[i]))
for j in xrange(self.pwv.size):
for k in xrange(self.o3.size):
for m in xrange(self.tau.size):
for n in xrange(self.alpha.size):
for o in xrange(self.zenith.size):
for p in xrange(self.nCCDStep):
self.lutDeriv['D_PWV'][i,j,k,m,n,o,p] = (
((lutPlus['I0'][i,j+1,k,m,n,o,p] -
lutPlus['I0'][i,j,k,m,n,o,p]) /
self.pwvDelta)
)
self.lutDeriv['D_PWV_I1'][i,j,k,m,n,o,p] = (
((lutPlus['I1'][i,j+1,k,m,n,o,p] -
lutPlus['I1'][i,j,k,m,n,o,p]) /
self.pwvDelta)
)
self.lutDeriv['D_O3'][i,j,k,m,n,o,p] = (
((lutPlus['I0'][i,j,k+1,m,n,o,p] -
lutPlus['I0'][i,j,k,m,n,o,p]) /
self.o3Delta)
)
self.lutDeriv['D_O3_I1'][i,j,k,m,n,o,p] = (
((lutPlus['I1'][i,j,k+1,m,n,o,p] -
lutPlus['I1'][i,j,k,m,n,o,p]) /
self.o3Delta)
)
self.lutDeriv['D_LNTAU'][i,j,k,m,n,o,p] = (
((lutPlus['I0'][i,j,k,m+1,n,o,p] -
lutPlus['I0'][i,j,k,m,n,o,p]) /
self.lnTauDelta)
)
self.lutDeriv['D_LNTAU_I1'][i,j,k,m,n,o,p] = (
((lutPlus['I1'][i,j,k,m+1,n,o,p] -
lutPlus['I1'][i,j,k,m,n,o,p]) /
self.lnTauDelta)
)
self.lutDeriv['D_ALPHA'][i,j,k,m,n,o,p] = (
((lutPlus['I0'][i,j,k,m,n+1,o,p] -
lutPlus['I0'][i,j,k,m,n,o,p]) /
self.alphaDelta)
)
self.lutDeriv['D_ALPHA_I1'][i,j,k,m,n,o,p] = (
((lutPlus['I1'][i,j,k,m,n+1,o,p] -
lutPlus['I1'][i,j,k,m,n,o,p]) /
self.alphaDelta)
)
self.lutDeriv['D_SECZENITH'][i,j,k,m,n,o,p] = (
((lutPlus['I0'][i,j,k,m,n,o+1,p] -
lutPlus['I0'][i,j,k,m,n,o,p]) /
self.secZenithDelta)
)
self.lutDeriv['D_SECZENITH_I1'][i,j,k,m,n,o,p] = (
((lutPlus['I1'][i,j,k,m,n,o+1,p] -
lutPlus['I1'][i,j,k,m,n,o,p]) /
self.secZenithDelta)
)
if (self.makeSeds):
# and the SED LUT
self.fgcmLog.info("Building SED LUT")
# arbitrary. Configure? Fit? Seems stable...
delta = 600.0
# blah on fits here...
import fitsio
# how many extensions?
fits=fitsio.FITS(self.stellarTemplateFile)
fits.update_hdu_list()
extNames = []
for hdu in fits.hdu_list:
extName = hdu.get_extname()
if ('TEMPLATE_' in extName):
extNames.append(extName)
# set up SED look-up table
nTemplates = len(extNames)
self.sedLUT = np.zeros(nTemplates, dtype=[('TEMPLATE','i4'),
('SYNTHMAG','f4',len(self.filterNames)),
('FPRIME','f4',len(self.filterNames))])
# now do it...looping is no problem since there aren't that many.
for i in xrange(nTemplates):
data = fits[extNames[i]].read()
templateLambda = data['LAMBDA']
templateFLambda = data['FLUX']
templateFnu = templateFLambda * templateLambda * templateLambda
parts=extNames[i].split('_')
self.sedLUT['TEMPLATE'][i] = int(parts[1])
# interpolate to atmLambda
intFunc = interpolate.interp1d(templateLambda, templateFnu)
fnu = np.zeros(self.atmLambda.size)
good,=np.where((self.atmLambda >= templateLambda[0]) &
(self.atmLambda <= templateLambda[-1]))
fnu[good] = intFunc(self.atmLambda[good])
# out of range, let it hit the limit
lo,=np.where(self.atmLambda < templateLambda[0])
if (lo.size > 0):
fnu[lo] = intFunc(self.atmLambda[good[0]])
hi,=np.where(self.atmLambda > templateLambda[-1])
if (hi.size > 0):
fnu[hi] = intFunc(self.atmLambda[good[-1]])
# compute synthetic mags
for j in xrange(len(self.filterNames)):
num = integrate.simps(fnu * self.throughputs[j]['THROUGHPUT_AVG'][:] * self.atmStdTrans / self.atmLambda, self.atmLambda)
denom = integrate.simps(self.throughputs[j]['THROUGHPUT_AVG'][:] * self.atmStdTrans / self.atmLambda, self.atmLambda)
self.sedLUT['SYNTHMAG'][i,j] = -2.5*np.log10(num/denom)
# and compute fprimes
for j in xrange(len(self.filterNames)):
use,=np.where((templateLambda >= (self.lambdaStd[j]-delta)) &
(templateLambda <= (self.lambdaStd[j]+delta)))
fit = np.polyfit(templateLambda[use] - self.lambdaStd[j],
templateFnu[use],
1)
self.sedLUT['FPRIME'][i,j] = fit[0] / fit[1]
fits.close()
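# Editor's sketch of the overall flow (hedged; the throughput-setting method and the
# output file name are placeholders, since neither is shown in this excerpt):
#   maker.setThroughput(throughputDict)   # per-filter, per-CCD throughput curves
#   maker.makeLUT()                       # builds lut, lutDeriv and (optionally) sedLUT
#   maker.saveLUT('fgcm_lut.fits')        # hypothetical output file name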
def saveLUT(self,lutFile,clobber=False):
"""
"""
import fitsio
if (os.path.isfile(lutFile) and not clobber):
self.fgcmLog.info("lutFile %s already exists, and clobber is False." % (lutFile))
return
self.fgcmLog.info("Saving LUT to %s" % (lutFile))
# first, save the LUT itself
fitsio.write(lutFile,self.lut.flatten(),extname='LUT',clobber=True)
# and now save the indices
maxFilterLen = len(max(self.filterNames, key=len))
indexVals = np.zeros(1,dtype=[('FILTERNAMES', 'a%d' % (maxFilterLen), len(self.filterNames)),
('STDFILTERNAMES', 'a%d' % (maxFilterLen), len(self.stdFilterNames)),
('PMB','f8',self.pmb.size),
('PMBFACTOR','f8',self.pmb.size),
('PMBELEVATION','f8'),
('PWV','f8',self.pwv.size),
('O3','f8',self.o3.size),
('TAU','f8',self.tau.size),
('LAMBDANORM','f8'),
('ALPHA','f8',self.alpha.size),
('ZENITH','f8',self.zenith.size),
('NCCD','i4')])
indexVals['FILTERNAMES'] = self.filterNames
indexVals['STDFILTERNAMES'] = self.stdFilterNames
indexVals['PMB'] = self.pmb
indexVals['PMBFACTOR'] = self.pmbFactor
indexVals['PMBELEVATION'] = self.pmbElevation
indexVals['PWV'] = self.pwv
indexVals['O3'] = self.o3
indexVals['TAU'] = self.tau
indexVals['LAMBDANORM'] = self.lambdaNorm
indexVals['ALPHA'] = self.alpha
indexVals['ZENITH'] = self.zenith
indexVals['NCCD'] = self.nCCD
fitsio.write(lutFile,indexVals,extname='INDEX')
# and the standard values
stdVals = np.zeros(1,dtype=[('PMBSTD','f8'),
('PWVSTD','f8'),
('O3STD','f8'),
('TAUSTD','f8'),
('ALPHASTD','f8'),
('ZENITHSTD','f8'),
('LAMBDARANGE','f8',2),
('LAMBDASTEP','f8'),
('LAMBDASTD','f8',len(self.filterNames)),
('LAMBDASTDFILTER','f8',len(self.filterNames)),
('LAMBDANORM','f8'),
('I0STD','f8',len(self.filterNames)),
('I1STD','f8',len(self.filterNames)),
('I10STD','f8',len(self.filterNames)),
('LAMBDAB','f8',len(self.filterNames)),
('ATMLAMBDA','f8',self.atmLambda.size),
('ATMSTDTRANS','f8',self.atmStdTrans.size)])
stdVals['PMBSTD'] = self.pmbStd
stdVals['PWVSTD'] = self.pwvStd
stdVals['O3STD'] = self.o3Std
stdVals['TAUSTD'] = self.tauStd
stdVals['ALPHASTD'] = self.alphaStd
stdVals['ZENITHSTD'] = self.zenithStd
stdVals['LAMBDARANGE'] = self.lambdaRange
stdVals['LAMBDASTEP'] = self.lambdaStep
stdVals['LAMBDASTD'][:] = self.lambdaStd
stdVals['LAMBDASTDFILTER'][:] = self.lambdaStdFilter
stdVals['LAMBDANORM'][:] = self.lambdaNorm
stdVals['I0STD'][:] = self.I0Std
stdVals['I1STD'][:] = self.I1Std
stdVals['I10STD'][:] = self.I10Std
stdVals['LAMBDAB'][:] = self.lambdaB
stdVals['ATMLAMBDA'][:] = self.atmLambda
stdVals['ATMSTDTRANS'][:] = self.atmStdTrans
fitsio.write(lutFile,stdVals,extname='STD')
# and the derivatives
self.fgcmLog.info("Writing Derivative LUT")
fitsio.write(lutFile,self.lutDeriv.flatten(),extname='DERIV')
# and the SED LUT
if (self.makeSeds):
self.fgcmLog.info("Writing SED LUT")
fitsio.write(lutFile,self.sedLUT,extname='SED')
class FgcmLUT(object):
"""
Class to hold the main throughput look-up table and apply it. If loading from
a fits table, initialize with initFromFits(lutFile).
parameters
----------
indexVals: numpy recarray
With LUT index values
lutFlat: numpy recarray
Flattened I0/I1 arrays
lutDerivFlat: numpy recarray
Flattened I0/I1 derivative arrays
stdVals: numpy recarray
Standard atmosphere and associated values
sedLUT: numpy recarray, optional
SED look-up table, used instead of colors (experimental).
filterToBand: dict, optional
Dictionary to map filterNames to bands if not unique
"""
def __init__(self, indexVals, lutFlat, lutDerivFlat, stdVals, sedLUT=None, filterToBand=None):
#self.filterNames = indexVals['FILTERNAMES'][0]
#self.stdFilterNames = indexVals['STDFILTERNAMES'][0]
self.filterNames = [n.decode('utf-8') for n in indexVals['FILTERNAMES'][0]]
self.stdFilterNames = [n.decode('utf-8') for n in indexVals['STDFILTERNAMES'][0]]
self.pmb = indexVals['PMB'][0]
self.pmbFactor = indexVals['PMBFACTOR'][0]
self.pmbDelta = self.pmb[1] - self.pmb[0]
self.pmbElevation = indexVals['PMBELEVATION'][0]
self.lambdaNorm = indexVals['LAMBDANORM'][0]
self.pwv = indexVals['PWV'][0]
self.pwvDelta = self.pwv[1] - self.pwv[0]
self.o3 = indexVals['O3'][0]
self.o3Delta = self.o3[1] - self.o3[0]
self.tau = indexVals['TAU'][0]
self.lnTau = np.log(self.tau)
self.lnTauDelta = self.lnTau[1] - self.lnTau[0]
self.alpha = indexVals['ALPHA'][0]
self.alphaDelta = self.alpha[1] - self.alpha[0]
self.zenith = indexVals['ZENITH'][0]
self.secZenith = 1./np.cos(self.zenith*np.pi/180.)
self.secZenithDelta = self.secZenith[1] - self.secZenith[0]
self.nCCD = indexVals['NCCD'][0]
self.nCCDStep = self.nCCD+1
# make shared memory arrays for LUTs
sizeTuple = (len(self.filterNames),self.pwv.size,self.o3.size,
self.tau.size,self.alpha.size,self.zenith.size,self.nCCDStep)
self.lutI0Handle = snmm.createArray(sizeTuple,dtype='f4')
snmm.getArray(self.lutI0Handle)[:,:,:,:,:,:,:] = lutFlat['I0'].reshape(sizeTuple)
self.lutI1Handle = snmm.createArray(sizeTuple,dtype='f4')
snmm.getArray(self.lutI1Handle)[:,:,:,:,:,:,:] = lutFlat['I1'].reshape(sizeTuple)
# and read in the derivatives
# create shared memory
self.lutDPWVHandle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDO3Handle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDLnTauHandle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDAlphaHandle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDSecZenithHandle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDPWVI1Handle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDO3I1Handle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDLnTauI1Handle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDAlphaI1Handle = snmm.createArray(sizeTuple,dtype='f4')
self.lutDSecZenithI1Handle = snmm.createArray(sizeTuple,dtype='f4')
snmm.getArray(self.lutDPWVHandle)[:,:,:,:,:,:,:] = lutDerivFlat['D_PWV'].reshape(sizeTuple)
snmm.getArray(self.lutDO3Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_O3'].reshape(sizeTuple)
snmm.getArray(self.lutDLnTauHandle)[:,:,:,:,:,:,:] = lutDerivFlat['D_LNTAU'].reshape(sizeTuple)
snmm.getArray(self.lutDAlphaHandle)[:,:,:,:,:,:,:] = lutDerivFlat['D_ALPHA'].reshape(sizeTuple)
snmm.getArray(self.lutDSecZenithHandle)[:,:,:,:,:,:,:] = lutDerivFlat['D_SECZENITH'].reshape(sizeTuple)
self.hasI1Derivatives = False
try:
snmm.getArray(self.lutDPWVI1Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_PWV_I1'].reshape(sizeTuple)
snmm.getArray(self.lutDO3I1Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_O3_I1'].reshape(sizeTuple)
snmm.getArray(self.lutDLnTauI1Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_LNTAU_I1'].reshape(sizeTuple)
snmm.getArray(self.lutDAlphaI1Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_ALPHA_I1'].reshape(sizeTuple)
snmm.getArray(self.lutDSecZenithI1Handle)[:,:,:,:,:,:,:] = lutDerivFlat['D_SECZENITH_I1'].reshape(sizeTuple)
self.hasI1Derivatives = True
except (KeyError, ValueError):
# just fill with zeros
pass
#print("No I1 derivative information")
# get the standard values
self.pmbStd = stdVals['PMBSTD'][0]
self.pwvStd = stdVals['PWVSTD'][0]
self.o3Std = stdVals['O3STD'][0]
self.tauStd = stdVals['TAUSTD'][0]
self.lnTauStd = np.log(self.tauStd)
self.alphaStd = stdVals['ALPHASTD'][0]
self.zenithStd = stdVals['ZENITHSTD'][0]
self.secZenithStd = 1./np.cos(np.radians(self.zenithStd))
self.lambdaRange = stdVals['LAMBDARANGE'][0]
self.lambdaStep = stdVals['LAMBDASTEP'][0]
self.lambdaStd = stdVals['LAMBDASTD'][0]
self.lambdaStdFilter = stdVals['LAMBDASTDFILTER'][0]
self.I0Std = stdVals['I0STD'][0]
self.I1Std = stdVals['I1STD'][0]
self.I10Std = stdVals['I10STD'][0]
self.lambdaB = stdVals['LAMBDAB'][0]
self.atmLambda = stdVals['ATMLAMBDA'][0]
self.atmStdTrans = stdVals['ATMSTDTRANS'][0]
self.magConstant = 2.5/np.log(10)
if (filterToBand is None):
# just set up a 1-1 mapping
self.filterToBand = {}
for filterName in self.filterNames:
self.filterToBand[filterName] = filterName
else:
self.filterToBand = filterToBand
# finally, read in the sedLUT
## this is experimental
self.hasSedLUT = False
if (sedLUT is not None):
self.sedLUT = sedLUT
self.hasSedLUT = True
## FIXME: make general
self.sedColor = self.sedLUT['SYNTHMAG'][:,0] - self.sedLUT['SYNTHMAG'][:,2]
st = np.argsort(self.sedColor)
self.sedColor = self.sedColor[st]
self.sedLUT = self.sedLUT[st]
@classmethod
def initFromFits(cls, lutFile, filterToBand=None):
"""
Initialize FgcmLUT from a fits file.
parameters
----------
lutFile: string
Name of the LUT file
filterToBand: dict, optional
Dictionary to map filterNames to bands if not unique
"""
import fitsio
lutFlat = fitsio.read(lutFile, ext='LUT')
lutDerivFlat = fitsio.read(lutFile, ext='DERIV')
indexVals = fitsio.read(lutFile, ext='INDEX')
stdVals = fitsio.read(lutFile, ext='STD')
try:
sedLUT = fitsio.read(lutFile, ext='SED')
except Exception:
sedLUT = None
return cls(indexVals, lutFlat, lutDerivFlat, stdVals,
sedLUT=sedLUT, filterToBand=filterToBand)
def getIndices(self, filterIndex, pwv, o3, lnTau, alpha, secZenith, ccdIndex, pmb):
"""
Compute indices in the look-up table. These are in regular (non-normalized) units.
parameters
----------
filterIndex: int array
Array with values pointing to the filterName index
pwv: float array
o3: float array
lnTau: float array
alpha: float array
secZenith: float array
ccdIndex: int array
Array with values pointing to the ccd index
pmb: float array
"""
return (filterIndex,
np.clip(((pwv - self.pwv[0])/self.pwvDelta).astype(np.int32), 0,
self.pwv.size-1),
np.clip(((o3 - self.o3[0])/self.o3Delta).astype(np.int32), 0,
self.o3.size-1),
np.clip(((lnTau - self.lnTau[0])/self.lnTauDelta).astype(np.int32), 0,
self.lnTau.size-1),
np.clip(((alpha - self.alpha[0])/self.alphaDelta).astype(np.int32), 0,
self.alpha.size-1),
np.clip(((secZenith - self.secZenith[0])/self.secZenithDelta).astype(np.int32), 0,
self.secZenith.size-1),
ccdIndex,
(np.exp(-(pmb - self.pmbElevation)/self.pmbElevation)) ** 1.6)
def computeI0(self, pwv, o3, lnTau, alpha, secZenith, pmb, indices):
"""
Compute I0 from the look-up table.
parameters
----------
pwv: float array
o3: float array
lnTau: float array
alpha: float array
secZenith: float array
pmb: float array
indices: tuple, from getIndices()
"""
# do a simple linear interpolation
dPWV = pwv - (self.pwv[0] + indices[1] * self.pwvDelta)
dO3 = o3 - (self.o3[0] + indices[2] * self.o3Delta)
dlnTau = lnTau - (self.lnTau[0] + indices[3] * self.lnTauDelta)
dAlpha = alpha - (self.alpha[0] + indices[4] * self.alphaDelta)
dSecZenith = secZenith - (self.secZenith[0] + indices[5] * self.secZenithDelta)
indicesSecZenithPlus = np.array(indices[:-1])
indicesSecZenithPlus[5] += 1
indicesPWVPlus = np.array(indices[:-1])
indicesPWVPlus[1] = np.clip(indicesPWVPlus[1] + 1, 0, self.pwv.size-1)
# also include cross-terms for tau and pwv
# and a second-derivative term for pwv
# note that indices[-1] is the PMB factor
return indices[-1]*(snmm.getArray(self.lutI0Handle)[indices[:-1]] +
dPWV * snmm.getArray(self.lutDPWVHandle)[indices[:-1]] +
dO3 * snmm.getArray(self.lutDO3Handle)[indices[:-1]] +
dlnTau * snmm.getArray(self.lutDLnTauHandle)[indices[:-1]] +
dAlpha * snmm.getArray(self.lutDAlphaHandle)[indices[:-1]] +
dSecZenith * snmm.getArray(self.lutDSecZenithHandle)[indices[:-1]] +
dlnTau * dSecZenith * (snmm.getArray(self.lutDLnTauHandle)[tuple(indicesSecZenithPlus)] -
snmm.getArray(self.lutDLnTauHandle)[indices[:-1]])/self.secZenithDelta +
dPWV * dSecZenith * (snmm.getArray(self.lutDPWVHandle)[tuple(indicesSecZenithPlus)] -
snmm.getArray(self.lutDPWVHandle)[indices[:-1]])/self.secZenithDelta +
dPWV * (dPWV - self.pwvDelta) * (snmm.getArray(self.lutDPWVHandle)[tuple(indicesPWVPlus)] -
snmm.getArray(self.lutDPWVHandle)[indices[:-1]]))
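# Editor's note, the interpolation above written out (a first-order Taylor expansion
# around the grid point, plus tau/PWV x sec(z) cross terms and a second-difference
# term in PWV):
#   I0 ~= pmbFactor * ( I0_grid
#                       + dPWV*dI0/dPWV + dO3*dI0/dO3 + dlnTau*dI0/dlnTau
#                       + dAlpha*dI0/dAlpha + dSecZenith*dI0/dSecZenith
#                       + cross-term and second-order PWV corrections )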
def computeI1(self, pwv, o3, lnTau, alpha, secZenith, pmb, indices):
"""
Compute I1 from the look-up table.
parameters
----------
pwv: float array
o3: float array
lnTau: float array
alpha: float array
secZenith: float array
pmb: float array
indices: tuple, from getIndices()
"""
# do a simple linear interpolation
dPWV = pwv - (self.pwv[0] + indices[1] * self.pwvDelta)
dO3 = o3 - (self.o3[0] + indices[2] * self.o3Delta)
dlnTau = lnTau - (self.lnTau[0] + indices[3] * self.lnTauDelta)
dAlpha = alpha - (self.alpha[0] + indices[4] * self.alphaDelta)
dSecZenith = secZenith - (self.secZenith[0] + indices[5] * self.secZenithDelta)
indicesSecZenithPlus = np.array(indices[:-1])
indicesSecZenithPlus[5] += 1
indicesPWVPlus = np.array(indices[:-1])
indicesPWVPlus[1] = np.clip(indicesPWVPlus[1] + 1, 0, self.pwv.size-1)
# also include a cross-term for tau
# note that indices[-1] is the PMB factor
return indices[-1]*(snmm.getArray(self.lutI1Handle)[indices[:-1]] +
dPWV * snmm.getArray(self.lutDPWVI1Handle)[indices[:-1]] +
dO3 * snmm.getArray(self.lutDO3I1Handle)[indices[:-1]] +
dlnTau * snmm.getArray(self.lutDLnTauI1Handle)[indices[:-1]] +
dAlpha * snmm.getArray(self.lutDAlphaI1Handle)[indices[:-1]] +
dSecZenith * snmm.getArray(self.lutDSecZenithI1Handle)[indices[:-1]] +
dlnTau * dSecZenith * (snmm.getArray(self.lutDLnTauI1Handle)[tuple(indicesSecZenithPlus)] -
snmm.getArray(self.lutDLnTauI1Handle)[indices[:-1]])/self.secZenithDelta +
dPWV * dSecZenith * (snmm.getArray(self.lutDPWVI1Handle)[tuple(indicesSecZenithPlus)] -
snmm.getArray(self.lutDPWVI1Handle)[indices[:-1]])/self.secZenithDelta +
dPWV * (dPWV - self.pwvDelta) * (snmm.getArray(self.lutDPWVI1Handle)[tuple(indicesPWVPlus)] -
snmm.getArray(self.lutDPWVI1Handle)[indices[:-1]]))
def computeI1Old(self, indices):
"""
Unused
"""
return indices[-1] * snmm.getArray(self.lutI1Handle)[indices[:-1]]
def computeLogDerivatives(self, indices, I0):
"""
Compute log derivatives. Used in FgcmChisq.
parameters
----------
indices: tuple, from getIndices()
I0: float array, from computeI0()
"""
# dL(i,j|p) = d/dp(2.5*log10(LUT(i,j|p)))
# = 1.086*(LUT'(i,j|p)/LUT(i,j|p))
return (self.magConstant*snmm.getArray(self.lutDPWVHandle)[indices[:-1]] / I0,
self.magConstant*snmm.getArray(self.lutDO3Handle)[indices[:-1]] / I0,
self.magConstant*snmm.getArray(self.lutDLnTauHandle)[indices[:-1]] / (I0),
self.magConstant*snmm.getArray(self.lutDAlphaHandle)[indices[:-1]] / I0)
def computeLogDerivativesI1(self, indices, I0, I10, sedSlope):
"""
Compute log derivatives for I1. Used in FgcmChisq.
parameters
----------
indices: tuple, from getIndices()
I0: float array, from computeI0()
I10: float array, from computeI1()/computeI0()
sedSlope: float array, fnuprime
"""
# dL(i,j|p) += d/dp(2.5*log10((1+F'*I10^obs) / (1+F'*I10^std)))
# the std part cancels...
# = 1.086*(F'/(1+F'*I10)) * (I0*LUT1' - I1*LUT0')/(I0^2)
preFactor = (self.magConstant * (sedSlope / (1 + sedSlope*I10))) / I0**2.
return (preFactor * (I0 * snmm.getArray(self.lutDPWVHandle)[indices[:-1]] -
I10 * I0 * snmm.getArray(self.lutDPWVI1Handle)[indices[:-1]]),
preFactor * (I0 * snmm.getArray(self.lutDO3Handle)[indices[:-1]] -
I10 * I0 * snmm.getArray(self.lutDO3I1Handle)[indices[:-1]]),
preFactor * (I0 * snmm.getArray(self.lutDLnTauHandle)[indices[:-1]] -
I10 * I0 * snmm.getArray(self.lutDLnTauI1Handle)[indices[:-1]]),
preFactor * (I0 * snmm.getArray(self.lutDAlphaHandle)[indices[:-1]] -
I10 * I0 * snmm.getArray(self.lutDAlphaI1Handle)[indices[:-1]]))
def computeSEDSlopes(self, objectSedColor):
"""
Compute SED slopes using the SED look-up table. Experimental.
parameters
----------
objectSedColor: float array
Color used for SED look-up (typically g-i)
"""
indices = np.clip(np.searchsorted(self.sedColor, objectSedColor),0,self.sedColor.size-2)
# right now, a straight match to the nearest sedColor (g-i);
# noise in the observed g-i color will make this matching bounce between
# neighboring templates, which is a known concern with this approach.
return self.sedLUT['FPRIME'][indices,:]
def computeStepUnits(self, stepUnitReference, stepGrain, meanNightDuration,
meanWashIntervalDuration, fitBands, bands, nCampaignNights):
"""
Compute normalization factors for fit step units. Note that this might need
to be tweaked.
parameters
----------
stepUnitReference: float
How much should a typical step move things? 0.001 mag is default.
stepGrain: float
Additional fudge factor to apply to all steps.
meanNightDuration: float
Mean duration of a night (days).
meanWashIntervalDuration: float
Mean duration between washes (days).
fitBands: string array
Which bands are used for the fit?
bands: string array
What are all the bands?
nCampaignNights: int
Total number of nights in observing campaign to be calibrated.
"""
unitDict = {}
# bigger unit, smaller step
# compute tau units
deltaMagLnTau = (2.5*np.log10(np.exp(-self.secZenithStd*np.exp(self.lnTauStd))) -
2.5*np.log10(np.exp(-self.secZenithStd*np.exp(self.lnTauStd+1.0))))
unitDict['lnTauUnit'] = np.abs(deltaMagLnTau) / stepUnitReference / stepGrain
unitDict['lnTauUnit'] /= 5.0
# FIXME?
unitDict['lnTauSlopeUnit'] = unitDict['lnTauUnit'] * meanNightDuration
# look for first use of 'g' or 'r' band in filterToBand...
# this is the reference filter for tau/alpha
alphaFilterIndex = -1
for i,filterName in enumerate(self.filterNames):
if (self.filterToBand[filterName] == 'g' or
self.filterToBand[filterName] == 'r'):
alphaFilterIndex = i
break
if alphaFilterIndex == -1:
# We don't have anything here...
# Just set this to 1.0, since it's not sensitive?
unitDict['alphaUnit'] = 1.0 / stepUnitReference / stepGrain
else:
deltaMagAlpha = (2.5*np.log10(np.exp(-self.secZenithStd*self.tauStd*(self.lambdaStd[alphaFilterIndex]/self.lambdaNorm)**self.alphaStd)) -
2.5*np.log10(np.exp(-self.secZenithStd*self.tauStd*(self.lambdaStd[alphaFilterIndex]/self.lambdaNorm)**(self.alphaStd+1.0))))
unitDict['alphaUnit'] = np.abs(deltaMagAlpha) / stepUnitReference / stepGrain
# and scale these by fraction of bands affected...
alphaNAffectedBands = 0
for filterName in self.filterNames:
if ((self.filterToBand[filterName] == 'u' and
'u' in fitBands) or
(self.filterToBand[filterName] == 'g' and
'g' in fitBands) or
(self.filterToBand[filterName] == 'r' and
'r' in fitBands)):
alphaNAffectedBands += 1
unitDict['alphaUnit'] *= float(alphaNAffectedBands) / float(len(fitBands))
# pwv units -- reference to z or y or Y
pwvFilterIndex = -1
for i,filterName in enumerate(self.filterNames):
if (self.filterToBand[filterName] == 'z' or
self.filterToBand[filterName] == 'y' or
self.filterToBand[filterName] == 'Y'):
pwvFilterIndex = i
break
if pwvFilterIndex == -1:
unitDict['pwvUnit'] = 1.0 / stepUnitReference / stepGrain
else:
indicesStd = self.getIndices(pwvFilterIndex,self.pwvStd,self.o3Std,np.log(self.tauStd),self.alphaStd,self.secZenithStd,self.nCCD,self.pmbStd)
i0Std = self.computeI0(self.pwvStd,self.o3Std,np.log(self.tauStd),self.alphaStd,self.secZenithStd,self.pmbStd,indicesStd)
indicesPlus = self.getIndices(pwvFilterIndex,self.pwvStd+1.0,self.o3Std,
|
np.log(self.tauStd)
|
numpy.log
|
"""Tests for chebyshev module.
"""
from functools import reduce
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
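# Editor's note: each Tn above lists the power-series coefficients of the n-th
# Chebyshev polynomial in ascending order, e.g. T2 = [-1, 0, 2] encodes
# T_2(x) = 2*x**2 - 1, and T3 = [0, -3, 0, 4] encodes T_3(x) = 4*x**3 - 3*x.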
class TestPrivate:
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants:
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic:
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebpow(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(cheb.chebmul, [c]*j, np.array([1]))
res = cheb.chebpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
#check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = f"At i={i}"
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
|
assert_almost_equal(res, tgt, err_msg=msg)
|
numpy.testing.assert_almost_equal
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Calculate the totality of the eclipse.
#
import ephem
from datetime import datetime
import numpy as n
import matplotlib.pyplot as plt
#Hack to fix missing PROJ4 env var
import os
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
#%% numerically approximate eclipse fraction
def intersection(r0, r1, d, n_s=100):
A1 = n.zeros([n_s, n_s])
A2 = n.zeros([n_s, n_s])
I = n.zeros([n_s, n_s])
x = n.linspace(-2.0 * r0, 2.0 * r0, num=n_s)
y = n.linspace(-2.0 * r0, 2.0 * r0, num=n_s)
xx, yy = n.meshgrid(x, y)
A1[
|
n.sqrt((xx + d)**2.0 + yy**2.0)
|
numpy.sqrt
|
'''
Implementation of the TD Actor-Critic algorithm, using Q as the critic.
Parameters to tweak: environment, n_episodes, alpha1, alpha2, lambda, gamma, the threshold for
the MountainCar environment, the Save variable (saves the plots, weights, rewards (score)
and video), and actor_info and critic_info, which describe the layer configuration
(at most 2 layers) - "1_8" for a single layer, "2_16_16" for two layers.
'''
import argparse
import gym
import matplotlib.pyplot as plt
import numpy as np
from TDac import Actor, Critic
import pandas as pd
import os
# from google.colab import files
### Calculation of mean and standard deviation, and plotting
def plot(episode_rewards, policy, label, alpha, gamma, plot_path):
plt.figure()
plt.suptitle(policy)
plt.title(environment+r"$\alpha $ = "+str(alpha)+r", $\gamma$ = "+str(gamma))
plt.plot(range(len(episode_rewards)),episode_rewards, '.-',label=label)
plt.xlabel('Number of Episodes')
plt.ylabel('Rewards')
plt.legend()
plt.savefig(plot_path+"/" + policy +"Reward.png")
plt.figure()
plt.suptitle(policy)
z1=pd.Series(episode_rewards).rolling(50).mean()
plt.title(environment+r"$\alpha $ = "+str(alpha)+r", $\gamma$ = "+str(gamma)+ ", Best average reward: "+ str(np.max(z1)))
plt.plot(z1,label=label)
plt.xlabel('Number of Episodes')
plt.ylabel('Average Rewards over previous 50 episodes')
plt.legend()
plt.savefig(plot_path+"/" + policy +"cumulative.png")
# plt.show()
def policy_sampling(env,agent,label, alpha, gamma, plot_path,ep=1000):
score_history = []
n_episodes = ep
for i in range(n_episodes):
done = False
score = 0
observation = env.reset()
while not done:
action = agent.choose_action(observation)
observation_,reward, done, info = env.step(action)
observation = observation_
score += reward
score_history.append(score)
print('episode ', i,'score %.1f' % score,
'average_score %.1f' % np.mean(score_history[-50:]))
plot(score_history, "Sampling_Policy",label, alpha, gamma, plot_path)
return [np.mean(score_history), np.std(score_history)]
def policy_max(env,agent,label, alpha, gamma, plot_path,ep=1000):
score_history = []
n_episodes = ep
for i in range(n_episodes):
done = False
score = 0
observation = env.reset()
while not done:
#Get the probability of performing actions
action_prob = agent.policy.predict(observation[np.newaxis, :])
#Get the location(action number) by finding the max position
action = np.argmax(action_prob)
observation_,reward, done, info = env.step(action)
observation = observation_
score += reward
score_history.append(score)
print('episode ', i,'score %.1f' % score,
'average_score %.1f' % np.mean(score_history[-50:]))
plot(score_history,"Max_Policy",label, alpha, gamma, plot_path)
return [np.mean(score_history),
|
np.std(score_history)
|
numpy.std
|
import os
import scipy
import scipy.misc
import torch
import numpy as np
from carla.agent import Agent
from carla.carla_server_pb2 import Control
from agents.imitation.modules.carla_net import CarlaNet
class ImitationLearning(Agent):
def __init__(self, city_name,
avoid_stopping=True,
model_path="model/policy.pth",
visualize=False,
log_name="test_log",
image_cut=[115, 510]):
super(ImitationLearning, self).__init__()
# Agent.__init__(self)
self._image_size = (88, 200, 3)
self._avoid_stopping = avoid_stopping
dir_path = os.path.dirname(__file__)
self._models_path = os.path.join(dir_path, model_path)
self.model = CarlaNet()
if torch.cuda.is_available():
self.model.cuda()
self.load_model()
self.model.eval()
self._image_cut = image_cut
def load_model(self):
if not os.path.exists(self._models_path):
raise RuntimeError('failed to find the models path: %s'
% self._models_path)
checkpoint = torch.load(self._models_path, map_location='cuda:0')
self.model.load_state_dict(checkpoint['state_dict'])
def run_step(self, measurements, sensor_data, directions, target):
control = self._compute_action(
sensor_data['CameraRGB'].data,
measurements.player_measurements.forward_speed,
directions)
return control
def _compute_action(self, rgb_image, speed, direction=None):
rgb_image = rgb_image[self._image_cut[0]:self._image_cut[1], :]
image_input = scipy.misc.imresize(rgb_image, [self._image_size[0],
self._image_size[1]])
image_input = image_input.astype(np.float32)
image_input = np.expand_dims(
np.transpose(image_input, (2, 0, 1)),
axis=0)
image_input =
|
np.multiply(image_input, 1.0 / 255.0)
|
numpy.multiply
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.dataloading import DataLoaderBase
class MockBatchGenerator(DataLoaderBase):
def generate_train_batch(self):
#Sample randomly from data
idx = np.random.choice(self._data[0].shape[0], self.BATCH_SIZE, False, None)
# copy data to ensure that we are not modifying the original dataset with subsequent augmentation techniques!
x = np.array(self._data[0][idx])
y = np.array(self._data[1][idx])
data_dict = {"data": x,
"seg": y}
return data_dict
class MockRepeatBatchGenerator(DataLoaderBase):
def generate_train_batch(self):
# copy data to ensure that we are not modifying the original dataset with subsequent augmentation techniques!
x = np.repeat(self._data[0], repeats=self.BATCH_SIZE, axis=0)
y = np.repeat(self._data[1], repeats=self.BATCH_SIZE, axis=0)
data_dict = {"data": x,
"seg": y}
return data_dict
class DummyGenerator(DataLoaderBase):
def __init__(self, dataset_size, batch_size, fill_data='random', fill_seg='ones'):
if fill_data == "random":
data = np.random.random(dataset_size)
else:
raise NotImplementedError
if fill_seg == "ones":
seg = np.ones(dataset_size)
else:
raise NotImplementedError
super(DummyGenerator, self).__init__((data, seg), batch_size, None, False)
def generate_train_batch(self):
idx =
|
np.random.choice(self._data[0].shape[0])
|
numpy.random.choice
|
import gym
import copy
import math
import random
import numpy as np
from HAL_9000.activation_functions import Sigmoid, ReLU, LeakyReLU, TanH, ELU, Softmax
class Layer(object):
def set_input_shape(self, shape):
self.input_shape = shape
def layer_name(self):
return self.__class__.__name__
def n_parameters(self):
return 0
def forward_propagation(self, X, training):
raise NotImplementedError()
def backward_propagation(self, grad):
raise NotImplementedError()
def output_shape(self):
raise NotImplementedError()
class Dense(Layer):
def __init__(self, n_units, input_shape=None):
self.n_units = n_units
self.input_shape = input_shape
self.input = None
self.trainable = True
self.w = None
self.b = None
def init_parameters(self, opt):
i = 1 / math.sqrt(self.input_shape[0])
self.w = np.random.uniform(-i, i, (self.input_shape[0], self.n_units))
self.b = np.zeros((1, self.n_units))
self.w_opt = copy.copy(opt)
self.b_opt = copy.copy(opt)
def n_parameters(self):
return np.prod(np.shape(self.w)) + np.prod(np.shape(self.b))
def forward_propagation(self, X, training=True):
self.layer_input = X
a = X.dot(self.w) + self.b
return a
def backward_propagation(self, grad):
w = self.w
if self.trainable:
grad_w = self.layer_input.T.dot(grad)
grad_b = np.sum(grad, axis=0, keepdims=True)
self.w = self.w_opt.update(self.w, grad_w)
self.b = self.b_opt.update(self.b, grad_b)
grad = grad.dot(w.T)
return grad
def output_shape(self):
return (self.n_units, )
activ_fns = {
'relu': ReLU,
'sigmoid': Sigmoid,
'elu': ELU,
'softmax': Softmax,
'leaky_relu': LeakyReLU,
'tanh': TanH,
}
class Activation(Layer):
def __init__(self, name):
self.activ_name = name
self.activ_fn = activ_fns[name]()
self.trainable = True
def layer_name(self):
return "Activation (%s)" % (self.activ_fn.__class__.__name__)
def forward_propagation(self, X, training=True):
self.layer_input = X
return self.activ_fn(X)
def backward_propagation(self, grad):
return grad * self.activ_fn.gradient(self.layer_input)
def output_shape(self):
return self.input_shape
class Conv2D(Layer):
def __init__(self, n_filters, filter_shape, input_shape=None, padding="same shape", stride=1):
self.n_filters = n_filters
self.filter_shape = filter_shape
self.input_shape = input_shape
self.padding = padding
self.stride = stride
self.trainable = True
def init_parameters(self, opt):
fh, fw = self.filter_shape
c = self.input_shape[0]
i = 1 / math.sqrt(np.prod(self.filter_shape))
self.w = np.random.uniform(-i, i, size=(self.n_filters, c, fh, fw))
self.b = np.zeros((self.n_filters, 1))
self.w_opt = copy.copy(opt)
self.b_opt = copy.copy(opt)
def n_parameters(self):
return np.prod(self.w.shape) + np.prod(self.b.shape)
def forward_propagation(self, X, training=True):
bs, c, h, w = X.shape
self.layer_input = X
self.X_lat = img_2_lat(X, self.filter_shape,
stride=self.stride, output_shape=self.padding)
self.w_lat = self.w.reshape((self.n_filters, -1))
a = self.w_lat.dot(self.X_lat) + self.b
a = a.reshape(self.output_shape() + (bs, ))
return a.transpose(3, 0, 1, 2)
def backward_propagation(self, grad):
grad = grad.transpose(1, 2, 3, 0).reshape(self.n_filters, -1)
if self.trainable:
grad_w = grad.dot(self.X_lat.T).reshape(self.w.shape)
grad_b = np.sum(grad, axis=1, keepdims=True)
self.w = self.w_opt.update(self.w, grad_w)
self.b = self.b_opt.update(self.b, grad_b)
grad = self.w_lat.T.dot(grad)
grad = lat_2_img(grad, self.layer_input.shape, self.filter_shape,
stride=self.stride, output_shape=self.padding)
return grad
def output_shape(self):
c, h, w = self.input_shape
fh, fw = self.filter_shape
ph, pw = get_pads(self.filter_shape, output_shape=self.padding)
oh = (h + np.sum(ph) - fh) / self.stride + 1
ow = (w + np.sum(pw) - fw) / self.stride + 1
return self.n_filters, int(oh), int(ow)
class SlowConv2D(Layer):
def __init__(self, n_filters, filter_shape, input_shape=None, pad=0, stride=1):
self.n_filters = n_filters
self.filter_shape = filter_shape
self.input_shape = input_shape
self.pad = pad
self.stride = stride
self.trainable = True
def init_parameters(self, opt):
fh, fw = self.filter_shape
c = self.input_shape[0]
i = 1 / math.sqrt(np.prod(self.filter_shape))
self.w = np.random.uniform(-i, i, size=(self.n_filters, c, fh, fw))
self.b = np.zeros((self.n_filters))
self.w_opt = copy.copy(opt)
self.b_opt = copy.copy(opt)
def n_parameters(self):
return np.prod(self.w.shape) + np.prod(self.b.shape)
def forward_propagation(self, X, training=True):
self.layer_input = X
N, C, H, W = X.shape
_, _, FH, FW = self.w.shape
X_pad = np.pad(X, [(0,), (0,), (self.pad,), (self.pad,)])
F, H_out, W_out = self.output_shape()
output = np.zeros((N, F, H_out, W_out))
for n in range(N):
for f in range(F):
for h_out in range(H_out):
for w_out in range(W_out):
height, width = h_out * self.stride, w_out * self.stride
output[n, f, h_out, w_out] = np.sum(
X_pad[n, :, height:height+FH, width:width+FW] * self.w[f, :]) + self.b[f]
return output
def backward_propagation(self, grad):
if self.trainable:
grad_w = np.zeros_like(self.w)
grad_b = np.sum(grad, axis=(0, 2, 3))
self.b = self.b_opt.update(self.b, grad_b)
X = self.layer_input
N, C, H, W = X.shape
_, _, FH, FW = self.w.shape
F, H_out, W_out = self.output_shape()
X_pad = np.pad(X, [(0,), (0,), (self.pad,), (self.pad,)])
output = np.zeros_like(X)
grad_xpad = np.zeros_like(X_pad)
for n in range(N):
for f in range(F):
for h_out in range(H_out):
for w_out in range(W_out):
height, width = h_out * self.stride, w_out * self.stride
if self.trainable:
grad_w[f, :] += X_pad[n, :, height:height+FH,
width:width+FW] * grad[n, f, h_out, w_out]
grad_xpad[n, :, height:height+FH, width:width +
FW] += grad[n, f, h_out, w_out] * self.w[f, :]
if self.trainable:
self.w = self.w_opt.update(self.w, grad_w)
output = grad_xpad[:, :, self.pad:self.pad+H, self.pad:self.pad+W]
return output
def output_shape(self):
c, h, w = self.input_shape
fh, fw = self.filter_shape
oh = (h + (2*self.pad) - fh) / self.stride + 1
ow = (w + (2*self.pad) - fw) / self.stride + 1
return self.n_filters, int(oh), int(ow)
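# Editor's worked example of the output-shape formula above (illustrative numbers):
# with input h = w = 32, a 3x3 filter, pad = 1 and stride = 1,
#   oh = (32 + 2*1 - 3) / 1 + 1 = 32, so the spatial size is preserved.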
class LSTM(Layer):
def __init__(self, n_units, input_shape=None):
self.input_shape = input_shape
self.n_units = n_units
self.trainable = True
self.Wout = None
self.Wx = None
self.Wh = None
def init_parameters(self, opt):
T, D = self.input_shape
i = 1 / math.sqrt(D)
self.Wout = np.random.uniform(-i, i, (self.n_units, D))
i = 1 / math.sqrt(4 * self.n_units)
self.Wx = np.random.uniform(-i, i, (D, 4 * self.n_units))
self.Wh = np.random.uniform(-i, i, (self.n_units, 4 * self.n_units))
self.b = np.zeros((4 * self.n_units,))
self.Wx_opt = copy.copy(opt)
self.Wh_opt = copy.copy(opt)
self.Wout_opt = copy.copy(opt)
self.b_opt = copy.copy(opt)
def n_parameters(self):
return np.prod(self.Wout.shape) + np.prod(self.Wx.shape) + np.prod(self.Wh.shape) + np.prod(self.b.shape)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def forward_propagation(self, X, training=True):
self.cache = []
self.layer_input = X
N, T, D = X.shape
self.h = np.zeros((N, T, self.n_units))
prev_h = np.zeros((N, self.n_units))
prev_c = np.zeros((N, self.n_units))
i = 0
f = 0
o = 0
g = 0
gate_i = 0
gate_f = 0
gate_o = 0
gate_g = 0
next_c = np.zeros((N, self.n_units))
next_h = np.zeros((N, self.n_units))
for t in range(T):
self.x = X[:, t, :]
if t == 0:
self.cache.append((self.x, prev_h, prev_c, i, f, o, g, gate_i, gate_f,
gate_o, gate_g, next_c, next_h))
if t == 1:
self.cache.pop(0)
self._step_forward()
next_h = self.cache[-1][-1]
self.h[:, t, :] = next_h
output = np.dot(self.h, self.Wout)
return output
def backward_propagation(self, grad):
X = self.layer_input
N, T, D = X.shape
grad_ = np.zeros_like(X)
grad_Wx = np.zeros_like(self.Wx)
grad_Wh = np.zeros_like(self.Wh)
grad_Wout = np.zeros_like(self.Wout)
grad_b = np.zeros_like(self.b)
self.dprev_c = np.zeros((N, self.n_units))
self.dprev_h = np.zeros((N, self.n_units))
for t in reversed(range(T)):
self.grad_next = grad[:, t, :]
self._step_backward(t)
grad_[:, t, :] = self.dx
grad_Wx += self.dWx
grad_Wh += self.dWh
grad_Wout += self.dWout
grad_b += self.db
for g in [grad_Wh, grad_Wx, grad_Wout, grad_b, grad_]:
np.clip(g, -5, 5, out=g)
self.Wh = self.Wh_opt.update(self.Wh, grad_Wh)
self.Wx = self.Wx_opt.update(self.Wx, grad_Wx)
self.Wout = self.Wout_opt.update(self.Wout, grad_Wout)
self.b = self.b_opt.update(self.b, grad_b)
return grad_
def _step_forward(self):
prev_c, prev_h = self.cache[-1][-2], self.cache[-1][-1]
x = self.x
a = np.dot(prev_h, self.Wh) + np.dot(x, self.Wx) + self.b
i, f, o, g = np.split(a, 4, axis=1)
gate_i, gate_f, gate_o, gate_g = self.sigmoid(
i), self.sigmoid(f), self.sigmoid(o), np.tanh(g)
next_c = gate_f * prev_c + gate_i * gate_g
next_h = gate_o * np.tanh(next_c)
self.cache.append((x, prev_h, prev_c, i, f, o, g, gate_i,
gate_f, gate_o, gate_g, next_c, next_h))
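# Editor's summary of the step above in the usual LSTM notation:
#   i, f, o = sigmoid(a_i), sigmoid(a_f), sigmoid(a_o),  g = tanh(a_g)
#   c_t = f * c_{t-1} + i * g
#   h_t = o * tanh(c_t)
# where a = prev_h @ Wh + x @ Wx + b is split into four equal blocks.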
def _step_backward(self, t):
(x, prev_h, prev_c, i, f, o, g, gate_i, gate_f,
gate_o, gate_g, next_c, next_h) = self.cache[t]
self.dWout = np.dot(next_h.T, self.grad_next)
dnext_h = np.dot(self.grad_next, self.Wout.T)
dnext_h += self.dprev_h
dgate_o = dnext_h * np.tanh(next_c)
dnext_c = dnext_h * gate_o * (1 - np.tanh(next_c)**2)
dnext_c += self.dprev_c
dgate_f = dnext_c * prev_c
dgate_i = dnext_c * gate_g
dgate_g = dnext_c * gate_i
self.dprev_c = dnext_c * gate_f
dg = dgate_g * (1 - np.tanh(g) ** 2)
do = dgate_o * self.sigmoid(o) * (1 - self.sigmoid(o))
df = dgate_f * self.sigmoid(f) * (1 - self.sigmoid(f))
di = dgate_i * self.sigmoid(i) * (1 - self.sigmoid(i))
dinputs = np.concatenate((di, df, do, dg), axis=1)
self.dx = np.dot(dinputs, self.Wx.T)
self.dprev_h = np.dot(dinputs, self.Wh.T)
self.dWx = np.dot(x.T, dinputs)
self.dWh = np.dot(prev_h.T, dinputs)
self.db = np.sum(dinputs, axis=0)
def output_shape(self):
return self.input_shape
class VanillaRNN(Layer):
def __init__(self, n_units, input_shape=None):
self.input_shape = input_shape
self.n_units = n_units
self.trainable = True
self.Whh = None
self.Wxh = None
self.Why = None
def init_parameters(self, opt):
timesteps, input_dim = self.input_shape
i = 1 / math.sqrt(input_dim)
self.Wxh = np.random.uniform(-i, i, (self.n_units, input_dim))
i = 1 / math.sqrt(self.n_units)
self.Whh = np.random.uniform(-i, i, (self.n_units, self.n_units))
self.Why = np.random.uniform(-i, i, (input_dim, self.n_units))
self.b = np.zeros((self.n_units,))
self.Whh_opt = copy.copy(opt)
self.Wxh_opt = copy.copy(opt)
self.Why_opt = copy.copy(opt)
self.b_opt = copy.copy(opt)
def n_parameters(self):
return np.prod(self.Whh.shape) + np.prod(self.Wxh.shape) + np.prod(self.Why.shape) + np.prod(self.b.shape)
def forward_propagation(self, X, training=True):
self.layer_input = X
N, T, D = X.shape
self.total_h_prev = np.zeros((N, T, self.n_units))
self.h = np.zeros((N, T, self.n_units))
self.h_prev = np.zeros((N, self.n_units))
for t in range(T):
self.x = X[:, t, :]
self._step_forward()
self.h[:, t, :] = self.h_next
self.total_h_prev[:, t, :] = self.h_prev
self.h_prev = self.h_next
output = np.dot(self.h, self.Why.T)
return output
def backward_propagation(self, grad):
X = self.layer_input
N, T, D = X.shape
grad_ = np.zeros((N, T, D))
grad_Wxh = np.zeros((self.n_units, D))
grad_Whh = np.zeros((self.n_units, self.n_units))
grad_Why = np.zeros((D, self.n_units))
grad_b = np.zeros((self.n_units,))
self.dh_prev = 0
for t in reversed(range(T)):
self.grad_next = grad[:, t, :]
self._step_backward(t)
grad_[:, t, :] = self.dx
grad_Wxh += self.dWxh
grad_Whh += self.dWhh
grad_Why += self.dWhy
grad_b += self.db
for g in [grad_Whh, grad_Wxh, grad_Why, grad_b, grad_]:
np.clip(g, -5, 5, out=g)
self.Whh = self.Whh_opt.update(self.Whh, grad_Whh)
self.Wxh = self.Wxh_opt.update(self.Wxh, grad_Wxh)
self.Why = self.Why_opt.update(self.Why, grad_Why)
self.b = self.b_opt.update(self.b, grad_b)
return grad_
def _step_forward(self):
h_linear = np.dot(self.h_prev, self.Whh) + \
np.dot(self.x, self.Wxh.T) + self.b
self.h_next = np.tanh(h_linear)
def _step_backward(self, t):
self.dWhy = np.dot(self.grad_next.T, self.h[:, t, :])
dh = np.dot(self.grad_next, self.Why)
dh += self.dh_prev
dh = (1 - (self.h[:, t, :] ** 2)) * dh
self.dh_prev = np.dot(dh, self.Whh.T)
self.dWhh = np.dot(self.total_h_prev[:, t, :].T, dh)
self.dWxh = np.dot(dh.T, self.layer_input[:, t, :])
self.dx =
|
np.dot(dh, self.Wxh)
|
numpy.dot
|
import matplotlib.pyplot as plt
import numpy as np
from modules.Optimizers import *
from modules.Simulate import *
def alphaFn(x):
return (3 if x > 1 else 4) if x < 5 else 0
def grad_approx(fn, x, qual, *params):
return (fn(x + qual, *params) - fn(x - qual, *params)) / (2 * qual)
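# Editor's example of the central-difference approximation above (illustrative only):
#   grad_approx(np.square, 3.0, 1e-5)  ->  approximately 6.0, since d/dx x**2 = 2x at x = 3.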
acc_data = AccelData_CreateFromRotary(RotaryData_CreateFromAlphaFunction(alphaFn, 52, 0.1), 4)
def cost_SimpleRadial(r, ar, ar_next, at, dt):
ardot = (ar_next - ar) / dt
term2 = np.square(at) * dt / r
term3 = 2 * at *
|
np.sqrt(ar / r)
|
numpy.sqrt
|
import os
import numpy as np
import pandas as pd
import pyvista
# VTK imports:
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import PVGeo
from base import TestBase
from PVGeo import interface
# Functionality to test:
from PVGeo.filters import (
AddCellConnToPoints,
ArrayMath,
ArraysToRGBA,
BuildSurfaceFromPoints,
CombineTables,
ExtractPoints,
LonLatToUTM,
ManySlicesAlongAxis,
ManySlicesAlongPoints,
NormalizeArray,
PercentThreshold,
PointsToTube,
ReshapeTable,
RotatePoints,
RotationTool,
SliceThroughTime,
SlideSliceAlongPoints,
VoxelizePoints,
)
RTOL = 0.000001
###############################################################################
###############################################################################
class TestCombineTables(TestBase):
"""
Test the `CombineTables` filter
"""
def setUp(self):
TestBase.setUp(self)
# Create some input tables
self.t0 = vtk.vtkTable()
self.t1 = vtk.vtkTable()
# Populate the tables
self.n = 100
self.titles = ('Array 0', 'Array 1', 'Array 2')
self.arrs = [None, None, None]
self.arrs[0] = np.random.random(self.n) # Table 0
self.arrs[1] = np.random.random(self.n) # Table 0
self.arrs[2] = np.random.random(self.n) # Table 1
self.t0.AddColumn(interface.convert_array(self.arrs[0], self.titles[0]))
self.t0.AddColumn(interface.convert_array(self.arrs[1], self.titles[1]))
self.t1.AddColumn(interface.convert_array(self.arrs[2], self.titles[2]))
# Now use the `CombineTables` filter:
f = CombineTables()
f.SetInputDataObject(0, self.t0)
f.SetInputDataObject(1, self.t1)
f.Update()
self.TABLE = f.GetOutputDataObject(0)
#########################
def test_shape(self):
"""`CombineTables`: table shape"""
self.assertEqual(self.TABLE.GetNumberOfColumns(), len(self.titles))
self.assertEqual(self.TABLE.GetNumberOfRows(), self.n)
def test_data_array_names(self):
"""`CombineTables`: data array names"""
for i, title in enumerate(self.titles):
self.assertEqual(self.TABLE.GetColumnName(i), title)
def test_data_fidelity(self):
"""`CombineTables`: data fidelity"""
wpdi = dsa.WrapDataObject(self.TABLE)
for i, title in enumerate(self.titles):
arr = wpdi.RowData[title]
self.assertTrue(np.allclose(arr, self.arrs[i], rtol=RTOL))
###############################################################################
class TestReshapeTable(TestBase):
"""
Test the `ReshapeTable` filter
"""
def setUp(self):
TestBase.setUp(self)
# Create some input tables
self.t0 = vtk.vtkTable()
# Populate the tables
self.arrs = [None, None, None]
self.n = 400
self.ncols = 2
self.nrows = int(self.n * len(self.arrs) / self.ncols)
self.titles = ('Array 0', 'Array 1', 'Array 2')
self.arrs[0] = np.random.random(self.n) # Table 0
self.arrs[1] = np.random.random(self.n) # Table 0
self.arrs[2] = np.random.random(self.n) # Table 1
self.t0.AddColumn(interface.convert_array(self.arrs[0], self.titles[0]))
self.t0.AddColumn(interface.convert_array(self.arrs[1], self.titles[1]))
self.t0.AddColumn(interface.convert_array(self.arrs[2], self.titles[2]))
return
def _check_shape(self, table):
self.assertEqual(table.GetNumberOfRows(), self.nrows)
self.assertEqual(table.GetNumberOfColumns(), self.ncols)
return
def _check_data_fidelity(self, table, order):
wpdi = dsa.WrapDataObject(table)
tarr = np.zeros((self.nrows, self.ncols))
for i in range(self.ncols):
tarr[:, i] = wpdi.RowData[i]
arrs = np.array(self.arrs).T
arrs = arrs.flatten()
arrs = np.reshape(arrs, (self.nrows, self.ncols), order=order)
self.assertEqual(tarr.shape, arrs.shape)
self.assertTrue(np.allclose(tarr, arrs, rtol=RTOL))
return
def _check_data_array_titles(self, table, titles):
for i, title in enumerate(titles):
self.assertEqual(table.GetColumnName(i), title)
return
def _generate_output(self, order, titles=None):
f = ReshapeTable()
f.SetInputDataObject(0, self.t0)
f.set_number_of_columns(self.ncols)
f.set_number_of_rows(self.nrows)
f.set_order(order)
if titles is not None:
f.set_names(titles)
f.Update()
return f.GetOutputDataObject(0)
###############
def test_reshape_f(self):
"""`ReshapeTable`: F-order, no input names"""
order = 'F'
table = self._generate_output(order, titles=None)
# Check output:
self._check_shape(table)
self._check_data_fidelity(table, order)
self._check_data_array_titles(
table, ['Field %d' % i for i in range(self.ncols)]
)
return
def test_reshape_f_names(self):
"""`ReshapeTable`: F-order, input names given"""
order = 'F'
titles = ['Title %d' % i for i in range(self.ncols)]
table = self._generate_output(order, titles=titles)
# Check output:
self._check_shape(table)
self._check_data_fidelity(table, order)
self._check_data_array_titles(table, titles)
return
def test_reshape_c(self):
"""`ReshapeTable`: C-order, input names given as string"""
order = 'C'
titles = ['Title %d' % i for i in range(self.ncols)]
ts = ';'.join(t for t in titles)
table = self._generate_output(order, titles=ts)
# Check output:
self._check_shape(table)
self._check_data_fidelity(table, order)
self._check_data_array_titles(table, titles)
return
def test_reshape_c_names(self):
"""`ReshapeTable`: C-order, few input names given"""
order = 'C'
fewtitles = ['Title %d' % i for i in range(self.ncols - 2)]
rest = ['Field %d' % i for i in range(2)]
table = self._generate_output(order, titles=fewtitles)
# Check output:
self._check_shape(table)
self._check_data_fidelity(table, order)
self._check_data_array_titles(table, fewtitles + rest)
return
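# Illustrative sketch (an addition, not part of the original test suite): the
# `order` argument exercised above follows NumPy's reshape semantics, so a tiny
# NumPy-only example shows the difference between 'C' and 'F' ordering.
def _demo_reshape_order():
    flat = np.arange(6)
    c_order = np.reshape(flat, (3, 2), order='C')  # row-major fill: [[0, 1], [2, 3], [4, 5]]
    f_order = np.reshape(flat, (3, 2), order='F')  # column-major fill: [[0, 3], [1, 4], [2, 5]]
    assert c_order[0, 1] == 1 and f_order[0, 1] == 3
    return c_order, f_order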
###############################################################################
ROTATED_TEXT = """326819.497,4407450.636,1287.5
326834.34,4407470.753,1287.5
326849.183,4407490.87,1287.5
326864.027,4407510.986,1287.5
326878.87,4407531.103,1287.5
326893.713,4407551.22,1287.5
326908.556,4407571.336,1287.5
326923.399,4407591.453,1287.5
326938.242,4407611.57,1287.5
326953.086,4407631.686,1287.5"""
ROTATED_POINTS = np.genfromtxt(
(line.encode('utf8') for line in ROTATED_TEXT.split('\n')),
    delimiter=',',
dtype=float,
)
class TestRotationTool(TestBase):
"""
Test the `RotationTool` filter
"""
# An example voxel:
# voxel = np.array([
# [0,0,0],
# [0,0,1],
# [0,1,1],
# [1,1,1],
# [0,1,0],
# [1,0,0],
# [1,1,0],
# ])
def setUp(self):
TestBase.setUp(self)
self.RTOL = 0.00001 # As high as rotation precision can get
return
def test_recovery(self):
"""`RotationTool`: Test a simple rotation recovery"""
r = RotationTool()
# Input points
x = np.array([1.1, 1.1, 1.1, 2.1, 2.1, 2.1])
y = np.array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0])
z = np.zeros(len(x))
x = np.reshape(x, (len(x), -1))
y = np.reshape(y, (len(y), -1))
z = np.reshape(z, (len(z), -1))
pts = np.concatenate((x, y, z), axis=1)
rot = np.deg2rad(-33.3)
pts[:, 0:2] = r.rotate(pts[:, 0:2], rot)
xx, yy, zz, dx, dy, angle = r.estimate_and_rotate(
pts[:, 0], pts[:, 1], pts[:, 2]
)
rpts = np.vstack((xx, yy, zz)).T
self.assertTrue(
np.allclose(angle,
|
np.deg2rad(33.3)
|
numpy.deg2rad
|
import autoarray as aa
import numpy as np
import pytest
class TestVisiblities:
def test__real_visibilities__intensity_image_all_ones__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.ones(shape_2d=(1, 1))
real_visibilities = transformer.real_visibilities_from_image(image=image)
assert (real_visibilities == np.ones(shape=4)).all()
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.ones(shape_2d=(1, 2))
real_visibilities = transformer.real_visibilities_from_image(image=image)
print(real_visibilities)
assert real_visibilities == pytest.approx(
np.array([-0.091544, -0.73359736, -0.613160]), 1.0e-4
)
def test__real_visibilities__intensity_image_varies__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[2.0]])
real_visibilities = transformer.real_visibilities_from_image(image=image)
assert (real_visibilities == np.array([2.0])).all()
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[3.0, 6.0]])
real_visibilities = transformer.real_visibilities_from_image(image=image)
assert real_visibilities == pytest.approx(
np.array([-2.46153, -5.14765, -3.11681]), 1.0e-4
)
def test__real_visibilities__preload_and_non_preload_give_same_answer(self):
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer_preload = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=True,
)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[2.0, 6.0]])
real_visibilities_via_preload = transformer_preload.real_visibilities_from_image(
image=image
)
real_visibilities = transformer.real_visibilities_from_image(image=image)
assert (real_visibilities_via_preload == real_visibilities).all()
def test__imag_visibilities__intensity_image_all_ones__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.ones(shape_2d=(1, 1))
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert imag_visibilities == pytest.approx(np.zeros(shape=4), 1.0e-4)
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.ones(shape_2d=(2, 1))
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert imag_visibilities == pytest.approx(
np.array([-1.45506, -0.781201, -0.077460]), 1.0e-4
)
def test__imag_visibilities__intensity_image_varies__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[2.0]])
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert imag_visibilities == pytest.approx(np.zeros((4,)), 1.0e-4)
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[3.0, 6.0]])
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert imag_visibilities == pytest.approx(
np.array([-6.418822, -1.78146, 2.48210]), 1.0e-4
)
def test__imag_visibilities__preload_and_non_preload_give_same_answer(self):
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer_preload = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=True,
)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[2.0, 6.0]])
imag_visibilities_via_preload = transformer_preload.imag_visibilities_from_image(
image=image
)
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert (imag_visibilities_via_preload == imag_visibilities).all()
def test__visiblities_from_image__same_as_individual_calculations_above(self):
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
image = aa.array.manual_2d([[3.0, 6.0]])
visibilities = transformer.visibilities_from_image(image=image)
assert visibilities[:, 0] == pytest.approx(
np.array([-2.46153, -5.14765, -3.11681]), 1.0e-4
)
assert visibilities[:, 1] == pytest.approx(
np.array([-6.418822, -1.78146, 2.48210]), 1.0e-4
)
real_visibilities = transformer.real_visibilities_from_image(image=image)
imag_visibilities = transformer.imag_visibilities_from_image(image=image)
assert (visibilities[:, 0] == real_visibilities).all()
assert (visibilities[:, 1] == imag_visibilities).all()
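# Illustrative pure-NumPy cross-check (an addition, not part of the original test
# suite): the expected values above are consistent with a direct Fourier transform
# real_k = sum_j I_j * cos(2*pi*(u_k*x_j + v_k*y_j)) and imag_k = -sum_j I_j * sin(...),
# with each grid entry ordered as (y, x) and unit image intensities.
def _demo_direct_fourier_transform():
    uv = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
    grid_yx = np.array([[0.1, 0.2], [0.3, 0.4]])
    phase = 2.0 * np.pi * (
        uv[:, 0, None] * grid_yx[None, :, 1] + uv[:, 1, None] * grid_yx[None, :, 0]
    )
    real = np.cos(phase).sum(axis=1)
    imag = -np.sin(phase).sum(axis=1)
    assert np.allclose(real, [-0.091544, -0.733597, -0.613160], atol=1.0e-4)
    assert np.allclose(imag, [-1.455060, -0.781201, -0.077460], atol=1.0e-4)
    return real, imag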
class TestVisiblitiesMappingMatrix:
def test__real_visibilities__mapping_matrix_all_ones__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.ones(shape=(1, 1))
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert (transformed_mapping_matrix == np.ones(shape=(4, 1))).all()
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.ones(shape=(2, 1))
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
print(transformed_mapping_matrix)
assert transformed_mapping_matrix == pytest.approx(
np.array([[-0.091544], [-0.733597], [-0.613160]]), 1.0e-4
)
mapping_matrix = np.ones(shape=(2, 2))
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array(
[[-0.091544, -0.091544], [-0.733597, -0.733597], [-0.61316, -0.61316]]
),
1.0e-4,
)
def test__real_visibilities__more_complex_mapping_matrix(self):
grid_radians = aa.grid.manual_2d(
[[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]], pixel_scales=1.0
)
uv_wavelengths = np.array([[0.7, 0.8], [0.9, 1.0]])
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.array([[1.0], [0.0], [0.0]])
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array([[0.18738], [-0.18738]]), 1.0e-4
)
mapping_matrix = np.array([[0.0], [1.0], [0.0]])
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
print(transformed_mapping_matrix)
assert transformed_mapping_matrix == pytest.approx(
np.array([[-0.992111], [-0.53582]]), 1.0e-4
)
mapping_matrix = np.array([[0.0, 0.5], [0.0, 0.2], [1.0, 0.0]])
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array([[0.42577, -0.10473], [0.968583, -0.20085]]), 1.0e-4
)
def test__real_visibilities__preload_and_non_preload_give_same_answer(self):
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer_preload = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=True,
)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.array([[3.0, 5.0], [1.0, 2.0]])
transformed_mapping_matrix_preload = transformer_preload.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
transformed_mapping_matrix = transformer.real_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert (transformed_mapping_matrix_preload == transformed_mapping_matrix).all()
def test__imag_visibilities__mapping_matrix_all_ones__simple_cases(self):
uv_wavelengths = np.ones(shape=(4, 2))
grid_radians = aa.grid.manual_2d([[[1.0, 1.0]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.ones(shape=(1, 1))
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.zeros(shape=(4, 1)), 1.0e-4
)
uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]])
grid_radians = aa.grid.manual_2d([[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0)
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.ones(shape=(2, 1))
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array([[-1.455060], [-0.78120], [-0.07746]]), 1.0e-4
)
mapping_matrix = np.ones(shape=(2, 2))
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array(
[[-1.45506, -1.45506], [-0.78120, -0.78120], [-0.07746, -0.07746]]
),
1.0e-4,
)
def test__imag_visibilities__more_complex_mapping_matrix(self):
grid_radians = aa.grid.manual_2d(
[[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]], pixel_scales=1.0
)
uv_wavelengths = np.array([[0.7, 0.8], [0.9, 1.0]])
transformer = aa.transformer(
uv_wavelengths=uv_wavelengths,
grid_radians=grid_radians,
preload_transform=False,
)
mapping_matrix = np.array([[1.0], [0.0], [0.0]])
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array([[-0.982287], [-0.982287]]), 1.0e-4
)
mapping_matrix = np.array([[0.0], [1.0], [0.0]])
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
np.array([[0.12533], [0.84432]]), 1.0e-4
)
mapping_matrix = np.array([[0.0, 0.5], [0.0, 0.2], [1.0, 0.0]])
transformed_mapping_matrix = transformer.imag_transformed_mapping_matrix_from_mapping_matrix(
mapping_matrix=mapping_matrix
)
assert transformed_mapping_matrix == pytest.approx(
|
np.array([[0.90482, -0.46607], [-0.24868, -0.32227]])
|
numpy.array
|
"""Rank genes according to differential expression.
"""
from math import floor
from typing import Iterable, Union, Optional
import numpy as np
import pandas as pd
from anndata import AnnData
from scipy.sparse import issparse, vstack
from .. import _utils
from .. import logging as logg
from ..preprocessing._simple import _get_mean_var
from .._compat import Literal
from ..get import _get_obs_rep
_Method = Optional[Literal['logreg', 't-test', 'wilcoxon', 't-test_overestim_var']]
_CorrMethod = Literal['benjamini-hochberg', 'bonferroni']
def _select_top_n(scores, n_top):
n_from = scores.shape[0]
reference_indices = np.arange(n_from, dtype=int)
partition = np.argpartition(scores, -n_top)[-n_top:]
partial_indices = np.argsort(scores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
return global_indices
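# Minimal usage sketch (an addition, not part of the original module): the
# argpartition-based selection above returns the indices of the n_top largest
# scores, ordered from highest to lowest.
def _demo_select_top_n():
    scores = np.array([0.1, 0.9, 0.3, 0.7])
    top = _select_top_n(scores, n_top=2)
    assert list(top) == [1, 3]  # indices of 0.9 and 0.7, in descending score order
    return top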
def _ranks(X, mask=None, mask_rest=None):
CONST_MAX_SIZE = 10000000
n_genes = X.shape[1]
if issparse(X):
merge = lambda tpl: vstack(tpl).toarray()
adapt = lambda X: X.toarray()
else:
merge = np.vstack
adapt = lambda X: X
masked = mask is not None and mask_rest is not None
if masked:
n_cells = np.count_nonzero(mask) + np.count_nonzero(mask_rest)
get_chunk = lambda X, left, right: merge(
(X[mask, left:right], X[mask_rest, left:right])
)
else:
n_cells = X.shape[0]
get_chunk = lambda X, left, right: adapt(X[:, left:right])
# Calculate chunk frames
max_chunk = floor(CONST_MAX_SIZE / n_cells)
for left in range(0, n_genes, max_chunk):
right = min(left + max_chunk, n_genes)
df = pd.DataFrame(data=get_chunk(X, left, right))
ranks = df.rank()
yield ranks, left, right
def _tiecorrect(ranks):
size = np.float64(ranks.shape[0])
if size < 2:
        return np.ones(ranks.shape[1])  # no tie correction is possible with fewer than two rows
arr = np.sort(ranks, axis=0)
tf = np.insert(arr[1:] != arr[:-1], (0, arr.shape[0] - 1), True, axis=0)
idx = np.where(tf, np.arange(tf.shape[0])[:, None], 0)
idx = np.sort(idx, axis=0)
cnt = np.diff(idx, axis=0).astype(np.float64)
return 1.0 - (cnt ** 3 - cnt).sum(axis=0) / (size ** 3 - size)
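# Worked example (an addition, not part of the original module): for the ranks of
# [1, 2, 2, 3] there is one tie group of size 2, so the Wilcoxon tie-correction
# factor is 1 - (2**3 - 2) / (4**3 - 4) = 0.9.
def _demo_tiecorrect():
    ranks = np.array([[1.0], [2.5], [2.5], [4.0]])  # one column of ranks with a single tie
    assert np.allclose(_tiecorrect(ranks), 0.9)
    return _tiecorrect(ranks)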
class _RankGenes:
def __init__(
self,
adata,
groups,
groupby,
reference='rest',
use_raw=True,
layer=None,
comp_pts=False,
):
if 'log1p' in adata.uns_keys() and adata.uns['log1p']['base'] is not None:
self.expm1_func = lambda x: np.expm1(x * np.log(adata.uns['log1p']['base']))
else:
self.expm1_func = np.expm1
self.groups_order, self.groups_masks = _utils.select_groups(
adata, groups, groupby
)
adata_comp = adata
if layer is not None:
if use_raw:
raise ValueError("Cannot specify `layer` and have `use_raw=True`.")
X = adata_comp.layers[layer]
else:
if use_raw and adata.raw is not None:
adata_comp = adata.raw
X = adata_comp.X
# for correct getnnz calculation
if issparse(X):
X.eliminate_zeros()
self.X = X
self.var_names = adata_comp.var_names
self.ireference = None
if reference != 'rest':
self.ireference = np.where(self.groups_order == reference)[0][0]
self.means = None
self.vars = None
self.means_rest = None
self.vars_rest = None
self.comp_pts = comp_pts
self.pts = None
self.pts_rest = None
self.stats = None
# for logreg only
self.grouping_mask = adata.obs[groupby].isin(self.groups_order)
self.grouping = adata.obs.loc[self.grouping_mask, groupby]
def _basic_stats(self):
n_genes = self.X.shape[1]
n_groups = self.groups_masks.shape[0]
self.means = np.zeros((n_groups, n_genes))
self.vars = np.zeros((n_groups, n_genes))
self.pts = np.zeros((n_groups, n_genes)) if self.comp_pts else None
if self.ireference is None:
self.means_rest = np.zeros((n_groups, n_genes))
self.vars_rest = np.zeros((n_groups, n_genes))
self.pts_rest = np.zeros((n_groups, n_genes)) if self.comp_pts else None
else:
mask_rest = self.groups_masks[self.ireference]
X_rest = self.X[mask_rest]
self.means[self.ireference], self.vars[self.ireference] = _get_mean_var(
X_rest
)
# deleting the next line causes a memory leak for some reason
del X_rest
if issparse(self.X):
get_nonzeros = lambda X: X.getnnz(axis=0)
else:
get_nonzeros = lambda X: np.count_nonzero(X, axis=0)
for imask, mask in enumerate(self.groups_masks):
X_mask = self.X[mask]
if self.comp_pts:
self.pts[imask] = get_nonzeros(X_mask) / X_mask.shape[0]
if self.ireference is not None and imask == self.ireference:
continue
self.means[imask], self.vars[imask] = _get_mean_var(X_mask)
if self.ireference is None:
mask_rest = ~mask
X_rest = self.X[mask_rest]
self.means_rest[imask], self.vars_rest[imask] = _get_mean_var(X_rest)
# this can be costly for sparse data
if self.comp_pts:
self.pts_rest[imask] = get_nonzeros(X_rest) / X_rest.shape[0]
# deleting the next line causes a memory leak for some reason
del X_rest
def t_test(self, method):
from scipy import stats
self._basic_stats()
for group_index, mask in enumerate(self.groups_masks):
if self.ireference is not None and group_index == self.ireference:
continue
mean_group = self.means[group_index]
var_group = self.vars[group_index]
ns_group = np.count_nonzero(mask)
if self.ireference is not None:
mean_rest = self.means[self.ireference]
var_rest = self.vars[self.ireference]
ns_other = np.count_nonzero(self.groups_masks[self.ireference])
else:
mean_rest = self.means_rest[group_index]
var_rest = self.vars_rest[group_index]
ns_other = self.X.shape[0] - ns_group
if method == 't-test':
ns_rest = ns_other
elif method == 't-test_overestim_var':
# hack for overestimating the variance for small groups
ns_rest = ns_group
else:
raise ValueError('Method does not exist.')
# TODO: Come up with better solution. Mask unexpressed genes?
# See https://github.com/scipy/scipy/issues/10269
with np.errstate(invalid="ignore"):
scores, pvals = stats.ttest_ind_from_stats(
mean1=mean_group,
std1=np.sqrt(var_group),
nobs1=ns_group,
mean2=mean_rest,
std2=np.sqrt(var_rest),
nobs2=ns_rest,
equal_var=False, # Welch's
)
# I think it's only nan when means are the same and vars are 0
scores[np.isnan(scores)] = 0
# This also has to happen for <NAME>
pvals[
|
np.isnan(pvals)
|
numpy.isnan
|
#-*- coding: utf-8 -*-
import numpy as np
from layers import *
from dropout_layers import *
def batchnorm_forward(x, gamma, beta, bn_param):
"""
使用使用类似动量衰减的运行时平均,计算总体均值与方差 例如:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Input:
- x: 数据(N, D)
- gamma: 缩放参数 (D,)
- beta: 平移参数 (D,)
- bn_param: 字典型,使用下列键值:
- mode: 'train' 或'test';
- eps: 保证数值稳定
- momentum: 运行时平均衰减因子
- running_mean: 形状为(D,)的运行时均值
- running_var : 形状为 (D,)的运行时方差
Returns 元组:
- out: 输出(N, D)
- cache: 用于反向传播的缓存
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
# Forward pass
# Step 1 - shape of mu (D,)
mu = 1 / float(N) * np.sum(x, axis=0)
# Step 2 - shape of var (N,D)
xmu = x - mu
# Step 3 - shape of carre (N,D)
carre = xmu**2
# Step 4 - shape of var (D,)
var = 1 / float(N) * np.sum(carre, axis=0)
# Step 5 - Shape sqrtvar (D,)
sqrtvar = np.sqrt(var + eps)
# Step 6 - Shape invvar (D,)
invvar = 1. / sqrtvar
# Step 7 - Shape va2 (N,D)
va2 = xmu * invvar
# Step 8 - Shape va3 (N,D)
va3 = gamma * va2
# Step 9 - Shape out (N,D)
out = va3 + beta
running_mean = momentum * running_mean + (1.0 - momentum) * mu
running_var = momentum * running_var + (1.0 - momentum) * var
cache = (mu, xmu, carre, var, sqrtvar, invvar,va2, va3, gamma, beta, x, bn_param)
elif mode == 'test':
        # Normalize the data using the running mean and variance
mu = running_mean
var = running_var
xhat = (x - mu) / np.sqrt(var + eps)
        # Scale and shift the data with the gamma and beta parameters.
out = gamma * xhat + beta
cache = (mu, var, gamma, beta, bn_param)
else:
        raise ValueError('Unrecognized batch normalization mode: "%s"' % mode)
    # Update the running mean and variance
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
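# Quick sanity sketch (an addition, not part of the original file): with gamma=1 and
# beta=0, the train-mode output of batchnorm_forward should be roughly zero-mean and
# unit-variance along each feature column.
def _demo_batchnorm_forward():
    np.random.seed(0)
    x = np.random.randn(200, 3) * 4.0 + 10.0
    out, _ = batchnorm_forward(x, np.ones(3), np.zeros(3), {'mode': 'train'})
    assert np.allclose(out.mean(axis=0), 0.0, atol=1e-7)
    assert np.allclose(out.std(axis=0), 1.0, atol=1e-3)
    return out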
def batchnorm_backward(dout, cache):
"""
BN反向传播
Inputs:
- dout: 上层梯度 (N, D)
- cache: 前向传播时的缓存.
Returns 元组:
- dx: 数据梯度 (N, D)
- dgamma: gamma梯度 (D,)
- dbeta: beta梯度 (D,)
"""
dx, dgamma, dbeta = None, None, None
mu, xmu, carre, var, sqrtvar, invvar, va2, va3, gamma, beta, x, bn_param = cache
eps = bn_param.get('eps', 1e-5)
N, D = dout.shape
# Backprop Step 9
dva3 = dout
dbeta = np.sum(dout, axis=0)
# Backprop step 8
dva2 = gamma * dva3
dgamma = np.sum(va2 * dva3, axis=0)
# Backprop step 7
dxmu = invvar * dva2
dinvvar = np.sum(xmu * dva2, axis=0)
# Backprop step 6
dsqrtvar = -1. / (sqrtvar**2) * dinvvar
# Backprop step 5
dvar = 0.5 * (var + eps)**(-0.5) * dsqrtvar
# Backprop step 4
dcarre = 1 / float(N) * np.ones((carre.shape)) * dvar
# Backprop step 3
dxmu += 2 * xmu * dcarre
# Backprop step 2
dx = dxmu
dmu = -
|
np.sum(dxmu, axis=0)
|
numpy.sum
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda
from scipy.stats import entropy
from matplotlib.lines import Line2D
import config as cf
from targets import target_distribution_gen
def build_model():
cf.pnn.inputsize = 3 # Number of hidden variables, i.e. alpha, beta, gamma
""" Build NN for triangle """
# Hidden variables as inputs.
inputTensor = Input((cf.pnn.inputsize,))
# Group input tensor according to whether alpha, beta or gamma hidden variable.
group_alpha = Lambda(lambda x: x[:,:1], output_shape=((1,)))(inputTensor)
group_beta = Lambda(lambda x: x[:,1:2], output_shape=((1,)))(inputTensor)
group_gamma = Lambda(lambda x: x[:,2:3], output_shape=((1,)))(inputTensor)
# Neural network at the sources, for pre-processing (e.g. for going from uniform distribution to non-uniform one)
## Note that in the example code greek_depth is set to 0, so this part is trivial.
for _ in range(cf.pnn.greek_depth):
group_alpha = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_alpha)
group_beta = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_beta)
group_gamma = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_gamma)
# Route hidden variables to visibile parties Alice, Bob and Charlie
group_a = Concatenate()([group_beta,group_gamma])
group_b = Concatenate()([group_gamma,group_alpha])
group_c = Concatenate()([group_alpha,group_beta])
# Neural network at the parties Alice, Bob and Charlie.
    ## Note: increasing the variance of the initialization seemed to help in some cases, especially when the number of outputs per party is 4 or more.
kernel_init = tf.keras.initializers.VarianceScaling(scale=cf.pnn.weight_init_scaling, mode='fan_in', distribution='truncated_normal', seed=None)
for _ in range(cf.pnn.latin_depth):
group_a = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_a)
group_b = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_b)
group_c = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_c)
# Apply final softmax layer
group_a = Dense(cf.pnn.a_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_a)
group_b = Dense(cf.pnn.b_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_b)
group_c = Dense(cf.pnn.c_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_c)
outputTensor = Concatenate()([group_a,group_b,group_c])
model = Model(inputTensor,outputTensor)
return model
def np_euclidean_distance(p,q=0):
""" Euclidean distance, useful for plotting results."""
return np.sqrt(np.sum(np.square(p-q),axis=-1))
def np_distance(p,q=0):
""" Same as the distance used in the loss function, just written for numpy arrays."""
if cf.pnn.loss.lower() == 'l2':
return np.sum(np.square(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'l1':
return 0.5*np.sum(np.abs(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'kl':
p = np.clip(p, K.epsilon(), 1)
q = np.clip(q, K.epsilon(), 1)
return np.sum(p * np.log(np.divide(p,q)), axis=-1)
elif cf.pnn.loss.lower() == 'js':
p = np.clip(p, K.epsilon(), 1)
q = np.clip(q, K.epsilon(), 1)
avg = (p+q)/2
return np.sum(p * np.log(np.divide(p,avg)), axis=-1) + np.sum(q * np.log(np.divide(q,avg)), axis=-1)
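# Small cross-check (an addition, not part of the original file): the 'kl' branch above
# is the standard Kullback-Leibler divergence, which scipy.stats.entropy (imported at the
# top of this file) also computes, so the two agree on normalized distributions.
def _demo_kl_matches_scipy():
    p = np.array([0.5, 0.3, 0.2])
    q = np.array([0.4, 0.4, 0.2])
    kl_manual = np.sum(p * np.log(np.divide(p, q)), axis=-1)
    assert np.isclose(kl_manual, entropy(p, q))
    return kl_manual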
def keras_distance(p,q):
""" Distance used in loss function. """
if cf.pnn.loss.lower() == 'l2':
return K.sum(K.square(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'l1':
return 0.5*K.sum(K.abs(p-q), axis=-1)
elif cf.pnn.loss.lower() == 'kl':
p = K.clip(p, K.epsilon(), 1)
q = K.clip(q, K.epsilon(), 1)
return K.sum(p * K.log(p / q), axis=-1)
elif cf.pnn.loss.lower() == 'js':
p = K.clip(p, K.epsilon(), 1)
q = K.clip(q, K.epsilon(), 1)
avg = (p+q)/2
return K.sum(p * K.log(p / avg), axis=-1) + K.sum(q * K.log(q / avg), axis=-1)
def customLoss_distr(y_pred):
""" Converts the output of the neural network to a probability vector.
That is from a shape of (batch_size, a_outputsize + b_outputsize + c_outputsize) to a shape of (a_outputsize * b_outputsize * c_outputsize,)
"""
a_probs = y_pred[:,0:cf.pnn.a_outputsize]
b_probs = y_pred[:,cf.pnn.a_outputsize : cf.pnn.a_outputsize + cf.pnn.b_outputsize]
c_probs = y_pred[:,cf.pnn.a_outputsize + cf.pnn.b_outputsize : cf.pnn.a_outputsize + cf.pnn.b_outputsize + cf.pnn.c_outputsize]
a_probs = K.reshape(a_probs,(-1,cf.pnn.a_outputsize,1,1))
b_probs = K.reshape(b_probs,(-1,1,cf.pnn.b_outputsize,1))
c_probs = K.reshape(c_probs,(-1,1,1,cf.pnn.c_outputsize))
probs = a_probs*b_probs*c_probs
probs = K.mean(probs,axis=0)
probs = K.flatten(probs)
return probs
def customLoss(y_true,y_pred):
""" Custom loss function."""
# Note that y_true is just batch_size copies of the target distributions. So any row could be taken here. We just take 0-th row.
return keras_distance(y_true[0,:], customLoss_distr(y_pred))
# Set up generator for X and Y data
training_mean = 0.5
training_sigma = 0.28867513459 #= np.sqrt(1/12)
def generate_xy_batch():
while True:
temp = np.divide((np.random.random((cf.pnn.batch_size, cf.pnn.inputsize)) - training_mean),training_sigma)
yield (temp, cf.pnn.y_true)
def generate_x_test():
while True:
temp = np.divide((np.random.random((cf.pnn.batch_size_test, cf.pnn.inputsize)) - training_mean),training_sigma)
yield temp
def single_evaluation(model):
""" Evaluates the model and returns the resulting distribution as a numpy array. """
test_pred = model.predict_generator(generate_x_test(), steps=1, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
result = K.eval(customLoss_distr(test_pred))
return result
def single_run():
""" Runs training algorithm for a single target distribution. Returns model."""
# Model and optimizer related setup.
K.clear_session()
model = build_model()
if cf.pnn.start_from is not None:
print("LOADING MODEL WEIGHTS FROM", cf.pnn.start_from)
model = load_model(cf.pnn.start_from,custom_objects={'customLoss': customLoss})
if cf.pnn.optimizer.lower() == 'adadelta':
optimizer = tf.keras.optimizers.Adadelta(lr=cf.pnn.lr, rho=0.95, epsilon=None, decay=cf.pnn.decay)
elif cf.pnn.optimizer.lower() == 'sgd':
optimizer = tf.keras.optimizers.SGD(lr=cf.pnn.lr, decay=cf.pnn.decay, momentum=cf.pnn.momentum, nesterov=True)
else:
optimizer = tf.keras.optimizers.SGD(lr=cf.pnn.lr, decay=cf.pnn.decay, momentum=cf.pnn.momentum, nesterov=True)
print("\n\nWARNING!!! Optimizer {} not recognized. Please implement it if you want to use it. Using SGD instead.\n\n".format(cf.pnn.optimizer))
cf.pnn.optimizer = 'sgd' # set it for consistency.
model.compile(loss=customLoss, optimizer = optimizer, metrics=[])
# Fit model
model.fit_generator(generate_xy_batch(), steps_per_epoch=cf.pnn.no_of_batches, epochs=1, verbose=1, validation_data=generate_xy_batch(), validation_steps=cf.pnn.no_of_validation_batches, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=False, initial_epoch=0)
return model
def compare_models(model1,model2):
""" Evaluates two models for p_target distribution and return one which is closer to it."""
result1 = single_evaluation(model1)
result2 = single_evaluation(model2)
if np_distance(result1, cf.pnn.p_target) < np_distance(result2, cf.pnn.p_target):
return model1, 1
else:
return model2, 2
def update_results(model_new,i):
""" Updates plots and results if better than the one I loaded the model from in this round.
If I am in last sample of the sweep I will plot no matter one, so that there is at least one plot per sweep.
"""
result_new = single_evaluation(model_new)
distance_new = np_distance(result_new, cf.pnn.p_target)
# Decide whether to use new or old model.
if cf.pnn.start_from is not None: # skips this comparison if I was in a fresh_start
try:
model_old = load_model('./saved_models/best_'+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_distributions.shape[0]))))+'.hdf5', custom_objects={'customLoss': customLoss})
result_old = single_evaluation(model_old)
distance_old = np_distance(result_old, cf.pnn.p_target)
if distance_new > distance_old:
print("Moving on. With old model distance is at {}.".format(distance_old))
result = result_old
model = model_old
distance = distance_old
else:
print("Distance imporved! Distance with new model:", distance_new)
result = result_new
model = model_new
distance = distance_new
except FileNotFoundError:
print("This distance:", distance_new)
result = result_new
model = model_new
distance = distance_new
else:
print("This distance:", distance_new)
result = result_new
model = model_new
distance = distance_new
# Update results
model.save(cf.pnn.savebestpath)
cf.pnn.distributions[i,:] = result
cf.pnn.distances[i] = distance
cf.pnn.euclidean_distances[i] = np_euclidean_distance(result, cf.pnn.p_target)
np.save("./saved_results/target_distributions.npy",cf.pnn.target_distributions)
np.save("./saved_results/distributions.npy",cf.pnn.distributions)
np.save("./saved_results/distances.npy",cf.pnn.distances)
np.save("./saved_results/euclidean_distances.npy",cf.pnn.euclidean_distances)
# Plot distances
plt.clf()
plt.title("D(p_target,p_machine)")
plt.plot(cf.pnn.target_ids,cf.pnn.euclidean_distances, 'ro')
if i!=0 and cf.pnn.sweep_id==0:
plt.ylim(bottom=0,top = np.sort(np.unique(cf.pnn.euclidean_distances))[-2]*1.2)
else:
plt.ylim(bottom=0,top = np.sort(np.unique(cf.pnn.euclidean_distances))[-1]*1.2)
plt.savefig("./figs_training_sweeps/sweep"+str(cf.pnn.sweep_id)+".png")
# Plot distributions
plt.clf()
plt.plot(cf.pnn.p_target,'ro',markersize=5)
plt.plot(result,'gs',alpha = 0.85,markersize=5)
plt.title("Target distr. (in red): {} {:.3f}".format(cf.pnn.target_distr_name, cf.pnn.target_ids[i]))
plt.ylim(bottom=0,top=max(cf.pnn.p_target)*1.2)
plt.savefig("./figs_distributions/target_"+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_ids.shape[0]))))+".png")
# Plot strategies (only turn on if you're really interested, since it takes quite a bit of time to update in each step!)
plot_strategies(i)
def plot_strategies(i):
sample_size = 4000 #how many hidden variable triples to sample from
random_sample_size = 5 #for each hidden variable triple, how many times to sample from strategies.
alpha_value = 0.25# 3/random_sample_size #opacity of dots. 0.1 or 0.25 make for nice paintings.
markersize = 5000/np.sqrt(sample_size)
modelpath = './saved_models/best_'+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_distributions.shape[0]))))+'.hdf5'
input_data = generate_x_test()
inputs = next(input_data)
while inputs.shape[0] < sample_size:
inputs = np.concatenate((inputs, next(input_data)),axis=0)
inputs = inputs[:sample_size,:]
K.clear_session()
model = load_model(modelpath,custom_objects={'customLoss': customLoss})
y = model.predict(inputs)
y_a = y[:,0:cf.pnn.a_outputsize]
y_b = y[:,cf.pnn.a_outputsize:cf.pnn.a_outputsize+cf.pnn.b_outputsize]
y_c = y[:,cf.pnn.a_outputsize+cf.pnn.b_outputsize:cf.pnn.a_outputsize+cf.pnn.b_outputsize+cf.pnn.c_outputsize]
y_a = np.array([np.random.choice(np.arange(cf.pnn.a_outputsize),p=y_a[j,:], size = random_sample_size) for j in range(y_a.shape[0])]).reshape(random_sample_size*sample_size)
y_b = np.array([np.random.choice(np.arange(cf.pnn.b_outputsize),p=y_b[j,:], size = random_sample_size) for j in range(y_b.shape[0])]).reshape(random_sample_size*sample_size)
y_c = np.array([np.random.choice(np.arange(cf.pnn.c_outputsize),p=y_c[j,:], size = random_sample_size) for j in range(y_c.shape[0])]).reshape(random_sample_size*sample_size)
training_mean = 0.5
training_sigma = np.sqrt(1/12)
inputs = inputs* training_sigma + training_mean
# Tile and reshape since we sampled random_sample_size times from each input.
inputs = np.array(np.array([
|
np.tile(inputs[i,:],(random_sample_size,1))
|
numpy.tile
|
import numpy as np
from shapely.geometry import shape, Point, Polygon, MultiLineString, MultiPoint, MultiPolygon, LineString
from scipy.ndimage import morphology
import cPickle as pickle
from metrics_utils import *
def extract_polygon_props(islands,
network_lines,
interior_channels):
interior_channel_lengths = [sum([network_lines[j].length
for j in interior_channels[i]]) / 1e3
if len(interior_channels[i])>0 else 0
for i in range(len(islands))]
perimeter = np.array([i.boundary.length for i in islands]) / 1e3
wetted_perimeter = perimeter + 2 * np.array(interior_channel_lengths)
area = np.array([i.area for i in islands]) / 1e6
perimeter_convex_hull = np.array([i.convex_hull.exterior.length
for i in islands]) / 1e3
area_convex_hull = np.array([i.convex_hull.area for i in islands]) / 1e6
a = np.array(map(Polygon_axes, islands))
minor_axis = a[:,0] / 1e3
major_axis = a[:,1] / 1e3
poly_orientation = a[:,2]
aspect_ratio = major_axis / minor_axis
circularity = 4 * np.pi * area / perimeter**2
equivalent_area_diameter = np.sqrt((4 / np.pi) * area)
perimeter_equivalent_diameter = area / np.pi
solidity = area / area_convex_hull
concavity = area_convex_hull - area
convexity = perimeter_convex_hull / perimeter
dry_shape_factor = perimeter / np.sqrt(area)
wet_shape_factor = wetted_perimeter / np.sqrt(area)
num_ox = []
for i in islands:
oxs = 0
for j in i.interiors:
try:
a = Polygon(j).buffer(-20).area
if a > 40000:
oxs += 1
except:
pass
num_ox.append(oxs)
poly_metrics = {'p_area': area,
'p_perim': perimeter,
'p_wetperim': wetted_perimeter,
'p_ch_area': area_convex_hull,
'p_ch_perim': perimeter_convex_hull,
'p_major_ax': major_axis,
'p_minor_ax': minor_axis,
'p_asp_rat': aspect_ratio,
'p_orient': poly_orientation,
'p_circ': circularity,
'p_eq_a_dia': equivalent_area_diameter,
'p_p_eq_dia': perimeter_equivalent_diameter,
'p_solidity': solidity,
'p_concav': concavity,
'p_convex': convexity,
'p_d_shapef': dry_shape_factor,
'p_w_shapef': wet_shape_factor,
'p_num_ox': num_ox,
'p_int_len': interior_channel_lengths}
return poly_metrics
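# Illustrative sketch (an addition, not part of the original module): the circularity
# metric above, 4*pi*A/P**2, approaches 1 for a circle; a high-resolution shapely
# buffer around a point makes this easy to see.
def _demo_circularity():
    circle = Point(0.0, 0.0).buffer(1.0, 128)  # polygon approximating a unit circle
    circ = 4 * np.pi * circle.area / circle.length**2
    assert circ > 0.999
    return circ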
def calculate_edge_distances(islands,
save = True,
load_saved = False,
file_root = ''):
if load_saved:
edgedists = pickle.load( open( file_root + '/edge_distances.p', "rb" ))
else:
edgedists = np.zeros((len(islands),))
for n,s in enumerate(islands):
cellsize = 50
minx, miny, maxx, maxy = s.envelope.bounds
if (((maxx - minx) / cellsize > 1000) or
((maxy - miny) / cellsize > 1000)):
cellsize = 100
if (((maxx - minx) / cellsize > 5000) or
((maxy - miny) / cellsize > 5000)):
cellsize = 500
minx = np.floor(minx) - 1 * cellsize
maxx =
|
np.ceil(maxx)
|
numpy.ceil
|
import numpy as np
from numpy.linalg import norm
from .closedcurve import ClosedCurve
from .helpers import *
class SzegoKernel(object):
def __init__(self, curve, a, opts, **kwargs):
N = opts.numCollPts
dt = 1.0 / float(N)
t = np.arange(0.0, 1.0, dt)
z = curve.position(t)
zt = curve.tangent(t)
zT = zt / np.abs(zt)
        IpA = np.ones((N, N), dtype=complex)
for i in range(1, N):
cols = np.arange(i)
zc_zj = z[cols] - z[i]
tmp1 = np.conjugate(zT[i]/zc_zj)
tmp2 = zT[cols]/zc_zj
tmp3 = np.sqrt(np.abs(np.dot(zt[i], zt[cols])))
tmp4 = (dt/(2.0j*np.pi))
IpA[i, cols] = (tmp1 - tmp2) * tmp3 * tmp4
IpA[cols, i] = -np.conjugate(IpA[i, cols])
y = 1j * np.sqrt(np.abs(zt))/(2*np.pi) * np.conjugate(zT/(z - a))
# TODO - this is a simplification of the original method
assert(opts.kernSolMethod in ('auto', 'bs'))
assert(N < 2048)
x = np.linalg.solve(IpA, y)
relresid = norm(y - np.dot(IpA, x)) / norm(y)
if relresid > 100.0 * np.spacing(1):
raise Exception('out of tolerance')
# set output
self.phiColl = x
self.dtColl = dt
self.zPts = z
self.zTan = zt
self.zUnitTan = zT
class SzegoOpts(object):
def __init__(self):
self.confCenter = 0.0 + 0.0j
self.numCollPts = 512
self.kernSolMethod = 'auto'
self.newtonTol = 10.0 * np.spacing(2.0*np.pi)
self.trace = False
self.numFourierPts = 2*256
class Szego(object):
def __init__(self, curve=None, confCenter=0.0 + 0.0j,
opts=None, *args, **kwargs):
if not isinstance(curve, ClosedCurve):
raise Exception('Expected a closed curve object')
self.curve = curve
self.confCenter = confCenter
if opts is None:
opts = SzegoOpts()
self.numCollPts = opts.numCollPts
kernel = SzegoKernel(curve, confCenter, SzegoOpts())
self.phiColl = kernel.phiColl
self.dtColl = kernel.dtColl
self.zPts = kernel.zPts
self.zTan = kernel.zTan
self.zUnitTan = kernel.zUnitTan
self.theta0 = np.angle(-1.0j * self.phi(0.0)**2 * self.curve.tangent(0))
self.Saa = np.sum(np.abs(self.phiColl**2))*self.dtColl
self.newtTol = opts.newtonTol
self.beNoisy = opts.trace
@suppress_warnings
def kerz_stein(self, ts):
t = np.asarray(ts).reshape(1, -1)[0, :]
w = self.curve.position(t)
wt = self.curve.tangent(t)
wT = wt / np.abs(wt)
z = self.zPts
zt = self.zTan
zT = self.zUnitTan
separation = 10 * np.spacing(np.max(np.abs(z)))
def KS_by_idx(wi, zi):
# TODO - unflatten this expression and vectorise appropriately when
# futher testing confirms this covers all of the appropriate
# cases
z_w = z[zi] - w[wi]
tmp1 = wt[wi]*zt[zi]
tmp2 = np.abs(tmp1)
tmp3 = np.sqrt(tmp2)
tmp4 = (2j * np.pi)
tmp5 = np.conjugate(wT[wi]/z_w)
tmp6 = zT[zi]/z_w
tmp7 = tmp5 - tmp6
out = tmp3 / tmp4 * tmp7
out[np.abs(z_w) < separation] = 0.0
return out
wis = np.arange(len(w))
zis = np.arange(self.numCollPts)
A = [KS_by_idx(wi, zis) for wi in wis]
A = np.vstack(A)
return A
def phi(self, ts):
ts = np.asarray(ts).reshape(1, -1)[0, :]
        v = self.psi(ts) - np.dot(self.kerz_stein(ts), self.phiColl) * self.dtColl
return v
def psi(self, ts):
ts = np.asarray(ts).reshape(1, -1)[0, :]
wt = self.curve.tangent(ts)
xs = self.curve.point(ts) - self.confCenter
tmp1 = np.sqrt(
|
np.abs(wt)
|
numpy.abs
|
from spear import *
import numpy.random as rnd
import matplotlib.pyplot as plt
import numpy
### CONSTANTS
a = 0.5
az0 = 0.75
az12 = 0.75
az23 = 0.75
g = 9.81
t_step = 0.1 #duration of a tick in seconds
q_max = 6.0
q_step = q_max/5.0
q_med = q_max/2.0
l_max = 20.0
l_min = 0.0
l_goal = 10.0
delta_l = 0.5
epsilon = 0.3
q2_dev = 0.5
### ENVIROMENT EVOLUTION
def compute_flow_rate(x1, x2, a, a12):
if x1 > x2:
return a12*a*numpy.sqrt(2*g)*numpy.sqrt(x1-x2)
else:
return -a12*a*numpy.sqrt(2*g)*numpy.sqrt(x2-x1)
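# Worked example (an addition, not part of the original script): with the constants above,
# a 2 m head difference gives a flow of az12 * a * sqrt(2*g) * sqrt(2) ~= 2.35, and the
# sign flips when the head difference is reversed.
def _demo_flow_rate():
    q = compute_flow_rate(3.0, 1.0, a, az12)
    assert abs(q - az12 * a * numpy.sqrt(2 * g) * numpy.sqrt(2.0)) < 1e-12
    assert compute_flow_rate(1.0, 3.0, a, az12) == -q
    return q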
def compute_q12(ds):
l1 = ds['l1']
l2 = ds['l2']
return compute_flow_rate(l1, l2, a, az12)
def compute_q23(ds):
l2 = ds['l2']
l3 = ds['l3']
return compute_flow_rate(l2, l3, a, az23)
def Env_scenario1(ds):
newds = ds.copy()
q1 = ds['q1']
q2 = ds['q2']
q3 = ds['q3']
newds['q2'] = max(0.0, rnd.normal(q_med, q2_dev))
q12 = compute_q12(ds)
q23 = compute_q23(ds)
newds['l1'] = max(0.0 , ds['l1'] + q1*t_step - q12*t_step)
newds['l2'] = max(0.0 , ds['l2'] + q12*t_step - q23*t_step)
newds['l3'] = max(0.0 , ds['l3'] + q2*t_step + q23*t_step - q3*t_step)
return newds
def Env_scenario2(ds):
newds = ds.copy()
q1 = ds['q1']
q2 = ds['q2']
q3 = ds['q3']
newds['q2'] = min( max(0.0, q2 + rnd.normal(0,1)), q_max)
q12 = compute_q12(ds)
q23 = compute_q23(ds)
newds['l1'] = max(0.0 , ds['l1'] + q1*t_step - q12*t_step)
newds['l2'] = max(0.0 , ds['l2'] + q12*t_step - q23*t_step)
newds['l3'] = max(0.0 , ds['l3'] + q2*t_step + q23*t_step - q3*t_step)
return newds
### PENALTY FUNCTIONS
def rho_fun(x):
v = abs(x-l_goal)/max(l_max-l_goal,l_goal-l_min)
return v
def ranking_function_1(i, ds):
return rho_fun(ds['l1'])
def ranking_function_2(i, ds):
return rho_fun(ds['l2'])
def ranking_function_3(i, ds):
return rho_fun(ds['l3'])
def ranking_function_max(i, ds):
return max(rho_fun(ds['l1']),rho_fun(ds['l2']),rho_fun(ds['l3']))
### ADDITIONAL FUNCTIONS
def plot_tanks_trajectory(k, trj, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l1'] for ds in trj], label='Tank 1')
ax.plot(range(0, k), [ds['l2'] for ds in trj], label='Tank 2')
ax.plot(range(0, k), [ds['l3'] for ds in trj], label='Tank 3')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l1(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l1'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l1'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l2(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l2'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l2'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l3(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l3'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l3'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_3runs(k, trj1, trj2, trj3, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l3'] for ds in trj1], label='0.5')
ax.plot(range(0, k), [ds['l3'] for ds in trj2], label='0.3')
ax.plot(range(0, k), [ds['l3'] for ds in trj3], label='0.7')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
### PROCESSES
processes = {
'Pin': if_then_else_process(lambda d: d['l1'] > l_goal + d['delta_l'],
act_process({'q1': lambda d: max(0.0, d['q1'] - q_step)}, 'Pin'),
if_then_else_process(lambda d: d['l1'] < l_goal - d['delta_l'],
act_process({'q1': lambda d: min(q_max, d['q1'] + q_step)}, 'Pin'),
act_process({}, 'Pin'))),
'Pout': if_then_else_process(lambda d: d['l3'] > l_goal + d['delta_l'],
act_process({'q3': lambda d: min(q_max, d['q3'] + q_step)}, 'Pout'),
if_then_else_process(lambda d: d['l3'] < l_goal - d['delta_l'],
act_process({'q3': lambda d: max(0.0, d['q3'] - q_step)}, 'Pout'),
act_process({},'Pout')))
}
PTanks = synch_parallel_process(processes['Pin'], processes['Pout'])
def init_ds(q1, q2, q3, l1, l2, l3, delta_l):
return {'q1': q1, 'q2': q2, 'q3': q3, 'l1': l1, 'l2': l2, 'l3': l3, 'delta_l': delta_l}
### SIMULATIONS
ds_basic = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l)
k = 151
n = 1000
l = 5
trj1 = run(processes, Env_scenario1, PTanks, ds_basic, k)
trj2 = run(processes, Env_scenario2, PTanks, ds_basic, k)
plot_tanks_trajectory(k,trj1,"Variation of the level of water in time (Scenario 1)","tank_level_sim_scen1.png")
plot_tanks_trajectory(k,trj2,"Variation of the level of water in time (Scenario 2)","tank_level_sim_scen2.png")
plot_tanks_traj_l1(k,trj1,trj2,"Comparison of the variation in time of l1","tank_sim_1.png")
plot_tanks_traj_l2(k,trj1,trj2,"Comparison of the variation in time of l2","tank_sim_2.png")
plot_tanks_traj_l3(k,trj1,trj2,"Comparison of the variation in time of l3","tank_sim_3.png")
for samples in [ 100, 1000, 10000 ]:
simdata1 = simulate(processes, Env_scenario1, PTanks, ds_basic, k, samples)
simdata2 = simulate(processes, Env_scenario2, PTanks, ds_basic, k, samples)
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l1'], 7.0, 13.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l2'], 7.0, 13.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l3'], 7.0, 13.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l1'], 8.0, 12.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l2'], 8.0, 12.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l3'], 8.0, 12.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l1'], 8.0, 12.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l2'], 8.0, 12.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l3'], 8.0, 12.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
estdata1_n = simulate(processes, Env_scenario1, PTanks, ds_basic, k, n)
estdata1_nl = simulate(processes, Env_scenario1, PTanks, ds_basic, k, n*l)
estdata2_n = simulate(processes, Env_scenario2, PTanks, ds_basic, k, n)
estdata2_nl = simulate(processes, Env_scenario2, PTanks, ds_basic, k, n*l)
### EVALUATION OF DISTANCES DIFFERENT ENVIRONMENTS
(evmet_12_rho3, pointdist_12_rho3) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_3)
print("Distance scen1-scen2: "+str(evmet_12_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_12_rho3,'r.')
ax.plot(range(k),pointdist_12_rho3,'b-')
plt.title("Distance modulo rho_3 scenarios 1-2 N="+str(n)+", l="+str(l))
plt.savefig("distance_scen1-scen2_newest.png")
plt.show()
(evmet_21_rho3, pointdist_21_rho3) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_3)
print("Distance scen2-scen1: "+str(evmet_21_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_21_rho3,'r.')
ax.plot(range(k),pointdist_21_rho3,'b-')
plt.title("Distance modulo rho_3 scenarios 2-1 N="+str(n)+", l="+str(l))
plt.savefig("distance_scen2-scen1_newest.png")
plt.show()
(evmet_12_rho1, pointdist_12_rho1) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_1)
(evmet_12_rho2, pointdist_12_rho2) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_2)
(evmet_12_rhoM, pointdist_12_rhoM) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_max)
fix, ax = plt.subplots()
ax.plot(range(k),evmet_12_rho1,label="rho^l1")
ax.plot(range(k),evmet_12_rho2,label="rho^l2")
ax.plot(range(k),evmet_12_rho3,label="rho^l3")
ax.plot(range(k),evmet_12_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Evolution metric wrt different penalty functions N="+str(n)+", l="+str(l))
plt.savefig("ev_distance_rho_scen1-scen2_basic.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_12_rho1,label="rho^l1")
ax.plot(range(k),pointdist_12_rho2,label="rho^l2")
ax.plot(range(k),pointdist_12_rho3,label="rho^l3")
ax.plot(range(k),pointdist_12_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Pointiwise distance wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_rho_scen1-scen2_basic.png")
plt.show()
(evmet_21_rho1, pointdist_21_rho1) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_1)
(evmet_21_rho2, pointdist_21_rho2) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_2)
(evmet_21_rhoM, pointdist_21_rhoM) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_max)
fix, ax = plt.subplots()
ax.plot(range(k),evmet_21_rho1,label="rho^l1")
ax.plot(range(k),evmet_21_rho2,label="rho^l2")
ax.plot(range(k),evmet_21_rho3,label="rho^l3")
ax.plot(range(k),evmet_21_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Evolution metric wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("ev_distance_rho_scen2-scen1_basic.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_21_rho1,label="rho^l1")
ax.plot(range(k),pointdist_21_rho2,label="rho^l2")
ax.plot(range(k),pointdist_21_rho3,label="rho^l3")
ax.plot(range(k),pointdist_21_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Pointiwise distance wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_rho_scen2-scen1_basic.png")
plt.show()
### EVALUATION OF DISTANCES DIFFERENT DELTAS
delta_l_less = 0.3
delta_l_more = 0.7
ds_start_less = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l_less)
ds_start_more = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l_more)
run_less = run(processes, Env_scenario1, PTanks, ds_start_less, k)
run_more = run(processes, Env_scenario1, PTanks, ds_start_more, k)
run_normal = run(processes, Env_scenario1, PTanks, ds_basic, k)
plot_tanks_3runs(k,run_normal,run_less,run_more,"Variation of l3 modulo different delta_l","deltas_scen1.png")
estless_n = simulate(processes, Env_scenario1, PTanks, ds_start_less, k, n)
estmore_nl = simulate(processes, Env_scenario1, PTanks, ds_start_more, k, n*l)
(evmet_32_rho3, pointdist_32_rho3) = compute_distance(estless_n,estdata1_nl,k,n,l,ranking_function_3)
print("Distance 0.3-0.5: "+str(evmet_32_rho3[0]))
(evmet_24_rho3, pointdist_24_rho3) = compute_distance(estdata1_n,estmore_nl,k,n,l,ranking_function_3)
print("Distance 0.5-0.7: "+str(evmet_24_rho3[0]))
(evmet_34_rho3, pointdist_34_rho3) = compute_distance(estless_n,estmore_nl,k,n,l,ranking_function_3)
print("Distance 0.3-0.7: "+str(evmet_34_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_32_rho3, label='0.3,0.5')
ax.plot(range(k),evmet_24_rho3, label='0.5,0.7')
ax.plot(range(k),evmet_34_rho3, label='0.3,0.7')
legend=ax.legend()
plt.title("Evolution metric with delta_l = 0.3,0.5,0.7, N="+str(n)+",l="+str(l))
plt.savefig("ev_distance_scen1_deltas.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_32_rho3, label='0.3,0.5')
ax.plot(range(k),pointdist_24_rho3, label='0.5,0.7')
ax.plot(range(k),pointdist_34_rho3, label='0.3,0.7')
legend=ax.legend()
plt.title("Pointwise distance with delta_l = 0.3,0.5,0.7, N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_scen1_deltas.png")
plt.show()
### EVALUATION OF ROBUSTNESS
def robust_variation(simdata,ds,scale,size,t1,t2,pdef,p,e,k,n,l,rho):
v = scale*max(l_max-l_goal,l_goal-l_min)
res = []
c=0
sdata = simdata[t1:t2+1]
t = t2-t1+1
for j in range(0,size):
d = ds.copy()
d['l1'] = rnd.uniform(l_min,l_max)
d['l2'] = rnd.uniform(l_min,l_max)
d['l3'] = max(0.0, min(ds['l3'] + rnd.uniform(-v,v),l_max))
d['q1'] = rnd.uniform(0.0,q_max)
d['q2'] = rnd.uniform(0.0,q_max)
d['q3'] = rnd.uniform(0.0,q_max)
simdata2 = simulate(pdef,e,p,d,k,n*l)
sdata2 = simdata2[t1:t2+1]
evdist,ptdist = compute_distance(sdata,sdata2,t,n,l,rho)
c = c+1
print(c)
if evdist[0] <=scale:
res.append(d)
return res
def compute_robust(simuldata,dlist,k,n,l,rho):
print("Starting the simulations of the variations for M="+str(M))
delta = [ 0 for i in range(k) ]
c = 0
for data2 in dlist:
simuldata2 = simulate(processes,Env_scenario1,PTanks,data2,k,n*l)
(ev,dist) = compute_distance(simuldata,simuldata2,k,n,l,rho)
c = c+1
print (c)
for i in range(k):
delta[i] = max(delta[i],ev[i])
return delta
ds_start = ds_basic  # starting state for the robustness analysis (assumed: same as the basic initial state)
M = 20  # number of sampled variations (assumed value)
estrob = simulate(processes, Env_scenario1, PTanks, ds_start, k, n)
eta1= 0.3
i1 = 0
i2 = 50
def new_rho(i,ds):
return 0.5*rho_fun(ds['l1']) + 0.5*rho_fun(ds['l2'])
print("Computing variations")
estrob2 = robust_variation(estrob, ds_start, eta1, M, i1, i2, processes, PTanks, Env_scenario1, k, n, l, ranking_function_3)
print("Computing robustness")
robustness = compute_robust(estrob, estrob2, k, n, l, new_rho)
plt.plot(range(i2,k),robustness[i2:])
plt.title("Robustness, M="+str(M)+", eta_1=0.3, I = [0,50]")
plt.savefig("tanks_robust_scen1.png")
plt.show()
### EVALUATION OF ADAPTABILITY
def set_variation(l1, l2, l3):
d = init_ds(0,0,0,0,0,0,delta_l)
d['l1'] = l1
d['l2'] = l2
d['l3'] = l3
d['q1'] = rnd.uniform(0,q_max)
d['q2'] = rnd.uniform(0,q_max)
d['q3'] = rnd.uniform(0,q_max)
return d
def variations_1(ds,scale,size):
v = rho_fun(ds['l1'])
u = scale*max(l_max-l_goal,l_goal-l_min)
res = []
for j in range(0,size):
l1 = max(0.0, min(ds['l1'] + rnd.uniform(-u,u),l_max))
l2 = rnd.uniform(l_min,l_max)
l3 = rnd.uniform(l_min,l_max)
if max(rho_fun(l1)-v, 0.0)<=scale:
res.append(set_variation(l1,l2,l3))
return res
def variations_2(ds,scale,size):
v = rho_fun(ds['l2'])
u = scale*max(l_max-l_goal,l_goal-l_min)
res = []
for j in range(0,size):
l1 = rnd.uniform(l_min,l_max)
l2 = max(0.0, min(ds['l2'] + rnd.uniform(-u,u),l_max))
l3 = rnd.uniform(l_min,l_max)
if max(rho_fun(l2)-v , 0.0)<=scale:
res.append(set_variation(l1,l2,l3))
return res
def variations_3(ds,scale,size):
v = rho_fun(ds['l3'])
u = scale*max(l_max-l_goal,l_goal-l_min)
res = []
for j in range(0,size):
l1 = rnd.uniform(l_min,l_max)
l2 = rnd.uniform(l_min,l_max)
l3 = max(0.0, min(ds['l3'] + rnd.uniform(-u,u),l_max))
if max(rho_fun(l3) - v, 0.0)<=scale:
res.append(set_variation(l1,l2,l3))
return res
def variations_max(ds,scale,size):
v = max(rho_fun(ds['l1']),rho_fun(ds['l2']),rho_fun(ds['l3']))
u = scale*max(l_max-l_goal,l_goal-l_min)
res = []
for j in range(0,size):
l1 = max(0.0, min(ds['l1'] + rnd.uniform(-u,u),l_max))
l2 = max(0.0, min(ds['l2'] + rnd.uniform(-u,u),l_max))
l3 = max(0.0, min(ds['l3'] +
|
rnd.uniform(-u,u)
|
numpy.random.uniform
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-02-02 14:58:44
# @Author : Ricky (<EMAIL>)
import os
import time
import pickle
import numpy as np
import sklearn.datasets as dt
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
'''logistic regression model'''
def generator_nonzero(sample):
"""
generate the nonzero index in the feature
:param sample: one sample vector
:return: generator
"""
for j in range(len(sample)):
if sample[j] != 0:
yield j
def cal_loss(true_label, probability):
"""
calculate the log_loss between ground true-label and prediction
:param true_label: the ground truth label for the sample {0, 1}
:param probability: the prediction of the trained model [0, 1]
:return: logloss
"""
probability = max(min(probability, 1. - 1e-15), 1e-15)
return -np.log(probability) if true_label == 1 else -np.log(1 - probability)
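# Example: cal_loss(1, 0.9) = -ln(0.9) ≈ 0.105, whereas a confidently wrong
# prediction, cal_loss(0, 0.9) = -ln(0.1) ≈ 2.303, is penalized far more heavily.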
def cal_loss2(true_label, probability):
"""
calculate the softmax log_loss between ground true-label and prediction for one single sample
    note: the probability vector is assumed to be already normalized, so no min/max clipping is needed
:param true_label: the ground truth label vector for the sample -array
:param probability: the prediction vector of the trained model -array
:return: logloss
"""
k = np.argmax(true_label)
return -np.log(probability[k])
def evaluate_model(preds, labels):
"""
evaluate the model errors on a set of data (not one single sample)
:param preds: the prediction of unseen samples (n_sample, n_label)
:param labels: the ground truth labels (n_sample, n_label)
    :return: classification error rate in percent
"""
shapes = len(labels.shape)
if shapes == 2:
# multi-class classification-find the max-index per row
max_index = np.argmax(preds, axis=1)
for i, p in enumerate(max_index):
preds[i, p] = 1
preds[preds < 1.] = 0
else:
# binary classification-default (n_sample, )
preds[preds >= 0.5] = 1
preds[preds < 0.5] = 0
return np.abs(preds - labels).sum() / (len(labels) * shapes)* 100
def get_auc(scores, labels):
"""
calculate the auc indicator on a set of data
:param scores: the probability of each sample [0, 1]-array
:param labels: the ground truth labels {0, 1}-array
:return: auc indicator
"""
data_shape = labels.shape
pos_num = np.sum(labels, axis=0)
neg_num = len(labels) - pos_num
# rank scores
rank_index = np.argsort(scores, axis=0, kind='quicksort')
if len(data_shape) == 1:
rank_sum = 0.0
for i in range(data_shape[0]):
if labels[rank_index[i]] == 1:
rank_sum += (i + 1)
# calculate the auc
denominator = pos_num * neg_num
if denominator == 0:
res = 0
else:
res = (rank_sum - 0.5 * (pos_num + 1) * pos_num) / denominator
else:
rank_sum = np.zeros(data_shape[1])
res = 0.0
for i in range(data_shape[0]):
for j in range(data_shape[1]):
if labels[rank_index[i, j], j] == 1:
rank_sum[j] += (i + 1)
# calculate the auc
denominator = pos_num * neg_num
for j in range(data_shape[1]):
if denominator[j] == 0:
res += 0.0
else:
numerator = rank_sum[j] - 0.5 * (pos_num[j] + 1) * pos_num[j]
res += numerator / denominator[j]
res = res / data_shape[1]
return res
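def _check_get_auc():
    """Illustrative sanity check (not called anywhere in this script): the
    rank-based formula in get_auc should agree with sklearn's roc_auc_score."""
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    labels = np.array([0, 0, 1, 1])
    # both evaluate to 0.75 on this toy example
    assert abs(get_auc(scores, labels) - roc_auc_score(labels, scores)) < 1e-12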
def logistic0(var):
"""
calculate the logistic value of one variable
:param var: the input variable
:return: logistic value
"""
var = max(min(var, 100), -100)
return 1. / (1 + np.exp(-var))
def logistic(var):
"""
    extend the logistic function to scalars and ndarrays of any dimensionality (1-D, 2-D, 3-D, 4-D, ...)
:param var: float/int/ndarray
:return:
"""
if isinstance(var, np.ndarray):
shapes = var.shape
length = np.multiply.reduce(shapes)
var = np.reshape(var, length)
res = np.zeros(length)
for i in range(length):
res[i] = logistic0(var[i])
res = np.reshape(res, shapes)
else:
res = logistic0(var)
return res
def softmax(var):
"""
calculate the softmax value of one vector variable
:param var: the input vector
:return: softmax vector
"""
e_x = np.exp(var - np.max(var))
output = e_x / e_x.sum()
return output
def generate_samples(dimension, n_samples):
"""
generate samples according to the user-defined requirements
:param dimension:
:param n_samples:
:return:
"""
samples = np.random.rand(n_samples, dimension)
labels = np.random.randint(0, 2, (n_samples, ))
return samples, labels
class LR:
def __init__(self, dim, alpha, beta, lambda1, lambda2):
"""
the constructor of LR class
:param dim: the dimension of input features
:param alpha: the alpha parameters for learning rate in the update of weights
:param beta: the beta parameters for learning rate in the update of weights
:param lambda1: L1 regularization
:param lambda2: L2 regularization
"""
self.dim = dim
self.alpha = alpha
self.beta = beta
self.lambda1 = lambda1
self.lambda2 = lambda2
# initialize the zis, nis, gradient, weights
self._zs = np.zeros(self.dim + 1)
self._ns = np.zeros(self.dim + 1)
self.weights = np.zeros(self.dim + 1)
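    # The update below follows the FTRL-Proximal scheme: _zs accumulates the
    # shifted gradients and _ns the squared gradients, per coordinate. A weight
    # is forced to zero whenever |z| <= lambda1 (L1 sparsity); otherwise
    # w = -(z - sign(z) * lambda1) / ((beta + sqrt(n)) / alpha + lambda2).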
def update_param(self, sample, label):
"""
update the parameters: weights, zs, ns, gradients
        :param sample: the feature vector -array
        :param label: the ground truth label -value
"""
# update bias
if np.abs(self._zs[-1]) > self.lambda1:
fore = (self.beta + np.sqrt(self._ns[-1])) / self.alpha + self.lambda2
self.weights[-1] = -1. / fore * (self._zs[-1] - np.sign(self._zs[-1]) * self.lambda1)
else:
self.weights[-1] = 0.0
# update weights
for index in generator_nonzero(sample):
if np.abs(self._zs[index]) > self.lambda1:
fore = (self.beta + np.sqrt(self._ns[index])) / self.alpha + self.lambda2
self.weights[index] = -1. / fore * (self._zs[index] - np.sign(self._zs[index]) * self.lambda1)
else:
self.weights[index] = 0
# predict the sample, compute the gradient of loss
prediction = self.predict(sample)
base_grad = prediction - label
# update the zs, ns
for j in generator_nonzero(sample):
gradient = base_grad * sample[j]
sigma = (
|
np.sqrt(self._ns[j] + gradient ** 2)
|
numpy.sqrt
|
import numpy as np
try:
import open3d as o3d # the extra feature
except ImportError:
pass
def translate(p):
x, y, z = p
return np.array([
[1., 0., 0., x],
[0., 1., 0., y],
[0., 0., 1., z],
[0., 0., 0., 1.]
])
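# Usage sketch (illustrative): translate builds a 4x4 homogeneous transform, e.g.
#   (translate((1., 2., 3.)) @ np.array([0., 0., 0., 1.]))[:3]  ->  array([1., 2., 3.])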
def rotate(axis, angle):
x, y, z = axis
return np.array([ # Rodrigues
[
np.cos(angle) + (x**2 * (1 - np.cos(angle))),
(x * y * (1 - np.cos(angle))) - (z * np.sin(angle)),
(x * z * (1 - np.cos(angle))) + (y *
|
np.sin(angle)
|
numpy.sin
|
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
# assumed import: cov2corr (used in array2sym below) is not defined in this
# snippet; statsmodels' moment_helpers provides a compatible implementation
from statsmodels.stats.moment_helpers import cov2corr
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
    chromosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
    return chromosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
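# Coding used above: 1 = homozygous for the major allele, 2 = homozygous for the
# minor allele, 3 = phased heterozygote major|minor, 4 = phased heterozygote
# minor|major (and any remaining case).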
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position: dtype: int64; marker position in bp
Name: group: dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
Returns stored input files
-------
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
                if not (mpx == np.sort(mpx)).all():
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
        A string containing "cM" or "reco".
    method : int
        An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach.
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
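# Hypothetical usage sketch (variable names are placeholders, not defined here):
#   info = Datacheck(gmap, meff, gmat, group, indwt, progress=True)
#   covmat = popcovmat(info, mposunit="cM", method=1)   # method=1: Bonk et al.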
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
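# Note: makemems keeps only the heterozygous codes (3/4), signed by phase, which
# is what feeds the Mendelian sampling (co)variances below; makemebv keeps the
# homozygous codes (1/2), which is what feeds the breeding-value sums in calcgbv.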
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
    if any(x is None for x in aaa):
        sys.exit("Some individual ID could not be found in group data")
    aaa = np.array(aaa)
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, 0] = mscov
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
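# Hypothetical usage sketch (continuing the placeholders used after popcovmat):
#   msvmsc = msvarcov_g(info, covmat, sub_id=None, progress=True)
#   dfcor = msvarcov_gcorr(msvmsc)   # standardized (co)variances, multi-trait only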
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] =
|
np.nan_to_num(probdf[:, i])
|
numpy.nan_to_num
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer datasets.
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import re
import numpy as np
import six
import warnings
from astropy import constants as const
import astropy.units as units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, FK5, Angle
from .uvbase import UVBase
from . import parameter as uvp
from . import telescopes as uvtel
from . import utils as uvutils
if six.PY2:
from collections import Iterable
else:
from collections.abc import Iterable
class UVData(UVBase):
"""
A class for defining a radio interferometer dataset.
Currently supported file types: uvfits, miriad, fhd.
Provides phasing functions.
Attributes
----------
UVParameter objects :
For full list see UVData Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvdata_parameters.html).
Some are always required, some are required for certain phase_types
and others are always optional.
"""
def __init__(self):
"""Create a new UVData object."""
# add the UVParameters to the class
# standard angle tolerance: 10 mas in radians.
# Should perhaps be decreased to 1 mas in the future
radian_tol = 10 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
self._Ntimes = uvp.UVParameter('Ntimes', description='Number of times',
expected_type=int)
self._Nbls = uvp.UVParameter('Nbls', description='Number of baselines',
expected_type=int)
self._Nblts = uvp.UVParameter('Nblts', description='Number of baseline-times '
'(i.e. number of spectra). Not necessarily '
'equal to Nbls * Ntimes', expected_type=int)
self._Nfreqs = uvp.UVParameter('Nfreqs', description='Number of frequency channels',
expected_type=int)
self._Npols = uvp.UVParameter('Npols', description='Number of polarizations',
expected_type=int)
desc = ('Array of the visibility data, shape: (Nblts, Nspws, Nfreqs, '
'Npols), type = complex float, in units of self.vis_units')
self._data_array = uvp.UVParameter('data_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=np.complex)
desc = 'Visibility units, options are: "uncalib", "Jy" or "K str"'
self._vis_units = uvp.UVParameter('vis_units', description=desc,
form='str', expected_type=str,
acceptable_vals=["uncalib", "Jy", "K str"])
desc = ('Number of data points averaged into each data element, '
'NOT required to be an integer, type = float, same shape as data_array.'
'The product of the integration_time and the nsample_array '
'value for a visibility reflects the total amount of time '
'that went into the visibility. Best practice is for the '
'nsample_array to be used to track flagging within an integration_time '
'(leading to a decrease of the nsample array value below 1) and '
'LST averaging (leading to an increase in the nsample array '
'value). So datasets that have not been LST averaged should '
'have nsample array values less than or equal to 1.'
'Note that many files do not follow this convention, but it is '
'safe to assume that the product of the integration_time and '
'the nsample_array is the total amount of time included in a visibility.')
self._nsample_array = uvp.UVParameter('nsample_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=(np.float))
desc = 'Boolean flag, True is flagged, same shape as data_array.'
self._flag_array = uvp.UVParameter('flag_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=np.bool)
self._Nspws = uvp.UVParameter('Nspws', description='Number of spectral windows '
'(ie non-contiguous spectral chunks). '
'More than one spectral window is not '
'currently supported.', expected_type=int)
self._spw_array = uvp.UVParameter('spw_array',
description='Array of spectral window '
'Numbers, shape (Nspws)', form=('Nspws',),
expected_type=int)
desc = ('Projected baseline vectors relative to phase center, '
'shape (Nblts, 3), units meters. Convention is: uvw = xyz(ant2) - xyz(ant1).'
'Note that this is the Miriad convention but it is different '
'from the AIPS/FITS convention (where uvw = xyz(ant1) - xyz(ant2)).')
self._uvw_array = uvp.UVParameter('uvw_array', description=desc,
form=('Nblts', 3),
expected_type=np.float,
acceptable_range=(0, 1e8), tols=1e-3)
desc = ('Array of times, center of integration, shape (Nblts), '
'units Julian Date')
self._time_array = uvp.UVParameter('time_array', description=desc,
form=('Nblts',),
expected_type=np.float,
tols=1e-3 / (60.0 * 60.0 * 24.0)) # 1 ms in days
desc = ('Array of lsts, center of integration, shape (Nblts), '
'units radians')
self._lst_array = uvp.UVParameter('lst_array', description=desc,
form=('Nblts',),
expected_type=np.float,
tols=radian_tol)
desc = ('Array of first antenna indices, shape (Nblts), '
'type = int, 0 indexed')
self._ant_1_array = uvp.UVParameter('ant_1_array', description=desc,
expected_type=int, form=('Nblts',))
desc = ('Array of second antenna indices, shape (Nblts), '
'type = int, 0 indexed')
self._ant_2_array = uvp.UVParameter('ant_2_array', description=desc,
expected_type=int, form=('Nblts',))
desc = ('Array of baseline indices, shape (Nblts), '
'type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16')
self._baseline_array = uvp.UVParameter('baseline_array',
description=desc,
expected_type=int, form=('Nblts',))
# this dimensionality of freq_array does not allow for different spws
# to have different dimensions
desc = 'Array of frequencies, center of the channel, shape (Nspws, Nfreqs), units Hz'
self._freq_array = uvp.UVParameter('freq_array', description=desc,
form=('Nspws', 'Nfreqs'),
expected_type=np.float,
tols=1e-3) # mHz
desc = ('Array of polarization integers, shape (Npols). '
'AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); '
'circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). '
'NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations '
'"Stokes", but this is inaccurate as visibilities cannot be in '
'true Stokes polarizations for physical antennas. We adopt the '
'term pseudo-Stokes to refer to linear combinations of instrumental '
'visibility polarizations (e.g. pI = xx + yy).')
self._polarization_array = uvp.UVParameter('polarization_array',
description=desc,
expected_type=int,
acceptable_vals=list(
np.arange(-8, 0)) + list(np.arange(1, 5)),
form=('Npols',))
desc = ('Length of the integration in seconds, shape (Nblts). '
'The product of the integration_time and the nsample_array '
'value for a visibility reflects the total amount of time '
'that went into the visibility. Best practice is for the '
'integration_time to reflect the length of time a visibility '
'was integrated over (so it should vary in the case of '
'baseline-dependent averaging and be a way to do selections '
'for differently integrated baselines).'
'Note that many files do not follow this convention, but it is '
'safe to assume that the product of the integration_time and '
'the nsample_array is the total amount of time included in a visibility.')
self._integration_time = uvp.UVParameter('integration_time',
description=desc,
form=('Nblts',),
expected_type=np.float, tols=1e-3) # 1 ms
self._channel_width = uvp.UVParameter('channel_width',
description='Width of frequency channels (Hz)',
expected_type=np.float,
tols=1e-3) # 1 mHz
# --- observation information ---
self._object_name = uvp.UVParameter('object_name',
description='Source or field '
'observed (string)', form='str',
expected_type=str)
self._telescope_name = uvp.UVParameter('telescope_name',
description='Name of telescope '
'(string)', form='str',
expected_type=str)
self._instrument = uvp.UVParameter('instrument', description='Receiver or backend. '
'Sometimes identical to telescope_name',
form='str', expected_type=str)
desc = ('Telescope location: xyz in ITRF (earth-centered frame). '
'Can also be accessed using telescope_location_lat_lon_alt or '
'telescope_location_lat_lon_alt_degrees properties')
self._telescope_location = uvp.LocationParameter('telescope_location',
description=desc,
acceptable_range=(
6.35e6, 6.39e6),
tols=1e-3)
self._history = uvp.UVParameter('history', description='String of history, units English',
form='str', expected_type=str)
# --- phasing information ---
desc = ('String indicating phasing type. Allowed values are "drift", '
'"phased" and "unknown"')
self._phase_type = uvp.UVParameter('phase_type', form='str', expected_type=str,
description=desc, value='unknown',
acceptable_vals=['drift', 'phased', 'unknown'])
desc = ('Required if phase_type = "phased". Epoch year of the phase '
'applied to the data (eg 2000.)')
self._phase_center_epoch = uvp.UVParameter('phase_center_epoch',
required=False,
description=desc,
expected_type=np.float)
desc = ('Required if phase_type = "phased". Right ascension of phase '
'center (see uvw_array), units radians. Can also be accessed using phase_center_ra_degrees.')
self._phase_center_ra = uvp.AngleParameter('phase_center_ra',
required=False,
description=desc,
expected_type=np.float,
tols=radian_tol)
desc = ('Required if phase_type = "phased". Declination of phase center '
'(see uvw_array), units radians. Can also be accessed using phase_center_dec_degrees.')
self._phase_center_dec = uvp.AngleParameter('phase_center_dec',
required=False,
description=desc,
expected_type=np.float,
tols=radian_tol)
desc = ('Only relevant if phase_type = "phased". Specifies the frame the'
' data and uvw_array are phased to. Options are "gcrs" and "icrs",'
' default is "icrs"')
self._phase_center_frame = uvp.UVParameter('phase_center_frame',
required=False,
description=desc,
expected_type=str,
acceptable_vals=['icrs', 'gcrs'])
# --- antenna information ----
desc = ('Number of antennas with data present (i.e. number of unique '
'entries in ant_1_array and ant_2_array). May be smaller '
'than the number of antennas in the array')
self._Nants_data = uvp.UVParameter('Nants_data', description=desc,
expected_type=int)
desc = ('Number of antennas in the array. May be larger '
'than the number of antennas with data')
self._Nants_telescope = uvp.UVParameter('Nants_telescope',
description=desc, expected_type=int)
desc = ('List of antenna names, shape (Nants_telescope), '
'with numbers given by antenna_numbers (which can be matched '
'to ant_1_array and ant_2_array). There must be one entry '
'here for each unique entry in ant_1_array and '
'ant_2_array, but there may be extras as well.')
self._antenna_names = uvp.UVParameter('antenna_names', description=desc,
form=('Nants_telescope',),
expected_type=str)
desc = ('List of integer antenna numbers corresponding to antenna_names, '
'shape (Nants_telescope). There must be one '
'entry here for each unique entry in ant_1_array and '
'ant_2_array, but there may be extras as well.')
self._antenna_numbers = uvp.UVParameter('antenna_numbers', description=desc,
form=('Nants_telescope',),
expected_type=int)
# -------- extra, non-required parameters ----------
desc = ('Orientation of the physical dipole corresponding to what is '
'labelled as the x polarization. Options are "east" '
'(indicating east/west orientation) and "north" (indicating '
'north/south orientation)')
self._x_orientation = uvp.UVParameter('x_orientation', description=desc,
required=False, expected_type=str,
acceptable_vals=['east', 'north'])
blt_order_options = ['time', 'baseline', 'ant1', 'ant2', 'bda']
desc = ('Ordering of the data array along the blt axis. A tuple with '
'the major and minor order (minor order is omitted if order is "bda"). '
'The allowed values are: '
                + ', '.join([str(val) for val in blt_order_options]))
self._blt_order = uvp.UVParameter('blt_order', description=desc, form=(2,),
required=False, expected_type=str,
acceptable_vals=blt_order_options)
desc = ('Any user supplied extra keywords, type=dict. Keys should be '
'8 character or less strings if writing to uvfits or miriad files. '
'Use the special key "comment" for long multi-line string comments.')
self._extra_keywords = uvp.UVParameter('extra_keywords', required=False,
description=desc, value={},
spoof_val={}, expected_type=dict)
desc = ('Array giving coordinates of antennas relative to '
'telescope_location (ITRF frame), shape (Nants_telescope, 3), '
'units meters. See the tutorial page in the documentation '
'for an example of how to convert this to topocentric frame.'
'Will be a required parameter in a future version.')
self._antenna_positions = uvp.AntPositionParameter('antenna_positions',
required=False,
description=desc,
form=(
'Nants_telescope', 3),
expected_type=np.float,
tols=1e-3) # 1 mm
desc = ('Array of antenna diameters in meters. Used by CASA to '
'construct a default beam if no beam is supplied.')
self._antenna_diameters = uvp.UVParameter('antenna_diameters',
required=False,
description=desc,
form=('Nants_telescope',),
expected_type=np.float,
tols=1e-3) # 1 mm
# --- other stuff ---
# the below are copied from AIPS memo 117, but could be revised to
# merge with other sources of data.
self._gst0 = uvp.UVParameter('gst0', required=False,
description='Greenwich sidereal time at '
'midnight on reference date',
spoof_val=0.0, expected_type=np.float)
self._rdate = uvp.UVParameter('rdate', required=False,
description='Date for which the GST0 or '
'whatever... applies',
spoof_val='', form='str')
self._earth_omega = uvp.UVParameter('earth_omega', required=False,
description='Earth\'s rotation rate '
'in degrees per day',
spoof_val=360.985, expected_type=np.float)
self._dut1 = uvp.UVParameter('dut1', required=False,
description='DUT1 (google it) AIPS 117 '
'calls it UT1UTC',
spoof_val=0.0, expected_type=np.float)
self._timesys = uvp.UVParameter('timesys', required=False,
description='We only support UTC',
spoof_val='UTC', form='str')
desc = ('FHD thing we do not understand, something about the time '
'at which the phase center is normal to the chosen UV plane '
'for phasing')
self._uvplane_reference_time = uvp.UVParameter('uvplane_reference_time',
required=False,
description=desc,
spoof_val=0)
desc = "Per-antenna and per-frequency equalization coefficients"
self._eq_coeffs = uvp.UVParameter("eq_coeffs",
required=False,
description=desc,
form=("Nants_telescope", "Nfreqs"),
expected_type=np.float,
spoof_val=1.0)
desc = "Convention for how to remove eq_coeffs from data"
self._eq_coeffs_convention = uvp.UVParameter("eq_coeffs_convention",
required=False,
description=desc,
form="str",
spoof_val="divide")
super(UVData, self).__init__()
@property
def _data_params(self):
"""List of strings giving the data-like parameters"""
return ['data_array', 'nsample_array', 'flag_array']
@property
def data_like_parameters(self):
"""An iterator of defined parameters which are data-like (not metadata-like)"""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def metadata_only(self):
"""
Property that determines whether this is a metadata only object.
An object is metadata only if data_array, nsample_array and flag_array
are all None.
"""
metadata_only = all(d is None for d in self.data_like_parameters)
for param_name in self._data_params:
getattr(self, "_" + param_name).required = not metadata_only
return metadata_only
def check(self, check_extra=True, run_check_acceptability=True):
"""
Add some extra checks on top of checks on UVBase class.
Check that required parameters exist. Check that parameters have
appropriate shapes and optionally that the values are acceptable.
Parameters
----------
check_extra : bool
If true, check all parameters, otherwise only check required parameters.
run_check_acceptability : bool
Option to check if values in parameters are acceptable.
Returns
-------
bool
True if check passes
Raises
------
ValueError
if parameter shapes or types are wrong or do not have acceptable
values (if run_check_acceptability is True)
"""
# first run the basic check from UVBase
# set the phase type based on object's value
if self.phase_type == 'phased':
self.set_phased()
elif self.phase_type == 'drift':
self.set_drift()
else:
self.set_unknown_phase_type()
# check for deprecated x_orientation strings and convert to new values (if possible)
if self.x_orientation is not None:
# the acceptability check is always done with a `lower` for strings
if self.x_orientation.lower() not in self._x_orientation.acceptable_vals:
warn_string = ('x_orientation {xval} is not one of [{vals}], '
.format(xval=self.x_orientation,
vals=(', ').join(self._x_orientation.acceptable_vals)))
if self.x_orientation.lower() == 'e':
self.x_orientation = 'east'
warn_string += 'converting to "east".'
elif self.x_orientation.lower() == 'n':
self.x_orientation = 'north'
warn_string += 'converting to "north".'
else:
warn_string += 'cannot be converted.'
warnings.warn(warn_string + ' Only [{vals}] will be supported '
'starting in version 1.5'
.format(vals=(', ').join(self._x_orientation.acceptable_vals)),
DeprecationWarning)
super(UVData, self).check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
# Check internal consistency of numbers which don't explicitly correspond
# to the shape of another array.
nants_data_calc = int(len(np.unique(self.ant_1_array.tolist()
+ self.ant_2_array.tolist())))
if self.Nants_data != nants_data_calc:
raise ValueError('Nants_data must be equal to the number of unique '
'values in ant_1_array and ant_2_array')
if self.Nbls != len(np.unique(self.baseline_array)):
raise ValueError('Nbls must be equal to the number of unique '
'baselines in the data_array')
if self.Ntimes != len(np.unique(self.time_array)):
raise ValueError('Ntimes must be equal to the number of unique '
'times in the time_array')
# require that all entries in ant_1_array and ant_2_array exist in antenna_numbers
if not all(ant in self.antenna_numbers for ant in self.ant_1_array):
raise ValueError('All antennas in ant_1_array must be in antenna_numbers.')
if not all(ant in self.antenna_numbers for ant in self.ant_2_array):
raise ValueError('All antennas in ant_2_array must be in antenna_numbers.')
# issue warning if extra_keywords keys are longer than 8 characters
for key in self.extra_keywords.keys():
if len(key) > 8:
warnings.warn('key {key} in extra_keywords is longer than 8 '
'characters. It will be truncated to 8 if written '
'to uvfits or miriad file formats.'.format(key=key))
# issue warning if extra_keywords values are lists, arrays or dicts
for key, value in self.extra_keywords.items():
if isinstance(value, (list, dict, np.ndarray)):
warnings.warn('{key} in extra_keywords is a list, array or dict, '
'which will raise an error when writing uvfits or '
'miriad file types'.format(key=key))
# issue deprecation warning if antenna positions are not set
if self.antenna_positions is None:
warnings.warn('antenna_positions are not defined. '
'antenna_positions will be a required parameter in '
'version 1.5', DeprecationWarning)
# check auto and cross-corrs have sensible uvws
autos = np.isclose(self.ant_1_array - self.ant_2_array, 0.0)
if not np.all(np.isclose(self.uvw_array[autos], 0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1])):
raise ValueError("Some auto-correlations have non-zero "
"uvw_array coordinates.")
if np.any(np.isclose([np.linalg.norm(uvw) for uvw in self.uvw_array[~autos]], 0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1])):
raise ValueError("Some cross-correlations have near-zero "
"uvw_array magnitudes.")
return True
def copy(self, metadata_only=False):
"""Make and return a copy of the UVData object.
Parameters
----------
metadata_only : bool
If True, only copy the metadata of the object.
Returns
-------
uv : UVData
Copy of self.
"""
uv = UVData()
for param in self:
# parameter names have a leading underscore we want to ignore
if metadata_only and param.lstrip("_") in self._data_params:
continue
setattr(uv, param, copy.deepcopy(getattr(self, param)))
return uv
def set_drift(self):
"""Set phase_type to 'drift' and adjust required parameters."""
self.phase_type = 'drift'
self._phase_center_epoch.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
def set_phased(self):
"""Set phase_type to 'phased' and adjust required parameters."""
self.phase_type = 'phased'
self._phase_center_epoch.required = True
self._phase_center_ra.required = True
self._phase_center_dec.required = True
def set_unknown_phase_type(self):
"""Set phase_type to 'unknown' and adjust required parameters."""
self.phase_type = 'unknown'
self._phase_center_epoch.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
def known_telescopes(self):
"""
Get a list of telescopes known to pyuvdata.
This is just a shortcut to uvdata.telescopes.known_telescopes()
Returns
-------
list of str
List of names of known telescopes
"""
return uvtel.known_telescopes()
def set_telescope_params(self, overwrite=False):
"""
Set telescope related parameters.
If the telescope_name is in the known_telescopes, set any missing
telescope-associated parameters (e.g. telescope location) to the value
for the known telescope.
Parameters
----------
overwrite : bool
Option to overwrite existing telescope-associated parameters with
the values from the known telescope.
Raises
------
ValueError
if the telescope_name is not in known telescopes
"""
telescope_obj = uvtel.get_telescope(self.telescope_name)
if telescope_obj is not False:
params_set = []
for p in telescope_obj:
telescope_param = getattr(telescope_obj, p)
self_param = getattr(self, p)
if telescope_param.value is not None and (overwrite is True
or self_param.value is None):
telescope_shape = telescope_param.expected_shape(telescope_obj)
self_shape = self_param.expected_shape(self)
if telescope_shape == self_shape:
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, getattr(telescope_obj, prop_name))
else:
# expected shapes aren't equal. This can happen e.g. with diameters,
# which is a single value on the telescope object but is
# an array of length Nants_telescope on the UVData object
# use an assert here because we want an error if this condition
# isn't true, but it's really an internal consistency check.
# This will error if there are changes to the Telescope
# object definition, but nothing that a normal user does will cause an error
assert(telescope_shape == () and self_shape != 'str')
array_val = np.zeros(self_shape,
dtype=telescope_param.expected_type) + telescope_param.value
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, array_val)
if len(params_set) > 0:
params_set_str = ', '.join(params_set)
warnings.warn('{params} is not set. Using known values '
'for {telescope_name}.'.format(params=params_set_str,
telescope_name=telescope_obj.telescope_name))
else:
raise ValueError('Telescope {telescope_name} is not in '
'known_telescopes.'.format(telescope_name=self.telescope_name))
def baseline_to_antnums(self, baseline):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of int
baseline number
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
def antnums_to_baseline(self, ant1, ant2, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
attempt256 : bool
Option to try to use the older 256 standard used in many uvfits files
(will use 2048 standard if there are more than 256 antennas).
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
"""
return uvutils.antnums_to_baseline(ant1, ant2, self.Nants_telescope, attempt256=attempt256)
def set_lsts_from_time_array(self):
"""Set the lst_array based from the time_array."""
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(unique_times, latitude, longitude, altitude)
self.lst_array = unique_lst_array[inverse_inds]
def unphase_to_drift(self, phase_frame=None, use_ant_pos=False):
"""
Convert from a phased dataset to a drift dataset.
See the phasing memo under docs/references for more documentation.
Parameters
----------
phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
            'gcrs' accounts for precession & nutation, 'icrs' also includes aberration.
Defaults to using the 'phase_center_frame' attribute or 'icrs'
if that attribute is None.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
Raises
------
ValueError
If the phase_type is not 'phased'
"""
if self.phase_type == 'phased':
pass
elif self.phase_type == 'drift':
raise ValueError('The data is already drift scanning; can only '
'unphase phased data.')
else:
raise ValueError('The phasing type of the data is unknown. '
'Set the phase_type to drift or phased to '
'reflect the phasing status of the data')
if phase_frame is None:
if self.phase_center_frame is not None:
phase_frame = self.phase_center_frame
else:
phase_frame = 'icrs'
icrs_coord = SkyCoord(ra=self.phase_center_ra, dec=self.phase_center_dec,
unit='radian', frame='icrs')
if phase_frame == 'icrs':
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format='jd')
frame_phase_center = icrs_coord.transform_to('gcrs')
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
# In some cases, the uvws are already float64, but sometimes they're not
self.uvw_array = np.float64(self.uvw_array)
# apply -w phasor
if not self.metadata_only:
w_lambda = (self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to('m/s').value * self.freq_array.reshape(1, self.Nfreqs))
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, None, :, None])
self.data_array *= phs
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
for ind, jd in enumerate(unique_times):
inds =
|
np.where(self.time_array == jd)
|
numpy.where
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import numpy as np
import itertools
class Mesh:
# .mesh import functions
keywords = ["Vertices", "Triangles", "Quadrilaterals","Tetrahedra","SolAtVertices"]
def analyse(self, index, line):
for k,kwd in enumerate(self.keywords):
if self.found[k] and kwd not in self.done:
self.numItems[k] = int(line)
self.offset += self.numItems[k]
self.found[k] = False
self.done.append(kwd)
return 1
if kwd in line:
if kwd not in self.done and line[0]!="#":
if kwd == "Vertices" and line.strip()=="SolAtVertices":
pass
else:
self.begin[k] = index+3 if kwd=="SolAtVertices" else index+2
self.found[k] = True
def get_infos(self, path):
for j in range(len(self.keywords)):
with open(path) as f:
f.seek(0)
for i,l in enumerate(f):
if i>self.offset:
if self.analyse(i,l):
break
f.seek(0)
def readArray(self,f, ind, dim, dt=None):
#Allows for searching through n empty lines
maxNumberOfEmptylines = 20
for i in range(maxNumberOfEmptylines):
f.seek(0)
firstValidLine = f.readlines()[self.begin[ind]].strip()
if firstValidLine == "":
self.begin[ind]+=1
else:
break
try:
f.seek(0)
X = " ".join([l for l in itertools.islice(f, self.begin[ind], self.begin[ind] + self.numItems[ind])])
if dt:
return np.fromstring(X, sep=" ", dtype=dt).reshape((self.numItems[ind],dim))
else:
return np.fromstring(X, sep=" ").reshape((self.numItems[ind],dim))
except:
#print "Invalid format", ind, dim
f.seek(0)
try:
for i in range(self.begin[ind]):
f.readline()
arr = []
for l in f:
if len(l.strip())>0:
arr.append([int(x) for x in l.strip().split()])
else:
break
return np.array(arr,dtype=dt)
except:
print("Did not manage to read the array of " + f.name)
return np.array([])
return np.array([])
def readSol(self,path=None):
fileName = path if path is not None else self.path[:-5]+".sol"
if True:
if fileName is not None:
self.offset=0
self.get_infos(fileName)
with open(fileName) as f:
if self.numItems[4]:
#Allows for searching through n empty lines
maxNumberOfEmptylines = 20
for i in range(maxNumberOfEmptylines):
f.seek(0)
firstValidLine = f.readlines()[self.begin[4]].strip()
if firstValidLine == "":
self.begin[4]+=1
else:
break
f.seek(0)
nItems = len(f.readlines()[self.begin[4]].strip().split())
f.seek(0)
#Read a scalar
if nItems == 1:
self.scalars = np.array([float(l) for l in itertools.islice(f, self.begin[4], self.begin[4] + self.numItems[4])])
self.solMin = np.min(self.scalars)
self.solMax = np.max(self.scalars)
self.vectors = np.array([])
#Read a vector
if nItems == 3:
self.vectors = np.array([ [float(x) for x in l.strip().split()[:3]] for l in itertools.islice(f, self.begin[4], self.begin[4] + self.numItems[4])])
self.vecMin = np.min(np.linalg.norm(self.vectors,axis=1))
                            self.vecMax = np.max(np.linalg.norm(self.vectors,axis=1))
self.scalars=np.array([])
#Read a scalar after a vector
if nItems == 4:
self.vectors = np.array([ [float(x) for x in l.strip().split()[:3]] for l in itertools.islice(f, self.begin[4], self.begin[4] + self.numItems[4])])
self.vecMin = np.min(np.linalg.norm(self.vectors,axis=1))
                            self.vecMax = np.max(np.linalg.norm(self.vectors,axis=1))
f.seek(0)
self.scalars = np.array([float(l.split()[3]) for l in itertools.islice(f, self.begin[4], self.begin[4] + self.numItems[4])])
self.solMin = np.min(self.scalars)
self.solMax = np.max(self.scalars)
else:
self.scalars = np.array([])
self.vectors = np.array([])
else:
print("No .sol file associated with the .mesh file")
# Constructor
def __init__(self, path=None, cube=None, ico=None):
self.done = []
self.found = [False for k in self.keywords]
self.begin = [0 for k in self.keywords]
self.numItems = [0 for k in self.keywords]
self.offset = 0
if cube:
self.verts = np.array([
[cube[0], cube[2], cube[4]],
[cube[0], cube[2], cube[5]],
[cube[1], cube[2], cube[4]],
[cube[1], cube[2], cube[5]],
[cube[0], cube[3], cube[4]],
[cube[0], cube[3], cube[5]],
[cube[1], cube[3], cube[4]],
[cube[1], cube[3], cube[5]]
])
self.tris = np.array([
[0,1,2],
[1,3,2],
[4,6,5],
[5,6,7],
[1,5,3],
[3,5,7],
[2,6,4],
[0,2,4],
[3,7,6],
[2,3,6],
[0,4,1],
[1,4,5]
])
self.verts = np.insert(self.verts,3,0,axis=1)
self.tris = np.insert(self.tris,3,0,axis=1)
self.quads=np.array([])
self.tets=np.array([])
self.computeBBox()
elif ico:
self.verts = np.array([[ 0. , 0. , -1. , 0. ],
[ 0.72359997, -0.52572 , -0.44721499, 0. ],
[-0.27638501, -0.85064 , -0.44721499, 0. ],
[-0.89442497, 0. , -0.44721499, 0. ],
[-0.27638501, 0.85064 , -0.44721499, 0. ],
[ 0.72359997, 0.52572 , -0.44721499, 0. ],
[ 0.27638501, -0.85064 , 0.44721499, 0. ],
[-0.72359997, -0.52572 , 0.44721499, 0. ],
[-0.72359997, 0.52572 , 0.44721499, 0. ],
[ 0.27638501, 0.85064 , 0.44721499, 0. ],
[ 0.89442497, 0. , 0.44721499, 0. ],
[ 0. , 0. , 1. , 0. ]])
self.tris = np.array([[ 0, 1, 2, 0],
[ 1, 0, 5, 0],
[ 0, 2, 3, 0],
[ 0, 3, 4, 0],
[ 0, 4, 5, 0],
[ 1, 5, 10, 0],
[ 2, 1, 6, 0],
[ 3, 2, 7, 0],
[ 4, 3, 8, 0],
[ 5, 4, 9, 0],
[ 1, 10, 6, 0],
[ 2, 6, 7, 0],
[ 3, 7, 8, 0],
[ 4, 8, 9, 0],
[ 5, 9, 10, 0],
[ 6, 10, 11, 0],
[ 7, 6, 11, 0],
[ 8, 7, 11, 0],
[ 9, 8, 11, 0],
[10, 9, 11, 0]])
self.quads=np.array([])
self.tets=np.array([])
self.verts[:,:3]*=0.5*ico[1]
self.verts[:,:3]+=ico[0]
elif path:
self.path = path
self.get_infos(path)
with open(path) as f:
if self.numItems[0]:
self.verts = self.readArray(f,0,4,float)
if self.numItems[1]:
self.tris = self.readArray(f,1,4,int)
self.tris[:,:3]-=1
else:
self.tris = np.array([])
if self.numItems[2]:
self.quads = self.readArray(f,2,5,int)
self.quads[:,:4]-=1
else:
self.quads = np.array([])
if self.numItems[3]:
self.tets = self.readArray(f,3,5,int)
self.tets[:,:4]-=1
else:
self.tets = np.array([])
self.computeBBox()
else:
self.verts=np.array([])
self.tris=np.array([])
self.quads=np.array([])
self.tets=np.array([])
self.scalars=np.array([])
self.vectors=np.array([])
def caracterize(self):
try:
print("File " + self.path)
except:
pass
if len(self.verts):
print("\tVertices: ", len(self.verts))
print("\tBounding box: ", "[%.2f, %.2f] [%.2f, %.2f] [%.2f, %.2f]" % (self.xmin, self.xmax,self.ymin, self.ymax, self.zmin, self.zmax))
if len(self.tris):
print("\tTriangles: ", len(self.tris))
if len(self.quads):
print("\tQuadrilaterals: ", len(self.quads))
if len(self.tets):
print("\tTetrahedra: ", len(self.tets))
if len(self.scalars):
print("\tScalars: ", len(self.scalars))
if len(self.vectors):
print("\tVectors: ", len(self.vectors))
def computeBBox(self):
self.xmin, self.ymin, self.zmin = np.amin(self.verts[:,:3],axis=0)
self.xmax, self.ymax, self.zmax = np.amax(self.verts[:,:3],axis=0)
self.dims = np.array([self.xmax - self.xmin, self.ymax - self.ymin, self.zmax - self.zmin])
self.center = np.array([self.xmin + (self.xmax - self.xmin)/2, self.ymin + (self.ymax - self.ymin)/2, self.zmin + (self.zmax - self.zmin)/2])
def fondre(self, otherMesh):
off = len(self.verts)
if len(otherMesh.tris)>0:
self.tris = np.append(self.tris, otherMesh.tris + [off, off, off, 0], axis=0) if len(self.tris)>0 else otherMesh.tris + [off, off, off, 0]
if len(otherMesh.tets)>0:
self.tets = np.append(self.tets, otherMesh.tets + [off, off, off, off, 0], axis=0) if len(self.tets)>0 else otherMesh.tets + [off, off, off, off, 0]
if len(otherMesh.quads)>0:
self.quads = np.append(self.quads, otherMesh.quads + [off, off, off, off, 0], axis=0) if len(self.quads)>0 else otherMesh.quads + [off, off, off, off, 0]
if len(otherMesh.scalars)>0:
self.scalars = np.append(self.scalars, otherMesh.scalars, axis=0) if len(self.scalars)>0 else np.append(np.zeros((len(self.verts))), otherMesh.scalars, axis=0)
if len(otherMesh.vectors)>0:
self.vectors = np.append(self.vectors, otherMesh.vectors, axis=0) if len(self.vectors)>0 else np.append(np.zeros((len(self.verts),3)), otherMesh.vectors, axis=0)
if len(otherMesh.verts)>0:
self.verts = np.append(self.verts, otherMesh.verts, axis=0) if len(self.verts)>0 else otherMesh.verts
def replaceRef(self, oldRef, newRef):
if len(self.tris)!=0:
self.tris[self.tris[:,-1]==oldRef,-1] = newRef
if len(self.quads)!=0:
self.quads[self.quads[:,-1]==oldRef,-1] = newRef
if len(self.tets)!=0:
self.tets[self.tets[:,-1]==oldRef,-1] = newRef
def removeRef(self, ref, keepTris=False, keepTets=False, keepQuads=False):
if len(self.tris)!=0 and not keepTris:
self.tris = self.tris[self.tris[:,-1]!=ref]
if len(self.quads)!=0 and not keepQuads:
self.quads = self.quads[self.quads[:,-1]!=ref]
if len(self.tets)!=0 and not keepTets:
self.tets = self.tets[self.tets[:,-1]!=ref]
def writeVertsRef(self):
self.tets = self.tets[self.tets[:,-1].argsort()]
for i, t in enumerate(self.tets):
for iPt in t[:-1]:
self.verts[iPt][-1] = t[-1]
self.tris = self.tris[self.tris[:,-1].argsort()]
for i, t in enumerate(self.tris):
for iPt in t[:-1]:
self.verts[iPt][-1] = t[-1]
def scale(self,sc,center=[]):
if len(center)>0:
self.verts[:,:3] -= center
else:
self.verts[:,:3] -= self.center
self.verts[:,:3] *= sc
if len(center)>0:
self.verts[:,:3] += center
else:
self.verts[:,:3] += self.center
self.computeBBox()
def inflate(self,sc):
self.verts[:,:3] -= self.center
self.verts[:,:3] += sc/np.linalg.norm(self.verts[:,:3],axis=1)[:,None] * self.verts[:,:3]
self.verts[:,:3] += self.center
self.computeBBox()
def fitTo(self, otherMesh, keepRatio=True):
otherDim = [
otherMesh.dims[0]/self.dims[0],
otherMesh.dims[1]/self.dims[1],
otherMesh.dims[2]/self.dims[2]
]
if keepRatio:
scale = np.min(otherDim)
else:
scale = otherDim
self.verts[:,:3]-=self.center
self.verts[:,:3]*=scale
self.verts[:,:3]+=otherMesh.center
self.computeBBox()
def toUnitMatrix(self):
scale = 0.8/np.max(self.dims)
M1 = np.eye(4)
M1[:3,3] = -self.center
M2 = np.eye(4)*scale
M2[3,3]=1
M3 = np.eye(4)
M3[:3,3] = np.array([0.5,0.5,0.5])
MAT = np.dot(np.dot(M3, M2),M1)
return MAT
def discardUnused(self):
used = np.zeros(shape=(len(self.verts)),dtype="bool_")
if len(self.tris)>0:
used[np.ravel(self.tris[:,:3])]=True
if len(self.tets)>0:
used[np.ravel(self.tets[:,:4])]=True
if len(self.quads)>0:
used[np.ravel(self.quads[:,:4])]=True
newUsed = np.cumsum(used)
self.verts = self.verts[used==True]
if len(self.scalars)>0:
self.scalars = self.scalars[used==True]
if len(self.vectors)>0:
self.vectors = self.vectors[used==True]
if len(self.tris)>0:
newTris = np.zeros(shape=(len(self.tris),4),dtype=int)
newTris[:,-1] = self.tris[:,-1]
for i,triangle in enumerate(self.tris):
for j,t in enumerate(triangle[:-1]):
newTris[i,j] = newUsed[t]-1
self.tris = newTris
if len(self.tets)>0:
newTets = np.zeros(shape=(len(self.tets),5),dtype=int)
newTets[:,-1] = self.tets[:,-1]
for i,tet in enumerate(self.tets):
for j,t in enumerate(tet[:-1]):
newTets[i][j] = newUsed[t]-1
self.tets = newTets
self.computeBBox()
def discardDuplicateVertices(self):
unique, unique_inverse = np.unique([v for v in self.verts], return_inverse=True, axis=0)
unique_inverse = np.reshape(unique_inverse, (len(unique_inverse)//3, 3))
self.verts = unique
self.tris = np.insert(unique_inverse, 3, self.tris[:,3], axis=1)
self.computeBBox()
def getHull(self):
with open("tmp.node","w") as f:
f.write( str(len(self.verts)) + " 3 0 0\n")
for i,v in enumerate(self.verts):
f.write(str(i+1) + " " + " ".join([str(x) for x in v]) + "\n")
import os
os.system("tetgen -cAzn tmp.node > /dev/null 2>&1")
neigh = []
with open("tmp.1.neigh") as f:
for l in f.readlines()[1:-1]:
neigh.append( [int(l.split()[i]) for i in range(1,5)] )
tets = []
with open("tmp.1.ele") as f:
for l in f.readlines()[1:-1]:
tets.append( [int(l.split()[i]) for i in range(1,5)] )
verts = []
with open("tmp.1.node") as f:
for l in f.readlines()[1:-1]:
verts.append( [float(l.split()[i]) for i in range(1,4)]+[0] )
tris = []
for i,n in enumerate(neigh):
for j,c in enumerate(n):
if c==-1:
tris.append([tets[i][k] for k in range(4) if k!=j]+[0])
refs = [1 for t in tris]
mesh = Mesh()
mesh.verts = np.array(verts)
mesh.tris = np.array(tris,dtype=int)
mesh.discardUnused()
mesh.computeBBox()
return mesh
def applyMatrix(self, mat=None, matFile=None):
if mat is None and matFile is not None:
mat = np.zeros(shape=(4,4))
with open(matFile) as f:
for i,l in enumerate(f.readlines()):
if i<4:
elts = [float(x) for x in l.strip().split()]
mat[i] = elts
refs = np.copy(self.verts[:,3])
self.verts[:,3] = 1
self.verts = np.dot(self.verts,mat.T)
self.verts[:,3]=refs
self.computeBBox()
def scaleSol(self, mini, maxi, absolute=False):
if absolute:
ABS = np.absolute(self.scalars)
self.scalars = mini + (maxi-mini) * (ABS - np.min(ABS)) / (np.max(ABS) - np.min(ABS))
else:
self.scalars = mini + (maxi-mini) * (self.scalars - np.min(self.scalars)) / (np.max(self.scalars) -
|
np.min(self.scalars)
|
numpy.min
|
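# Hedged usage sketch for the Mesh class in the row above (editorial annotation, not part of the
# dataset row). It assumes the class is importable and that a hypothetical "example.mesh" /
# "example.sol" pair exists on disk:
#   m = Mesh(path="example.mesh")   # parses Vertices / Triangles / Quadrilaterals / Tetrahedra
#   m.readSol()                     # loads example.sol scalars or vectors, if any
#   m.scale(2.0)                    # scales the mesh about its bounding-box center
#   m.caracterize()                 # prints vertex/element counts and the bounding box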
# -*- coding: ISO-8859-1 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import itertools
import re
import sys
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.projections.polar import PolarAxes
from matplotlib.ticker import FormatStrFormatter, EngFormatter
import matplotlib
import numpy as np
import pylab
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_pdf import PdfPages
import hftools.math
from hftools.constants.si_units import si_exp_to_prefixes, _help_format_sci
from hftools.dataset import hfarray, remove_tail
from hftools.math import delay, unwrap_phase
"""
TODO
investigate how add_axobserver can be used
"""
__all__ = ["is_in_ipython", "build_figlegend", "twin_axes_legend",
"twin_fig_legend", "set_ytick", "set_xtick", "no_xtick_text",
"no_ytick_text", "arrange_figures", "xlabel_fmt", "ylabel_fmt",
"save_all_figures_to_pdf", "all_figures", "adjust_axwidth"]
def savefig(filename, *k, **kw):
if kw.get("facetransparent", False):
fig = pylab.gcf()
alpha = fig.patch.get_alpha()
fig.patch.set_alpha(0)
pylab.savefig(filename, *k, **kw)
if kw.get("facetransparent", False):
fig.patch.set_alpha(alpha)
def xlabel_fmt(fmt, unit=None, axes=None):
if axes is None:
axes = plt.gca()
if unit is None:
axes.set_xlabel_fmt(fmt)
else:
axes.set_xlabel_fmt(fmt, unit)
def ylabel_fmt(fmt, unit=None, axes=None):
if axes is None:
axes = plt.gca()
if unit is None:
axes.set_ylabel_fmt(fmt)
else:
axes.set_ylabel_fmt(fmt, unit)
default_unit_names = {"s": "Time",
u"s": "Time",
"Hz": "Frequency",
u"Hz": "Frequency",
"m": "Length",
u"m": "Length",
}
class SimpleUnitFormatter(FormatStrFormatter):
"""Only adds the base unit does no prefix magic
"""
def __init__(self, fmt, set_label_fun=None, label_fmt=None, unit=None):
FormatStrFormatter.__init__(self, fmt)
self.engfmt = EngFormatter(unit="", places=2)
self.set_label_fun = set_label_fun
self.default_unit = unit
self._label_unit = None
self.set_label_fmt(label_fmt)
self.label_template = None
def __call__(self, x, pos=None):
div = 1
prefix = ""
for dig in range(3):
digs = [abs((int((elem / div) * 10 ** dig) -
(elem / div) * 10 ** dig)) for elem in self.locs]
if max(digs) < 0.001:
self.fmt = "%%.%df" % dig
break
else:
self.fmt = "%.3f"
if self.set_label_fun and self.label_template:
xlabel = self.label_template % dict(prefix=prefix)
self.set_label_fun(xlabel)
return FormatStrFormatter.__call__(self, x / div, pos)
def get_label_fmt(self):
return self._label_fmt
def set_label_fmt(self, fmt):
if fmt is None:
self._label_fmt = ""
else:
self._label_fmt = fmt
self.update_template()
def get_label_unit(self):
if self._label_unit is None:
if self.default_unit is None:
return ""
else:
return self.default_unit
else:
return self._label_unit
def set_label_unit(self, unit):
if unit is not None:
self._label_unit = unit
self.update_template()
def update_template(self):
fmt = self.get_label_fmt()
if "[]" in fmt:
if self.get_label_unit():
fmt = fmt.replace("[]",
"[%%(prefix)s%s]" % self.get_label_unit())
else:
fmt = fmt.replace("[]", "[%(prefix)sU]")
elif "%(unit)s" in fmt:
fmt = fmt % dict(unit=self.get_label_unit())
self.label_template = fmt
class UnitFormatter(FormatStrFormatter):
def __init__(self, fmt, set_label_fun=None,
label_fmt=None, unit=None, digs=3):
FormatStrFormatter.__init__(self, fmt)
self.engfmt = EngFormatter(unit="", places=2)
self.set_label_fun = set_label_fun
self.default_unit = unit
self._label_unit = None
self.set_label_fmt(label_fmt)
self.label_template = None
self.digs = digs
self.default_label = "X"
def __call__(self, x, pos=None):
locs = [abs(y) for y in self.locs if abs(y) != 0]
if locs and max(locs) / min(locs) <= 10000:
_, exponent = _help_format_sci(max(locs), 2)
div = 10 ** exponent
for dig in range(3):
digs = [abs((int((elem / div) * 10 ** dig) -
(elem / div) * 10 ** dig)) for elem in self.locs]
if max(digs) < 0.001:
self.fmt = "%%.%df" % dig
break
else:
self.fmt = "%%.%df" % self.digs
if self.set_label_fun and self.label_template:
prefix = si_exp_to_prefixes.get(exponent, "q")
if prefix == "q":
prefix = ""
div = 1
self.fmt = "%%.%de" % self.digs
xlabel = self.label_template % dict(prefix=prefix,
powerprefix=exponent)
self.set_label_fun(xlabel)
return FormatStrFormatter.__call__(self, x / div, pos)
else:
self.engfmt.locs = self.locs
return self.engfmt(x, pos)
def get_label_fmt(self):
return self._label_fmt
def set_label_fmt(self, fmt):
if fmt is None:
self._label_fmt = ""
else:
self._label_fmt = fmt
self.update_template()
def get_label_unit(self):
if self._label_unit is None:
if self.default_unit is None:
return ""
else:
if self.default_unit == "deg":
return u"\xb0"
else:
return self.default_unit
else:
if self._label_unit == "deg":
return u"\xb0"
else:
return self._label_unit
def set_label_unit(self, unit):
if unit is not None:
self._label_unit = unit
self.update_template()
def get_label_name_and_unit(self):
unit = self.get_label_unit()
default = self.default_label
if unit == "Hz":
default = "Frequency"
elif unit in ["s", "h", "min"]:
default = "Time"
name = default_unit_names.get(unit, default)
return dict(unit=unit, default=name)
def update_template(self):
fmt = self.get_label_fmt()
if "[]" in fmt:
if self.get_label_unit():
fmt = fmt.replace("[]", u"[%(prefix)s{unit}]")
else:
fmt = fmt.replace("[]", u"[%(prefix)sU]")
elif "[^]" in fmt:
if self.get_label_unit():
fmt = fmt.replace("[^]", u"$[10^{{%%(powerprefix).0f" +
u"\\rm{{{unit}}}}]$")
else:
fmt = fmt.replace("[^]", u"$[10^{{%(powerprefix).0f}}]$")
if "{unit}" in fmt or "{default}" in fmt:
dct = self.get_label_name_and_unit()
fmt = fmt.format(**dct)
self.label_template = fmt
def get_dims_names(x):
return [y.name for y in x.dims]
class HFToolsAxes(Axes):
name = "rectilinear"
colorcycle = "bgrcmyk"
markercycle = ['', 'o', 'v', '^', '<', '>', '*', 'x', '+']
linecycle = ['-', '--', '-.', ':']
def __init__(self, *k, **kw):
Axes.__init__(self, *k, **kw)
self.HFTOOLS_default_x_name = None
def _plot_helper(self, x, y, *args, **kwargs):
if not hasattr(y, "dims"):
return Axes.plot(self, x, y, *args, **kwargs)
if x.ndim == 1 and y.ndim == 1:
return Axes.plot(self, x, y, *args, **kwargs)
else:
return Axes.plot(self, remove_tail(x), remove_tail(y), *args, **kwargs)
Ns = y.shape[1:4]
kw = kwargs.copy()
lines = []
if len(Ns) == 1:
C = zip(itertools.cycle(self.colorcycle), range(Ns[0]))
for c, i in C:
#kw.update(dict(color=c))
if hasattr(x, "dims") and (get_dims_names(x) ==
get_dims_names(y)):
xx = x[:, i].squeeze()
else:
xx = x
lines.extend(Axes.plot(self, xx, remove_tail(y[:, i]),
*args, **kw))
elif len(Ns) == 2:
C = zip(itertools.cycle(self.colorcycle), range(Ns[0]))
M = zip(itertools.cycle(self.markercycle), range(Ns[1]))
for c, i in C:
for m, j in M:
if hasattr(x, "dims") and (get_dims_names(x) ==
get_dims_names(y)):
xx = x[:, i, j].squeeze()
else:
xx = x
#kw.update(dict(color=c, marker=m))
lines.extend(Axes.plot(self, xx,
remove_tail(y[:, i, j]),
*args, **kw))
elif len(Ns) > 2:
C = zip(itertools.cycle(self.colorcycle), range(Ns[0]))
M = zip(itertools.cycle(self.markercycle), range(Ns[1]))
L = zip(itertools.cycle(self.linecycle), range(Ns[2]))
for c, i in C:
for m, j in M:
for l, k in L:
if hasattr(x, "dims") and (get_dims_names(x) ==
get_dims_names(y)):
xx = x[:, i, j, k].squeeze()
else:
xx = x
#kw.update(dict(color=c, marker=m, line=l))
lines.extend(Axes.plot(self, xx,
remove_tail(y[:, i, j, k]),
*args, **kw))
return lines
def plot(self, *args, **kwargs):
if "projection" in kwargs:
projection = kwargs.pop("projection")
else:
projection = self.name
vars = args[:2]
args = args[2:]
if len(vars) == 2 and isinstance(vars[1], (str, unicode)):
args = (vars[1],) + args
vars = vars[:1]
if ((len(vars) == 1 and
isinstance(vars[0], hfarray) and
len(vars[0].dims) >= 1)):
y = vars[0]
x = hfarray(y.dims[0])
vars = (x, y)
if self.HFTOOLS_default_x_name is None:
self.HFTOOLS_default_x_name = y.dims[0].name
fmt = self.axes.xaxis.get_major_formatter()
if hasattr(fmt, "update_template"):
fmt.default_label = self.HFTOOLS_default_x_name
fmt.update_template()
if len(vars) == 1:
y = vars[0]
if projection in _projfun:
x, y = _projfun[projection](None, y)
return Axes.plot(self, y, *args, **kwargs)
elif np.iscomplexobj(y):
return Axes.plot(self, y.real, y.imag, *args, **kwargs)
else:
return Axes.plot(self, y, *args, **kwargs)
elif len(vars) == 2:
x = vars[0]
y = vars[1]
xunit = getattr(x, "unit", None)
yunit = getattr(y, "unit", None)
if projection in _projfun:
x, y = _projfun[projection](x, y)
lines = self._plot_helper(x, y, *args, **kwargs)
elif np.iscomplexobj(y):
xunit = yunit
lines = self._plot_helper(y.real, y.imag, *args, **kwargs)
else:
lines = self._plot_helper(x, y, *args, **kwargs)
if xunit:
self.set_xlabel_unit(xunit)
if yunit:
self.set_ylabel_unit(yunit)
return lines
else:
raise Exception("Missing plot data")
def set_xlabel_unit(self, unit):
xfmt = self.axes.xaxis.get_major_formatter()
if hasattr(xfmt, "set_label_unit"):
xfmt.set_label_unit(unit)
def get_xlabel_unit(self):
xfmt = self.axes.xaxis.get_major_formatter()
if hasattr(xfmt, "get_label_unit"):
return xfmt.get_label_unit()
else:
return None
def set_ylabel_unit(self, unit):
yfmt = self.axes.yaxis.get_major_formatter()
if hasattr(yfmt, "set_label_unit"):
yfmt.set_label_unit(unit)
def get_ylabel_unit(self):
xfmt = self.axes.yaxis.get_major_formatter()
if hasattr(xfmt, "get_label_unit"):
return xfmt.get_label_unit()
else:
return None
def set_xlabel_fmt(self, fmt, unit=None):
xfmt = self.axes.xaxis.get_major_formatter()
xfmt.set_label_fun = self.set_xlabel
if hasattr(xfmt, "set_label_fmt"):
self.set_xlabel_unit(unit)
xfmt.set_label_fmt(fmt)
def set_ylabel_fmt(self, fmt, unit=None):
yfmt = self.axes.yaxis.get_major_formatter()
yfmt.set_label_fun = self.set_ylabel
if hasattr(yfmt, "set_label_fmt"):
self.set_ylabel_unit(unit)
yfmt.set_label_fmt(fmt)
def get_ylabel_fmt(self):
xfmt = self.axes.yaxis.get_major_formatter()
if hasattr(xfmt, "get_label_fmt"):
return xfmt.get_label_fmt()
else:
return None
def get_xlabel_fmt(self):
xfmt = self.axes.xaxis.get_major_formatter()
if hasattr(xfmt, "get_label_fmt"):
return xfmt.get_label_fmt()
else:
return None
matplotlib.projections.register_projection(HFToolsAxes)
class dBAxes(HFToolsAxes):
name = "db"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.3f"))
self.axes.yaxis.set_major_formatter(SimpleUnitFormatter("%.3f"))
self.set_ylabel_fmt("[]", unit="dB")
self.set_xlabel_fmt("{default} []")
def set_xlabel_fmt(self, fmt, unit=None):
HFToolsAxes.set_xlabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(dBAxes)
class dB10Axes(HFToolsAxes):
name = "db10"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.3f"))
self.axes.yaxis.set_major_formatter(SimpleUnitFormatter("%.3f"))
self.set_ylabel_fmt("[]", unit="dB")
self.set_xlabel_fmt("{default} []")
def set_xlabel_fmt(self, fmt, unit=None):
HFToolsAxes.set_xlabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(dB10Axes)
class MagAxes(HFToolsAxes):
name = "mag"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.1f"))
self.axes.yaxis.set_major_formatter(UnitFormatter("%.1f"))
self.set_xlabel_fmt("{default} []")
self.set_ylabel_fmt("[]")
def set_xlabel_fmt(self, fmt, unit=None):
HFToolsAxes.set_xlabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(MagAxes)
class MagSquareAxes(HFToolsAxes):
name = "mag_square"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.1f"))
self.axes.yaxis.set_major_formatter(UnitFormatter("%.1f"))
self.set_xlabel_fmt("{default} []")
self.set_ylabel_fmt("[]")
def set_xlabel_fmt(self, fmt, unit=None):
HFToolsAxes.set_xlabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(MagSquareAxes)
class UnityAxes(MagAxes):
name = "unity"
matplotlib.projections.register_projection(UnityAxes)
class XSIAxes(MagAxes):
name = "x-si"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.1f"))
self.set_xlabel_fmt("{default} []")
matplotlib.projections.register_projection(XSIAxes)
class ComplexAxes(HFToolsAxes):
name = "cplx"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
self.axes.xaxis.set_major_formatter(UnitFormatter("%.1f"))
self.axes.yaxis.set_major_formatter(UnitFormatter("%.1f"))
self.set_xlabel_fmt("Real []")
self.set_ylabel_fmt("Imaginary []")
matplotlib.projections.register_projection(ComplexAxes)
class GroupDelayAxes(MagAxes):
name = "groupdelay"
def __init__(self, *args, **kwargs):
MagAxes.__init__(self, *args, **kwargs)
self.axes.yaxis.set_major_formatter(UnitFormatter("%.1f"))
self.set_ylabel_fmt(r"$\tau$ []")
matplotlib.projections.register_projection(GroupDelayAxes)
class RealAxes(MagAxes):
name = "real"
matplotlib.projections.register_projection(RealAxes)
class ImagAxes(MagAxes):
name = "imag"
matplotlib.projections.register_projection(ImagAxes)
class DegAxes(MagAxes):
name = "deg"
def __init__(self, *args, **kwargs):
MagAxes.__init__(self, *args, **kwargs)
self.set_ylabel_fmt(u"[]")
def set_ylabel_fmt(self, fmt, unit=u"\xb0"):
MagAxes.set_ylabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(DegAxes)
class UnwrapDegAxes(DegAxes):
name = "unwrapdeg"
matplotlib.projections.register_projection(UnwrapDegAxes)
class WrapUnwrapDegAxes(DegAxes):
name = "wrapunwrapeddeg"
matplotlib.projections.register_projection(WrapUnwrapDegAxes)
class RadAxes(MagAxes):
name = "rad"
def __init__(self, *args, **kwargs):
MagAxes.__init__(self, *args, **kwargs)
self.set_ylabel_fmt(u"[]")
def set_ylabel_fmt(self, fmt, unit=u"rad"):
MagAxes.set_ylabel_fmt(self, fmt, unit)
matplotlib.projections.register_projection(RadAxes)
class UnwrapRadAxes(RadAxes):
name = "unwraprad"
matplotlib.projections.register_projection(UnwrapRadAxes)
class ComplexPolarAxes(PolarAxes):
name = "cplxpolar"
def plot(self, *args, **kwargs):
projection = self.name
vars = args[:2]
args = args[2:]
if len(vars) == 2 and isinstance(vars[1], (str, unicode)):
args = (vars[1],) + args
vars = vars[:1]
if ((len(vars) == 1 and
isinstance(vars[0], hfarray) and
len(vars[0].dims) >= 1)):
y = vars[0]
x = hfarray(y.dims[0])
vars = (x, y)
if len(vars) == 1:
y = vars[0]
if projection in _projfun:
x, y = _projfun[projection](None, y)
return Axes.plot(self, y, *args, **kwargs)
elif np.iscomplexobj(y):
return Axes.plot(self, y.real, y.imag, *args, **kwargs)
else:
return Axes.plot(self, y, *args, **kwargs)
elif len(vars) == 2:
x = vars[0]
y = remove_tail(vars[1])
if projection in _projfun:
x, y = _projfun[projection](x, y)
lines = Axes.plot(self, x, y, *args, **kwargs)
elif np.iscomplexobj(y):
lines = Axes.plot(self, y.real, y.imag, *args, **kwargs)
else:
lines = Axes.plot(self, x, y, *args, **kwargs)
# if xunit:
# self.set_xlabel_unit(xunit)
# if yunit:
# self.set_ylabel_unit(yunit)
return lines
else:
raise Exception("Missing plot data")
matplotlib.projections.register_projection(ComplexPolarAxes)
def cplx_polar_projection(x, y):
if x is None:
r = abs(y)
theta = np.angle(y)
elif np.iscomplexobj(y):
r = abs(y)
theta = np.angle(y)
else:
y = x + 1j * y
r = abs(y)
theta =
|
np.angle(y)
|
numpy.angle
|
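# Hedged usage sketch (editorial annotation, not part of the dataset row): the Axes subclasses in
# the row above are registered as matplotlib projections, so they would normally be selected by
# name through the projection keyword, e.g.:
#   import matplotlib.pyplot as plt
#   ax = plt.subplot(111, projection="db")    # dBAxes registered above
#   ax.plot(freq, gain)                       # hypothetical hfarray inputs get unit-aware labels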
import time, gym
import numpy as np
from UTILS.tensor_ops import my_view, distance_matrix  # distance_matrix (used in observation() below) is assumed to come from the same helper module
from ..core import World, Agent, EntityState
class collective_assultEnvV1(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self,numguards =5, numattackers = 5, size=1.0):
from ..collective_assult_parallel_run import ScenarioConfig
self.init_dis = ScenarioConfig.init_distance
self.half_death_reward = ScenarioConfig.half_death_reward
self.random_jam_prob = ScenarioConfig.random_jam_prob
# environment will have guards(green) and attackers(red)
# red bullets - can hurt green agents, vice versa
# single hit - if hit once agent dies
self.world = World()
# arena size; initialized from the size parameter
self.world.wall_pos=[-1*size,1*size,-1*size,1*size]
self.world.init_box=[-1*5,1*5,-1*5,1*5]
self.world.fortDim = 0.15 # radius
self.world.doorLoc = np.array([0,0]) # location of the fort
self.world.numGuards = numguards # initial number of guards, attackers and bullets
self.world.numAttackers = numattackers
self.world.numBullets = 0
self.world.numAgents = self.world.numGuards + self.world.numAttackers
self.world.numAliveGuards, self.world.numAliveAttackers, self.world.numAliveAgents = self.world.numGuards, self.world.numAttackers, self.world.numAgents
self.world.atttacker_reached = False ## did any attacker manage to reach the gate?
landmarks = [] # as of now no obstacles, landmarks
self.attacker_reward_sum = 0
self.guard_reward_sum = 0
self.world.agents = [Agent(iden=i) for i in range(self.world.numAgents)] # first we have the guards and then we have the attackers
for i, agent in enumerate(self.world.agents):
agent.name = 'agent %d' % (i+1)
agent.collide = False
agent.collide_wall = True
agent.silent = True
agent.bullets_is_limited = False # whether the agent's bullets are limited
agent.numbullets = 10 # number of bullets
agent.attacker = False if i < self.world.numGuards else True
# agent.shootRad = 0.8 if i<self.world.numGuards else 0.6
agent.accel = 3 ## guards and attackers have same speed and accel
agent.max_speed = 1.0 ## used in integrate_state() inside core.py. slowing down so that bullet can move fast and still it doesn't seem that the bullet is skipping steps
agent.max_rot = 0.17 ## approx 10 degree
# agent.action_callback_test = self.action_callback if agent.attacker else None # whether to use scripted rules during evaluation
agent.action_callback = self.action_callback if agent.attacker else None # whether to use scripted rules during evaluation
# agent.size = 0.1
# agent.action_callback
# agent.script = False if i < self.world.numGuards else True
self.viewers = [None]
self.render_geoms = None
self.shared_viewer = True
self.world.time_step = 0
self.world.max_time_steps = None # set inside malib/environments/collective_assult; the maximum of 100 steps is initialized externally
self.world.vizDead = True # whether to visualize the dead agents
self.world.vizAttn = True # whether to visualize attentions
self.world.gameResult = np.array([0,0,0,0,0]) # [guards all win, guard win, attacker all win, attacker win, draw]
self.reset_world()
if ScenarioConfig.MCOM_DEBUG:
from VISUALIZE.mcom import mcom
from config import GlobalConfig as cfg
self.mcv = mcom(
path='%s/v2d_logger/'%cfg.logdir,
digit=16, rapid_flush=True, draw_mode='OFF')
self.mcv.v2d_init()
# a placeholder callback; its purpose is unclear, do not delete it
def action_callback(self,agent,world):
pass
def reset_world(self):
# light green for guards and light red for attackers
self.world.time_step = 0
self.world.bullets = [] ##
self.world.numAliveAttackers = self.world.numAttackers
self.world.numAliveGuards = self.world.numGuards
self.world.numAliveAgents = self.world.numAgents
self.world.gameResult[:] = 0
theta = (2*np.random.rand()-1)*np.pi
self.world.init_theta = theta
rotate = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
for i, agent in enumerate(self.world.agents):
agent.alive = True
agent.color = np.array([0.0, 1.0, 0.0]) if not agent.attacker else np.array([1.0, 0.0, 0.0])
agent.state.p_vel = np.zeros(self.world.dim_p-1) ##
agent.state.c = np.zeros(self.world.dim_c)
agent.state.p_ang = (theta+np.pi) + (np.random.rand()-0.5)/12 if agent.attacker else (theta + (np.random.rand()-0.5)/12)
agent.numbullets = 10
xMin, xMax, yMin, yMax = self.world.init_box
xMid = xMin/2 + xMax/2
yMid = yMin/2 + yMax/2
xInitDis = self.init_dis
# now we will set the initial positions
# attackers start from far away
# attackers are the red side, guards are the blue side
if agent.attacker:
# randomly initialize the position
# agent.state.p_pos = np.concatenate((np.random.uniform(xMax,0.8*xMax,1), np.random.uniform(yMin,1*yMax,1)))
x_ = xMid+xInitDis/2
y_ = (yMax-yMin)/self.world.numAttackers*(agent.iden - self.world.numGuards +0.5) + yMin
agent.state.p_pos = np.array([x_, y_])
agent.state.p_pos += (np.random.randn(2,)-0.5)/10
if self.world.numAttackers>50:
centering = np.array([xMid, yMid])
ratio = 1
if agent.iden%3 == 0:
ratio = 0.5
if agent.iden%3 == 1:
ratio = 0.75
agent.state.p_pos = centering + (agent.state.p_pos-centering)*ratio
agent.state.p_pos = np.dot(agent.state.p_pos, rotate.T)
# guards start near the door
else:
# randomly initialize the position
# agent.state.p_pos = np.concatenate((np.random.uniform(xMin,0.8*xMin,1), np.random.uniform(yMin,1*yMax,1)))
agent.state.p_pos = np.concatenate(( np.array([xMid-xInitDis/2]),
np.array([(yMax-yMin)/self.world.numGuards*(agent.iden+0.5) + yMin])))
agent.state.p_pos += (np.random.randn(2,)-0.5)/10
if self.world.numGuards>50:
centering = np.array([xMid, yMid])
ratio = 1
if agent.iden%3 == 0:
ratio = 0.5
if agent.iden%3 == 1:
ratio = 0.75
agent.state.p_pos = centering + (agent.state.p_pos-centering)*ratio
agent.state.p_pos = np.dot(agent.state.p_pos, rotate.T)
agent.numHit = 0 # overall in one episode
agent.numWasHit = 0
agent.hit = False # in last time step
agent.wasHit = False
# return all attackers that are alive (or just died this step)
def alive_attackers(self):
return [agent for agent in self.world.agents if ( (agent.alive or agent.justDied) and agent.attacker)]
# return all guards that are alive (or just died this step)
def alive_guards(self):
return [agent for agent in self.world.agents if ( (agent.alive or agent.justDied) and not agent.attacker)]
# return all agents that are attackers
def attackers(self):
return [agent for agent in self.world.agents if (agent.attacker)]
# return all agents that are not attackers
def guards(self):
return [agent for agent in self.world.agents if (not agent.attacker)]
def reward(self, agent):
if agent.alive or agent.justDied:
main_reward = self.attacker_reward(agent) if agent.attacker else self.guard_reward(agent)
else:
main_reward = 0
return main_reward
def attacker_reward(self, agent):
rew0, rew1, rew2, rew3, rew4, rew5, rew10 = 0,0,0,0,0,0,0
for agents in self.alive_attackers():
if agents.hit:
rew3 = +1
if agents.wasHit:
rew4 = -1 if not self.half_death_reward else -0.5
self.attacker_reward_sum = rew0+rew1+rew2+rew3+rew4+rew5+rew10
return self.attacker_reward_sum
def guard_reward(self, agent):
rew0, rew1, rew2, rew3, rew4, rew5, rew6, rew7, rew8,rew10 = 0,0,0,0,0,0,0,0,0,0
if agent.hit:
rew5 += 1
if agent.wasHit:
rew6 = -1 if not self.half_death_reward else -0.5
self.guard_reward_sum = rew0+rew1+rew2+rew3+rew4+rew5+rew6+rew7+rew8 +rew10
return self.guard_reward_sum
raw_obs_size = -1
class raw_obs_array(object):
def __init__(self):
if collective_assultEnvV1.raw_obs_size==-1:
self.guards_group = []
self.nosize = True
else:
self.guards_group = np.zeros(shape=(collective_assultEnvV1.raw_obs_size), dtype=np.float32)
self.nosize = False
self.p = 0
def append(self, buf):
if self.nosize:
self.guards_group.append(buf)
else:
L = len(buf)
self.guards_group[self.p:self.p+L] = buf[:]
self.p += L
def get(self):
if self.nosize:
self.guards_group = np.concatenate(self.guards_group)
collective_assultEnvV1.raw_obs_size = len(self.guards_group)
return self.guards_group
@staticmethod
def get_binary_array(n_int, n_bits=8, dtype=np.float32):
arr = np.zeros((*n_int.shape, n_bits), dtype=dtype)
pointer = 0
for i in range(n_bits):
arr[:, i] = (n_int % 2 == 1).astype(int)
n_int = n_int // 2
n_int = n_int.astype(np.int8)
return arr
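# Hedged illustration of get_binary_array above: for n_int = np.array([3, 5]) and n_bits = 8 it
# returns (as float32 by default) the LSB-first rows [1, 1, 0, 0, 0, 0, 0, 0] and [1, 0, 1, 0, 0, 0, 0, 0].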
@staticmethod
def item_random_mv(src,dst,prob,rand=False):
assert len(src.shape)==1; assert len(dst.shape)==1
if rand: np.random.shuffle(src)
len_src = len(src)
n_mv = (np.random.rand(len_src) < prob).sum()
item_mv = src[range(len_src-n_mv,len_src)]
src = src[range(0,0+len_src-n_mv)]
dst = np.concatenate((item_mv, dst))
return src, dst
def observation(self, agent, world, get_obs_dim=False):
if get_obs_dim: return 12*16
if agent.iden == 0:
num_guards = self.world.numGuards
num_attackers = self.world.numAttackers
n_int = np.arange(num_guards+num_attackers)
bi_hot = self.get_binary_array(n_int, 8)
self.obs_arr = self.raw_obs_array()
for guard in self.guards():
self.obs_arr.append([guard.alive])
self.obs_arr.append(guard.state.p_pos)
self.obs_arr.append([guard.state.p_ang])
self.obs_arr.append(guard.state.p_vel)
self.obs_arr.append([guard.iden])
self.obs_arr.append([guard.terrain])
self.obs_arr.append(bi_hot[guard.iden])
for attacker in self.attackers():
self.obs_arr.append([attacker.alive])
self.obs_arr.append(attacker.state.p_pos)
self.obs_arr.append([attacker.state.p_ang])
self.obs_arr.append(attacker.state.p_vel)
self.obs_arr.append([attacker.iden])
self.obs_arr.append([attacker.terrain])
self.obs_arr.append(bi_hot[attacker.iden])
raw_obs = self.obs_arr.get()
'''
from VISUALIZE.mcom import mcom
from config import GlobalConfig as cfg
if not hasattr(cfg, 'ak_logger'):
cfg.ak_logger = mcom(ip='127.0.0.1',
port=12084,
path='%s/v2d_logger/'%cfg.logdir,
digit=16, rapid_flush=True, draw_mode='Native')
cfg.ak_logger.v2d_init()
self.mcv = cfg.ak_logger
self.mcv.v2d_clear()
for index, guard in enumerate(self.guards()):
self.mcv.v2dx('cir|%d|b|0.04'%(index), guard.state.p_pos[0], guard.state.p_pos[1])
if not guard.alive:
self.mcv.v2dx('cir|%d|k|0.04'%(index), guard.state.p_pos[0], guard.state.p_pos[1])
for index, attacker in enumerate(self.attackers()):
self.mcv.v2dx('cir|%d|r|0.04'%(index+50), attacker.state.p_pos[0], attacker.state.p_pos[1])
if not attacker.alive:
self.mcv.v2dx('cir|%d|k|0.04'%(index+50), attacker.state.p_pos[0], attacker.state.p_pos[1])
self.mcv.v2d_show()
'''
self.new_obs = raw_obs.astype(np.float32)
self.dec = {'alive':0,
'pos':range(1,3),
'ang':3,
'vel':range(4,6),
'id':6,
'terrain':7,
'bi_hot':range(8, 16)}
self.obs_range = 2.0
self.n_object = self.world.numGuards + self.world.numAttackers
self.obs = my_view(self.new_obs, [self.n_object, -1])
self.dis = distance_matrix(self.obs[:,self.dec['pos']])
# set almost inf distance for dead agents
self.h_alive = np.array([attacker.alive for attacker in self.attackers()])
self.f_alive = np.array([guard.alive for guard in self.guards()])
alive_all = np.concatenate((self.f_alive, self.h_alive))
self.dis[~alive_all,:] = +np.inf
self.dis[:,~alive_all] = +np.inf
# does this account for whether the agents are alive???
guards_uid = range(0,num_guards)
attackers_uid = range(num_guards,num_attackers+num_guards)
self.f2h_dis = self.dis[guards_uid, :][:, attackers_uid]
self.f2f_dis = self.dis[guards_uid, :][:, guards_uid]
self.agent_emb = self.obs[guards_uid]
self.hostile_emb = self.obs[attackers_uid]
A_id = agent.iden
a2h_dis = self.f2h_dis[A_id]
a2f_dis = self.f2f_dis[A_id]
vis_n = 6
h_iden_sort = np.argsort(a2h_dis)[:vis_n]
f_iden_sort = np.argsort(a2f_dis)[:vis_n]
# np.random.shuffle(h_iden_sort)
# np.random.shuffle(f_iden_sort)
if not agent.alive:
agent_obs = np.zeros(shape=(self.agent_emb.shape[-1] *vis_n*2,))
info_n = {'vis_f': None, 'vis_h':None, 'alive': False}
return agent_obs, info_n
# observe hostile:: dis array([4, 6, 3, 5, 2, 7]) shuf array([5, 2, 3, 6, 7, 4])
a2h_dis_sorted = a2h_dis[h_iden_sort]
hostile_vis_mask = (a2h_dis_sorted<=self.obs_range) & (self.h_alive[h_iden_sort])
vis_index = h_iden_sort[hostile_vis_mask]
invis_index = h_iden_sort[~hostile_vis_mask]
vis_index, invis_index = self.item_random_mv(src=vis_index, dst=invis_index,prob=self.random_jam_prob, rand=True)
_ind = np.concatenate((vis_index, invis_index))
_msk =
|
np.concatenate((vis_index<0, invis_index>=0))
|
numpy.concatenate
|
"""
LIF (Leaky integrate-and-fire) Neuron model
Copyright(c) HiroshiARAKI
"""
import numpy as np
import matplotlib.pyplot as plt
from .neuron import Neuron
from ..tools import kernel
class LIF(Neuron):
"""
LIF: leaky integrate-and-fire model
"""
def __init__(self,
time: int,
dt: float = 1.0,
rest=-65,
th=-40,
ref=3,
tc_decay=100,
k='single',
tau: tuple = (20, ),
**kwargs):
"""
Initialize Neuron parameters
:param time: experimental time
:param dt: time step
:param rest: resting potential
:param th: threshold
:param ref: refractory period
:param tc_decay: time constant
:param k: kernel {'single', 'double'}
:param tau: exponential decays as tuple(tau_1 ,tau_2) or float
"""
super().__init__(time, dt)
if k not in ['single', 'double']:
print('Unknown kernel; "single" is selected instead.')
k = 'single'
self.rest = kwargs.get('rest', rest)
self.th = kwargs.get('th', th)
self.ref = kwargs.get('ref', ref)
self.tc_decay = kwargs.get('tc_decay', tc_decay)
self.monitor = {}
self.kernel = kernel[kwargs.get('k', k)] # default: single exp filter
self.tau = tau if type(tau) is tuple else (tau, )
def calc_v(self, data):
"""
Calculate Membrane Voltage
:param data: tuple(spikes[], weight[])
:return:
"""
spikes = np.array(data[0])
weights = np.array(data[1])
data = [
spikes[i] * weights[i]
for i in range(weights.size)
]
time = int(self.time / self.dt)
data = np.sum(data, 0)
data = np.convolve(data,
self.kernel(np.arange(0, self.time, self.dt),
self.tau)
)[0:time]
# initialize
f_last = 0 # last firing time
vpeak = 20 # the peak of membrane voltage
spikes = np.zeros(time)
v = self.rest # set to resting voltage
v_monitor = [] # monitor voltage
# Core of LIF
for t in range(time):
dv = ((self.dt * t) > (f_last + self.ref)) * (-v + self.rest) / self.tc_decay + data[t]
v = v + self.dt * dv # calc voltage
f_last = f_last + (self.dt * t - f_last) * (v >= self.th) # if fires, memory the firing time
v = v + (vpeak - v) * (v >= self.th) # set to peak
v_monitor.append(v)
spikes[t] = (v >= self.th) * 1 # set to spike
v = v + (self.rest - v) * (v >= self.th) # return to resting voltage
self.monitor['s'] = spikes
self.monitor['v'] = v_monitor
self.monitor['f'] = np.arange(0, self.time, self.dt)[spikes == 1]  # real firing times
return v_monitor, spikes, self.monitor['f']
def plot_v(self, save=False, filename='lif.png', **kwargs):
"""
plot membrane potential
:param save:
:param filename:
:param kwargs:
:return:
"""
x = np.arange(0, self.time, self.dt)
plt.title('LIF Neuron model Simulation')
plt.plot(x, self.monitor['v'])
plt.ylabel('V [mV]')
plt.xlabel('time [ms]')
if not save:
plt.show()
else:
plt.savefig(filename, dpi=kwargs.get('dpi', 150))
plt.close()
class IF(LIF):
"""
IF: integrate-and-fire model
"""
def __init__(self,
time: int,
dt: float = 1.0,
rest=-65,
th=-40,
ref=3,
k='single',
tau: tuple = (20, ),
**kwargs):
"""
Initialize Neuron parameters
:param time:
:param dt:
:param rest:
:param th:
:param ref:
:param k:
:param tau:
:param kwargs:
"""
super().__init__(time=time,
dt=dt,
rest=rest,
th=th,
ref=ref,
k=k,
tau=tau,
**kwargs)
def calc_v(self, data):
"""
Calculate Membrane Voltage
:param data: tuple(spikes[], weight[])
:return membrane voltage, output spikes, firing times:
"""
spikes =
|
np.array(data[0])
|
numpy.array
|
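# Hedged usage sketch (editorial annotation, not part of the dataset row), assuming the LIF class
# above is importable and numpy is available as np:
#   lif = LIF(time=100, dt=1.0)
#   pre_spikes = [np.random.binomial(1, 0.1, 100), np.random.binomial(1, 0.1, 100)]
#   weights = [0.5, 1.2]
#   v, out_spikes, fire_times = lif.calc_v((pre_spikes, weights))
#   lif.plot_v()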
from src.AutoMLpy import SearchType
from src.AutoMLpy import logs_file_setup, log_device_setup
from tests import execute_optimisation
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import os
from src.AutoMLpy import plotly_colors
import logging
def compute_stats_per_dimension_table(
max_dim: int = 10,
iterations_per_dim: int = 10,
**kwargs
):
columns = ["Dimensions", *[st.name for st in SearchType]]
iterations_results = pd.DataFrame(columns=columns)
time_results = pd.DataFrame(columns=columns)
for d in range(1, max_dim+1):
logging.info(f"\n{'-'*50} {d} Dimensions {'-'*50}")
new_iteration_results = {"Dimensions": d, **{st.name: [] for st in SearchType}}
new_time_results = {"Dimensions": d, **{st.name: [] for st in SearchType}}
for _search_type in SearchType:
logging.info(f"\n{'-'*10} {_search_type.name} search {'-'*10}")
ell_itr = []
ell_time = []
for itr_seed in range(iterations_per_dim):
param_gen = execute_optimisation(
_search_type,
dim=d,
optimize_kwargs=dict(stop_criterion=kwargs.get("stop_criterion", 0.9)),
seed=itr_seed,
**kwargs
)
ell_itr.append(param_gen.current_itr)
ell_time.append(param_gen.last_itr_elapse_time)
new_iteration_results[_search_type.name] = (np.mean(ell_itr), np.std(ell_itr))
new_time_results[_search_type.name] = (np.mean(ell_time), np.std(ell_time))
iterations_results = iterations_results.append(new_iteration_results, ignore_index=True)
time_results = time_results.append(new_time_results, ignore_index=True)
return iterations_results, time_results
def show_stats_per_dimension(
max_dim: int = 10,
iterations_per_dim: int = 10,
**kwargs
):
iterations_results, time_results = compute_stats_per_dimension_table(max_dim, iterations_per_dim, **kwargs)
iterations_results_mean = {
st.name: np.array([x[0] for x in iterations_results[st.name]])
for st in SearchType
}
iterations_results_std = {
st.name:
|
np.array([x[1] for x in iterations_results[st.name]])
|
numpy.array
|
import cv2
import numpy as np
def detect(frame, focus=None):
img = cv2.resize(frame, (0, 0), fx=0.333, fy=0.333)
window = 70
if focus is None:
focus = (int(img.shape[0] / 2) + 100, int(img.shape[1] / 2))
else:
focus = (int(float(focus[0]) * 0.33), int(float(focus[1]) * 0.33))
h, w = img.shape[0], img.shape[1]
shiftBottom = 10
shiftUp = 10
src = np.array([
[focus[0] - window, focus[1]+shiftUp],
[focus[0] + window, focus[1]+shiftUp],
[w-shiftBottom, h],
[0+shiftBottom, h]
], np.float32)
dst = np.array([
[0, 0],
[w, 0],
[w, h],
[0, h]
], np.float32)
M = cv2.getPerspectiveTransform(src, dst)
warp = cv2.warpPerspective(img.copy(), M, (w, h))
warp = cv2.equalizeHist(warp)
warp = cv2.medianBlur(warp, 5)
cv2.imshow('warp',warp)
cv2.waitKey(1)
sobelx64f = cv2.Sobel(warp, cv2.CV_64F, 2, 0, ksize=1)
abs_sobel64f = np.absolute(sobelx64f)
edges =
|
np.uint8(abs_sobel64f)
|
numpy.uint8
|
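# Hedged note (editorial annotation, not part of the dataset row): in the row above,
# cv2.getPerspectiveTransform(src, dst) computes the 3x3 homography that maps the trapezoid around
# the focus point onto the full image rectangle, and cv2.warpPerspective applies it to give a
# bird's-eye view before the Sobel edge detection.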
# -*- coding: utf-8 -*-
## @package ivf.batch.overview_figure
#
# ivf.batch.overview_figure utility package.
# @author tody
# @date 2016/02/21
import numpy as np
import cv2
import matplotlib.pyplot as plt
from ivf.datasets.shape import shapeFile, shapeResultFile
from ivf.io_util.image import loadNormal, saveRGBA, saveNormal, saveRGB
from ivf.datasets.colormap import colorMapFile, loadColorMap
from ivf.plot.window import SubplotGrid, showMaximize
from ivf.np.norm import normalizeVector
from ivf.core.shader.toon import ColorMapShader, ToonShader
from ivf.cv.image import setAlpha, to32F, gray2rgb, to8U
from ivf.core.sfs.toon_sfs import ToonSFS
from ivf.cv.normal import normalToColor, normalizeImage
from ivf.core.shader.light_sphere import lightSphere
from ivf.core.sfs.silhouette_normal import silhouetteNormal
from ivf.core.shader.shader import LdotN
def overviewFigure():
cmap_id = 10
colormap_file = colorMapFile(cmap_id)
num_rows = 1
num_cols = 5
w = 10
h = w * num_rows / num_cols
fig, axes = plt.subplots(figsize=(w, h))
font_size = 15
fig.subplots_adjust(left=0.02, right=0.98, top=0.96, bottom=0.02, hspace=0.05, wspace=0.05)
fig.suptitle("", fontsize=font_size)
plot_grid = SubplotGrid(num_rows, num_cols)
L = normalizeVector(np.array([-0.4, 0.6, 0.6]))
L_img = lightSphere(L)
shape_name = "ThreeBox"
Ng_data = shapeFile(shape_name)
Ng_data = loadNormal(Ng_data)
Ng_32F, A_8U = Ng_data
N0_file = shapeResultFile(result_name="InitialNormal", data_name=shape_name)
N0_data = loadNormal(N0_file)
N0_32F, A_8U = N0_data
M_32F = loadColorMap(colormap_file)
Cg_32F = ColorMapShader(M_32F).diffuseShading(L, Ng_32F)
borders=[0.6, 0.8, 0.92]
colors = [np.array([0.2, 0.2, 0.4]),
np.array([0.3, 0.3, 0.6]),
np.array([0.4, 0.4, 0.8]),
np.array([0.5, 0.5, 1.0])]
#Cg_32F = ToonShader(borders, colors).diffuseShading(L, Ng_32F)
#Cg_32F = cv2.GaussianBlur(Cg_32F, (0,0), 2.0)
sfs_method = ToonSFS(L, Cg_32F, A_8U)
sfs_method.setInitialNormal(N0_32F)
sfs_method.setNumIterations(iterations=40)
sfs_method.setWeights(w_lap=10.0)
sfs_method.run()
N_32F = sfs_method.normal()
I_32F = np.float32(np.clip(LdotN(L, N_32F), 0.0, 1.0))
I0_32F = np.float32(np.clip(LdotN(L, N0_32F), 0.0, 1.0))
C_32F = sfs_method.shading()
C0_32F = sfs_method.initialShading()
M_32F = sfs_method.colorMap().mapImage()
L1 = normalizeVector(
|
np.array([0.0, 0.6, 0.6])
|
numpy.array
|
import numpy
import array
import copy
import re, os, sys
from glob import glob
from scipy.interpolate import griddata
from scipy.integrate import simps,quad
from scipy.optimize import leastsq, fsolve
#from sm_functions import read_ised,read_ised2,calc_lyman,calc_beta
from astropy import units as U
from astropy import constants as C
from astropy import cosmology as cos
cosmo = cos.FlatLambdaCDM(H0=70,Om0=0.3)
f = open("error.log", "w")
original_stderr = sys.stderr
sys.stderr = f
class ised(object):
def __init__(self,path):
self.file = path
self.read_ised(self.file)
def read_ised(self,filename):
"""
This function reads data from Bruzual & Charlot binary format
SSP files and returns the necessary data in an array The input files
should be '.ised' files, either 2003 or 2007.
'ks' in the binary files differs slightly between the 03 and 07 files,
so the read length and index must be set appropriately; the function
therefore tries the '03 format first and retries with the '07 format
if the returned number of ages isn't as expected (e.g. 221 ages).
"""
with open(filename,'rb') as f:
check = array.array('i')
check.fromfile(f,2)
if check[1] == 221:
ksl, ksi = 2, 1
F_l, F_i = 3, 2
else:
ksl, ksi = 3, 2
F_l, F_i = 5, 4
with open(filename,'rb') as f:
ks = array.array('i')
ks.fromfile(f,ksl)
ta = array.array('f')
ta.fromfile(f,ks[ksi])
self.ta = numpy.array(ta)
tmp = array.array('i')
tmp.fromfile(f,3)
self.ml,self.mul,iseg = tmp
if iseg > 0:
tmp = array.array('f')
tmp.fromfile(f,iseg*6)
tmp = array.array('f')
tmp.fromfile(f,5)
self.totm, self.totn, self.avs, self.jo, self.tauo = tmp
self.ids= array.array('c')
self.ids.fromfile(f,80)
tmp = array.array('f')
tmp.fromfile(f,4)
self.tcut = tmp[0]
self.ttt = tmp[1:]
ids = array.array('c')
ids.fromfile(f,80)
self.ids = array.array('c')
self.ids.fromfile(f,80)
self.igw = array.array('i')
self.igw.fromfile(f,1)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.iw = array.array('i')
self.iw.fromfile(f,1)
wave = array.array('f')
wave.fromfile(f,self.iw[0])
self.wave = numpy.array(wave)
#SED Section
self.F = array.array('i')
self.F.fromfile(f,F_l)
self.iw = self.F[F_i] #Number of wavelength elements
self.sed = numpy.zeros((self.iw,ks[ksi]),dtype=numpy.float32)
G = array.array('f')
G.fromfile(f,self.iw)
self.sed[:,0] = G
ik = array.array('i')
ik.fromfile(f,1)
self.h = numpy.empty((ik[0],ks[ksi]),'f')
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,0] = H
for i in range(1,ks[ksi]): #Fill rest of array with SEDs
F = array.array('i')
F.fromfile(f,F_l)
iw = F[F_i]
G = array.array('f')
G.fromfile(f,iw)
self.sed[:,i] = G
ik = array.array('i')
ik.fromfile(f,1)
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,i] = H
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bflx = array.array('f')
self.bflx.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
strm = array.array('f')
strm.fromfile(f,tmp[F_i])
self.strm = numpy.array(strm)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.snr = array.array('f')
self.snr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.pnr = array.array('f')
self.pnr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.sn = array.array('f')
self.sn.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bh = array.array('f')
self.bh.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.wd = array.array('f')
self.wd.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
rmtm = array.array('f')
rmtm.fromfile(f,tmp[F_i])
self.rmtm = numpy.array(rmtm)
class CSP:
def __init__(self,SSPpath = '../ssp/bc03/salpeter/lr/',
age=None,sfh=None,dust=None,metal_ind=None,fesc=None,
sfh_law='exp',dustmodel = 'calzetti',neb_cont=True,neb_met=True):
self.SSPpath = SSPpath
self.files = glob(self.SSPpath + '*.ised')
self.files.sort()
self.iseds = []
self.ta_arr = []
self.metal_arr = []
self.iw_arr = []
self.wave_arr = []
self.sed_arr = []
self.strm_arr = []
self.rmtm_arr = []
#Set up
for file in self.files:
ised_binary = ised(file)
self.ta_arr.append(ised_binary.ta)
self.metal_arr.append(ised_binary.ids)
self.iw_arr.append(ised_binary.iw)
self.wave_arr.append(ised_binary.wave)
self.sed_arr.append(ised_binary.sed)
self.strm_arr.append(ised_binary.strm)
self.rmtm_arr.append(ised_binary.rmtm)
self.iseds.append(ised_binary)
#Find closest match for each tg value in ta - set tg to these values
nebular = numpy.loadtxt('nebular_emission.dat',skiprows=1)
self.neb_cont = nebular[:,1]
self.neb_hlines = nebular[:,2]
self.neb_metal = nebular[:,3:]
self.neb_wave = nebular[:,0]
if None not in (age,sfh,dust,metal_ind):
if fesc is None:
self.build(age,sfh,dust,metal_ind,sfh_law=sfh_law,dustmodel=dustmodel,
neb_cont=neb_cont,neb_met=neb_met)
else:
self.build(age,sfh,dust,metal_ind,fesc,sfh_law,dustmodel,neb_cont,neb_met)
def _sfh_exp(self,t,tau):
sfh = numpy.exp(-1*t/tau)/abs(tau)
return sfh
def _sfh_pow(self,t,alpha):
sfh = numpy.power(t/1.e9,alpha)
return sfh
def _sfh_del(self,t,tau):
sfh = t/(tau**2)*numpy.exp(-t/tau)
return sfh
def _sfh_tru(self,t,tstop):
sfh = numpy.ones_like(t)
sfh[t > tstop*numpy.max(t)] = 0.
sfh /= numpy.trapz(sfh,t)
return sfh
def dust_func(self,lam,ai,bi,ni,li):
"""
Functional form for SMC, LMC and MW extinction curves of
Pei et al. 1992
"""
lam = numpy.array(lam) / 1e4
ki = numpy.power((lam / li),ni) + numpy.power((li / lam),ni) + bi
eta_i = ai / ki
return eta_i
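# (hedged note) each call returns a single term a_i / [ (lam/l_i)^n_i + (l_i/lam)^n_i + b_i ] of the
# Pei (1992) extinction-curve fit, with lam converted from Angstrom to micron above; the smc / lmc /
# mw branches of build() below sum six such terms with their own (a_i, b_i, n_i, l_i) coefficients.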
def build(self,age,sfh,dust,metal,fesc=1.,sfh_law='exp',dustmodel = 'calzetti',
neb_cont=True,neb_met=True):
"""
"""
self.tg = age*1.e9
if sfh_law == 'exp':
self.tau = sfh*1.e9
elif sfh_law == 'del':
self.tau = sfh*1.e9
else:
self.tau = sfh
self.tauv = dust
self.mi = int(abs(metal))
self.fesc = fesc
self.sfh_law = sfh_law
self.inc_cont= neb_cont
self.inc_met = neb_met
self.dust_model = dustmodel
mu = 0.3
epsilon = 0.
self.ta = self.ta_arr[self.mi]
self.wave = self.wave_arr[self.mi]
[T1,T2] = numpy.meshgrid(self.tg,self.ta)
tgi = numpy.argmin(numpy.abs(self.tg-self.ta))
self.tg = self.ta[tgi]
if len(self.neb_wave) != len(self.wave):
self.neb_cont = griddata(self.neb_wave,self.neb_cont,self.wave)
self.neb_hlines = griddata(self.neb_wave,self.neb_hlines,self.wave)
neb_metaln = numpy.zeros((len(self.wave),3))
for i in range(3):
neb_metaln[:,i] = griddata(self.neb_wave,self.neb_metal[:,i],self.wave)
self.neb_metal = neb_metaln
self.neb_wave = self.wave
#quietprint("Metallicity "+str(self.mi+1)+":")
#print ".ised file: "+files[abs(SSP)]
sed = self.sed_arr[self.mi]
strm = self.strm_arr[self.mi]
rmtm = self.rmtm_arr[self.mi]
self.iw = self.iw_arr[self.mi]
metal=str((self.metal_arr[self.mi]))[12:-3].strip()
#quietprint(metal[self.mi] + "\nInclude nebular emission: " + str(add_nebular))
SSP_Z = float(re.split("Z=?",metal)[1])
#print SSP_Z,
if SSP_Z <= 0.0004: neb_z = 0
elif SSP_Z > 0.0004 and SSP_Z <= 0.004: neb_z = 1
elif SSP_Z > 0.004: neb_z = 2
#print neb_z
if self.dust_model == "charlot":
ATT = numpy.empty([len(self.wave),len(self.ta)])
tv = ((self.tauv/1.0857)*numpy.ones(len(self.ta)))
tv[self.ta>1e7] = mu*self.tauv
lam = numpy.array((5500/self.wave)**0.7)
ATT[:,:] = (numpy.exp(-1*numpy.outer(lam,tv)))
elif self.dust_model == "calzetti":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1200]
w1 = [self.wave < 6300]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
x1 = numpy.argmin(numpy.abs(self.wave-1200))
x2 = numpy.argmin(numpy.abs(self.wave-1250))
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
k[w0] = k[x1] + ((self.wave[w0]-1200.) * (k[x1]-k[x2]) / (self.wave[x1]-self.wave[x2]))
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "calzetti2":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1000]
w1 = [(self.wave > 1000)*(self.wave < 6300)]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
p1 = self.dust_func(self.wave,27,4,5.5,0.08) + self.dust_func(self.wave,185,90,2,0.042)
k[w0] = p1[w0] / (p1[w1][0]/k[w1][0])
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "smc":
ai = [185., 27., 0.005, 0.01, 0.012, 0.03]
bi = [90., 5.5, -1.95, -1.95, -1.8, 0.]
ni = [2., 4., 2., 2., 2., 2.]
li = [0.042, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 2.93
Ab = self.tauv * (1 + (1/Rv))
print(numpy.exp(self.tauv*eta))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "lmc":
ai = [175., 19., 0.023, 0.005, 0.006, 0.02]
bi = [90., 4.0, -1.95, -1.95, -1.8, 0.]
ni = [2., 4.5, 2., 2., 2., 2.]
li = [0.046, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.16
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "mw":
ai = [165., 14., 0.045, 0.002, 0.002, 0.012]
bi = [90., 4., -1.95, -1.95, -1.8, 0.]
ni = [2., 6.5, 2., 2., 2., 2.]
li = [0.047, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.08
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
"""
SECTION 1
First calculate and store those parameters that are functions of the age array
'ta' only - these are the same for every model to be made. The parameters are
the age array TP, the time interval array DT, the interpolation coefficient
'a' and the interpolation indices J. Each are stored in cell arrays of size ks,
with the data corresponding to the original age array first, and the
interpolated data second.
"""
self.TP = {}
self.A = {}
self.J = {}
self.DT = {}
for ai in range(tgi+1):
#Calculate taux2: the reverse age array; remove those values which
#are less than the first non-zero entry of taux1 - these values
#are treated differently in the original BC code
taux1 = self.ta[:ai+1]
taux2 = self.ta[ai]-self.ta[ai::-1]
if max(taux1) > 0.:
taux2 = numpy.delete(taux2,numpy.where(taux2<taux1[numpy.flatnonzero(taux1)[0]]))
#Remove values common to taux1 and taux2; calculate array TP
[T1,T2] = numpy.meshgrid(taux1,taux2)
[i,j] = numpy.where(T1-T2==0)
taux2 = numpy.delete(taux2, i)
self.TP[ai] = self.ta[ai]-numpy.concatenate((taux1,taux2),axis=0)
l = len(taux2)
#If taux2 has entries, calculate the interpolation parameters a and J.
#The indicies correspond to those values of 'ta' which are just below
#the entries in taux2. They are calculated by taking the difference
#between the two arrays, then finding the last negative entry in the
#resulting array.
if l == 0:
self.J[ai] = numpy.array([])
self.A[ai] = numpy.array([])
if l>0:
[T1,T2] = numpy.meshgrid(self.ta,taux2)
T = T1-T2
T[numpy.where(T<=0)] = 0
T[numpy.where(T!=0)] = 1
T = numpy.diff(T,1,1)
(i,self.J[ai]) = T.nonzero()
self.A[ai] = (numpy.log10(taux2/self.ta[self.J[ai]]) /
numpy.log10(self.ta[self.J[ai]+1]/self.ta[self.J[ai]]))
#Calculate age difference array: the taux arrays are joined and
#sorted, the differences calculated, then rearranged back to the order
#of the original taux values.
taux = numpy.concatenate((taux1,taux2),axis=0)
taux.sort()
b = numpy.searchsorted(taux,taux1)
c = numpy.searchsorted(taux,taux2)
order = numpy.concatenate((b,c))
d = numpy.diff(taux)
dt = numpy.append(d,0) + numpy.append(0,d)
self.DT[ai] = numpy.copy(dt[order])
SED = numpy.empty([len(self.wave)])
Nlyman = numpy.empty([1])
Nlyman_final = numpy.empty([1])
beta = numpy.empty([1])
norm = numpy.empty([1])
STR = numpy.empty([tgi+1])
SFR = numpy.empty([tgi+1])
W = {}
# metal=[str((self.data[1]))[12:-3].strip()]*len(params.metallicities)
RMr = numpy.empty([tgi+1])
PRr = numpy.empty([tgi+1])
URr = numpy.empty([tgi+1])
Tr = numpy.empty([tgi+1])
"""
SECTION 2
Now calculate the integration coefficients w, and store them in the
cell array W. Also calculate the stellar mass fraction str. The so
array is expanded and used by each successive iteration of the inner
loop (ai). The outer loop repeats the operation for each tau value.
"""
prgas = numpy.zeros(tgi+1)
for ai in range(tgi+1):
j = self.J[ai] #Interpolation indices
tp = self.TP[ai] #Integration timescale
pgas = numpy.zeros_like(tp)
if ai ==0:
prgas = numpy.zeros_like(self.ta)
else:
i = numpy.where(tp<=self.ta[ai-1])
ii = numpy.where(tp>self.ta[ai-1])
pgas[i] = griddata(self.ta,prgas,tp[i])
pgas[ii] = prgas[ai-1]
#print prgas[ai]
tbins = numpy.logspace(0,numpy.log10(max(tp)),1000)
npgas = numpy.zeros_like(tbins)
if self.sfh_law == 'exp':
if self.tau > 0.:
sr = (1 + epsilon*pgas)*numpy.exp(-1*tp/self.tau)/abs(self.tau)
norma = 1
if len(sr) > 1:
i = numpy.where(tbins <= self.ta[ai-1])
                        ii = numpy.where(tbins > self.ta[ai-1])
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os.path as osp
import tempfile
import xml.etree.ElementTree as ET
# import gym.envs.mujoco.arm_shaping
import os
import IPython
import scipy.misc
class PusherEnv7DOFExp2(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
self.randomize_xml('pr2_arm3d_blockpush_new_2.xml')
mujoco_env.MujocoEnv.__init__(self, "temp.xml", 5)
def _step(self, a):
vec_1 = self.get_body_com("object")-self.get_body_com("r_wrist_roll_link")
vec_2 = self.get_body_com("object")-self.get_body_com("goal")
reward_near = - np.linalg.norm(vec_1)
        reward_dist = - np.linalg.norm(vec_2)
import numpy as np
import math
import particles
import pickle
class Simulator():
def __init__(self):
'''
The Simulator object contains model parameters for the epidemic
simulation of the population of interest. Particles move in a 2D
map represented by a square with x limits (-1, 1) and y limits (-1, 1).
'''
self.NUMBER_OF_PARTICLES = 5000
self.INITIAL_EXPOSED = 50
        self.AGE_GROUPS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
###############################################
#ASM# module "numerics" from package "common" #ASM#
###############################################
"""
These are miscellaneous functions useful for a variety of python purposes.
"""
import copy
import types
import re
from .log import Logger
from . import misc
from . import baseclasses
from .baseclasses import expand_limits,get_array_slice
try: import numpy
except ImportError:
Logger.raiseException('The <numpy> module is required for this suite, but it was not found in the python path.')
raise ImportError
__module_name__=__name__
deg2rad=numpy.pi/180
number_types=(int,float,complex,numpy.int32,numpy.int64,numpy.float32,numpy.float64)
if 'float128' in dir(numpy): number_types+=(numpy.float128,)
if 'complex128' in dir(numpy): number_types+=(numpy.complex128,)
def sequence_cmp(a,b):
if hasattr(a,'__len__') and not hasattr(b,'__len__'): return -1
elif hasattr(b,'__len__') and not hasattr(a,'__len__'): return 1
    elif not hasattr(a,'__len__') and not hasattr(b,'__len__'): return (a > b) - (a < b)
else:
lengths=(len(a),len(b))
for i in range(max(lengths)):
try: a_value=a[i]
except IndexError: return 1
try: b_value=b[i]
except IndexError: return -1
comparison=sequence_cmp(a_value,b_value)
if comparison!=0: return comparison
return 0
def factorial(n):
"""
This function calculates the factorial of a number.
"""
sum = 1.0
for m in range(1, int(n)+1):
sum = float(m)*sum
return sum
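# Usage sketch (added comment, not part of the original module): factorial(5)
# returns 120.0 -- the product is accumulated as a float. For exact integer
# results the standard-library math.factorial can be used instead.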
def bin_array(arr,bins=200,weights=None,density=False):
arr=arr.flatten()
if weights is not None: weights=weights.flatten()
h=numpy.histogram(arr,bins=bins,density=density,weights=weights)
h[0][numpy.isnan(h[0])]=0
h=baseclasses.ArrayWithAxes(h[0],axes=[h[1][:-1] \
+numpy.diff(h[1])]).sort_by_axes()
return h
def shear_array(a, strength=1, shear_axis=1, increase_axis=None,interpolate=False):
Logger.raiseException('The dimension of `a` must be at least 2.',
unless=(a.ndim>=2), exception=TypeError)
Logger.raiseException('`shear_axis` and `increase_axis` must be distinct.',
unless=(shear_axis!=increase_axis), exception=ValueError)
shear_axis%=a.ndim
if increase_axis is None:
axes=list(range(a.ndim))
axes.remove(shear_axis)
increase_axis=axes[0]
if interpolate:
Logger.raiseException('The `interpolate` option is only enabled for \
an array `a` of dimension 2.',\
unless=a.ndim==2, exception=IndexError)
arr=baseclasses.AWA(a)
xs=arr.axes[shear_axis]
get_ind=lambda y: (y,slice(None)) if shear_axis==1 else (slice(None),y)
res = [arr[get_ind(y)].interpolate_axis((xs-strength*y)%arr.shape[shear_axis],\
axis=0, extrapolate=True,\
bounds_error=False) \
for y in range(a.shape[increase_axis])]
res=numpy.array(res)
if shear_axis==0: res=res.transpose()
else:
indices = numpy.indices(a.shape)
indices[shear_axis] -= strength * indices[increase_axis]
indices[shear_axis] %= a.shape[shear_axis]
res = a[tuple(indices)]
return res
def levi_cevita(*indices):
result = 1
for i, x1 in enumerate(indices):
for j in range(i+1, len(indices)):
x2 = indices[j]
if x1 > x2:
result = -result
elif x1 == x2:
return 0
return result
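# Sign-convention sketch (added comment): levi_cevita counts pairwise inversions, so
#   levi_cevita(0, 1, 2) ->  1   (even permutation)
#   levi_cevita(1, 0, 2) -> -1   (odd permutation)
#   levi_cevita(0, 0, 2) ->  0   (repeated index)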
def rotation_matrix(angle,about_axis=None,optimize=False):
"""
Returns the rotation matrix corresponding to rotation by
angle *angle* (in degrees) about some axis vector
*about_vector* (DEFAULT: Z-axis).
"""
from numpy.linalg import norm
###Set conversion to radians###
deg2rad=numpy.pi/180.
angle=deg2rad*angle
###Set default about_axis###
if about_axis is None: about_axis=[0,0,1]
###Or make sure provided one is a unit vector###
else:
about_axis=numpy.array(about_axis).astype(complex).flatten()
about_axis/=norm(about_axis)
#####Construct a rotation matrix = P+(I-P)*cos(theta)+Q*sin(theta)#####
v=about_axis
TensorProd=numpy.transpose(numpy.matrix(v))*numpy.matrix(v)
CrossProd=numpy.matrix([[0,-v[2],v[1]],\
[v[2],0,-v[0]],\
[-v[1],v[0],0]])
I=numpy.matrix(numpy.eye(3))
return numpy.cos(angle)*I+numpy.sin(angle)*CrossProd+(1-numpy.cos(angle))*TensorProd
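# Usage sketch (added comment), assuming only numpy: the default rotation axis is z
# and angles are in degrees, so rotating the x unit vector by +90 degrees gives ~y:
#   R = rotation_matrix(90)                                    # 3x3 numpy.matrix
#   numpy.allclose(R*numpy.matrix([1,0,0]).T, [[0],[1],[0]])   # -> True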
def rotate_vector(vector,angle,about_axis=None,\
optimize=False):
"""
Returns *vector* rotated by angle *angle* (in degrees)
about *about_axis* (DEFAULT: [0 0 1]).
"""
vector=numpy.asarray(vector)
vector=numpy.matrix(vector).T
#Pad to three dimensions if necessary#
padded=False
if vector.shape==(2,1):
vector=numpy.matrix(list(numpy.array(vector).squeeze())+[0]).T
padded=True
assert vector.shape==(3,1),\
'Input `vector` must be a 2- or 3-coordinate iterable.'
#vector=vector.T
rotated_vector=(rotation_matrix(angle,about_axis,\
optimize=optimize)*vector)
result=numpy.array(rotated_vector).squeeze()
if padded: result=result[:2]
return result
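# Usage sketch (added comment): a 2-coordinate input is padded to 3-D internally and
# truncated again on return, e.g. rotate_vector([1,0], 90) is approximately [0, 1].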
def cross_product_matrix(vec):
from numpy.linalg import norm
vec=numpy.array(vec)
    vecNorm=float(norm(vec))
vecHat=vec/vecNorm
I=numpy.matrix(numpy.eye(3))
projector=numpy.transpose(numpy.matrix(vecHat))*numpy.matrix(vecHat)
R=rotation_matrix(90,about_axis=vecHat)
return vecNorm*R*(I-projector)
def change_basis_matrix(from_basis=numpy.eye(3),\
to_basis=numpy.eye(3),\
optimize=False):
if not optimize:
try:
##Check that we have array type##
from_basis=numpy.matrix(from_basis).squeeze()
to_basis=numpy.matrix(to_basis).squeeze()
##Check that we have square matrices (arrays)##
            if not ((from_basis.ndim == 2) and (from_basis.shape[-1] == from_basis.shape[0])) \
               and not ((to_basis.ndim == 2) and (to_basis.shape[-1] == to_basis.shape[0])): raise ValueError
##Check that we have non-singular matrices (valid bases)##
from_basis.I; to_basis.I
except: Logger.raiseException('*from_basis* and *to_basis* should both be valid '+\
'NxN matrix-representable bases whose rows are '+\
                                      'linearly independent basis vectors.', exception=numpy.linalg.LinAlgError)
##Convert to conventional representation##
#Columns are basis vectors#
from_basis=from_basis.T; to_basis=to_basis.T
return to_basis.I*from_basis
def change_vector_basis(vector,from_basis=numpy.eye(3),\
to_basis=numpy.eye(3),\
optimize=False):
"""
Returns *vector* rotated by angle *angle* (in degrees)
about *about_axis* (DEFAULT: [0 0 1]).
"""
if not optimize:
try:
#Make into a vector#
vector=numpy.matrix(vector).squeeze()
            #Check that we have 2 or 3 coordinates#
            if vector.size not in (2,3):
                raise ValueError
except: Logger.raiseException('*vector* must be a matrix-representable list of 2 or 3 coordinates.')
#Pad to three dimensions if necessary#
        if vector.size == 2: vector=numpy.matrix(list(numpy.asarray(vector).flatten())+[0])
vector=vector.T
result=(change_basis_matrix(from_basis,to_basis,optimize=optimize)*vector).T
return numpy.array(result).squeeze()
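# Usage sketch (added comment), illustrating the rows-as-basis-vectors convention:
# with to_basis=[[0,1,0],[1,0,0],[0,0,1]] (x and y swapped), the coordinates of
# [1,2,3] in the new basis are approximately [2,1,3]:
#   change_vector_basis([1,2,3], to_basis=[[0,1,0],[1,0,0],[0,0,1]])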
def grid_axes(*axes):
array_axes=[]
for axis in axes:
axis=numpy.array(axis)
Logger.raiseException('Each provided axis must have a one dimensional array shape.',\
unless=((axis.ndim==1) and (0 not in axis.shape)),\
exception=ValueError)
array_axes.append(axis)
index_grids=list(numpy.mgrid.__getitem__([slice(0,len(axis)) for axis in array_axes]))
return [array_axes[i][index_grids[i]] for i in range(len(array_axes))]
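# Usage sketch (added comment): grid_axes behaves like numpy.meshgrid with
# indexing='ij', e.g. grid_axes([1,2], [10,20,30]) returns two (2,3) arrays,
# [[1,1,1],[2,2,2]] and [[10,20,30],[10,20,30]].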
#TODO: remove legacy naming
expand_axes=grid_axes
def array_from_points(points,values,dtype=None,fill=numpy.nan):
#####Check values#####
error_msg='Both *points* and *values* must be iterables of the same (non-zero) length, '+\
'the first a list of (tuple) points and the latter a list of corresponding values.'
Logger.raiseException(error_msg,unless=(hasattr(points,'__len__') and hasattr(values,'__len__')),
exception=TypeError)
Logger.raiseException(error_msg,unless=(len(points)==len(values) and len(points)>0),\
exception=IndexError)
points=list(points)
for i in range(len(points)):
####Give each point a length equal to the array shape, even if 1-D array####
if not hasattr(points[i],'__len__'): points[i]=[points[i]]
Logger.raiseException('Each point in *points* must be an (tuple) iterable of uniform length (i.e., each describing an array of a consistent shape).',\
unless=(len(points[i])==len(points[0])),\
exception=IndexError)
if dtype is None: dtype=type(values[0])
#####Get list of indices in all axes corresponding to ordered values#####
###Unzip into format [[axis1 values], [axis2 values], ...]###
unsorted_axes=list(zip(*points))
index_lists=[]
sorted_axes=[]
####Iterate through each axis to get lists of indices corresponding to each axis####
for i in range(len(unsorted_axes)):
unsorted_axis_values=list(unsorted_axes[i])
sorted_axis_values=copy.copy(unsorted_axis_values)
sorted_axis_values.sort()
sorted_axes.append(sorted_axis_values)
index_list=[0]*len(sorted_axis_values)
j=0
index=0
while j<len(sorted_axis_values):
axis_value=sorted_axis_values[j]
positions=misc.all_indices(unsorted_axis_values,axis_value)
###Fill in *index_list* at positions where *axis_value* occurs with the index value###
for position in positions:
index_list[position]=index
###Skip ahead so we don't double up on axis values we've already counted###
index+=1
j+=len(positions)
index_lists.append(index_list)
###Zip back up into same format as *points*###
index_points=list(zip(*index_lists))
####Remove non-unique values from *sorted_axes*####
axes=[]
for i in range(len(sorted_axes)):
sorted_axis=sorted_axes[i]
axis=[]
for j in range(len(sorted_axis)):
value=sorted_axis[j]
if value not in axis: axis.append(value)
axes.append(numpy.array(axis))
#####Produce array and fill it in by indices#####
###Get array shape and make empty array###
shape=[]
for i in range(len(axes)):
axis=axes[i]
shape.append(len(axis))
shape=tuple(shape)
output_array=numpy.zeros(shape,dtype=dtype)
output_array[:]=fill #fill with *fill*
###Fill in by indices###
for i in range(len(index_points)):
index=index_points[i]
value=values[i]
output_array[index]=value
return baseclasses.ArrayWithAxes(output_array,axes=tuple(axes))
def operate_on_entries(func,*inputs,**kwargs):
##Extract special keyword arguments##
exkwargs=misc.extract_kwargs(kwargs,out=None,\
dtype=None)
out=exkwargs['out']; dtype=exkwargs['dtype']
##Check function##
Logger.raiseException('*func* must be a callable object operating on %i input arguments.'%len(inputs),\
unless=hasattr(func,'__call__'), exception=TypeError)
##Check input (and output if provided)##
inputs=list(inputs)
##Verify that we have arrays##
for i in range(len(inputs)):
try: inputs[i]=numpy.array(inputs[i])
except: Logger.raiseException('Each input to *func* must be castable as an array instance.',\
exception=TypeError)
##Broadcast inputs to list of tuples##
try:
if len(inputs)>=2:
broadcast_inputs=numpy.broadcast(*inputs)
shape=broadcast_inputs.shape
linear_inputs=list(broadcast_inputs)
else:
shape=inputs[0].shape
linear_inputs=list(zip(inputs[0].ravel()))
except ValueError: Logger.raiseException('Each input to *func* must be of consistent shape '+\
'(subject to array broadcasting rules).', exception=ValueError)
##Verify they have the same shape##
if out is None:
if dtype is None: dtype=object
out=numpy.ndarray(shape,dtype=dtype)
else:
Logger.raiseException('*out* must be castable as an array instance with shape %s.'%repr(shape),\
unless=(isinstance(out,numpy.ndarray) and (out.shape==shape)),\
exception=IndexError)
if dtype!=None: out=out.astype(dtype)
###Use operator on broadcast inputs###
    unravel_index=numpy.unravel_index
    linear_indices=range(numpy.prod(shape)) #a single line of incrementing integers
    #We use a linear iteration approach because it is (apparently) the only reliable way
#to allocate memory for each entry in sequence##
for linear_index in linear_indices:
indices=unravel_index(linear_index,shape)
out[indices]=func(*linear_inputs[linear_index],**kwargs)
return out
def expanding_resize(arr,desired_shape,append_dim=True):
####This function exists because I hate the default behavior of numpy.resize and array.resize, this resize function doesn't mix BETWEEN dimensions####
#####Check inputs#####
arr=numpy.array(arr)
Logger.raiseException('*desired_shape* must be iterable.',\
unless=hasattr(desired_shape,'__len__'),\
exception=TypeError)
Logger.raiseException('*desired_shape* cannot be an empty shape.',\
unless=(len(desired_shape)>0), exception=ValueError)
arr=numpy.array(copy.copy(arr))
Logger.raiseException('All dimensions of *arr* must be non-zero.',\
unless=(0 not in arr.shape), exception=ValueError)
for item in desired_shape:
message='Each element of *desired_shape* must be a positive integer.'
Logger.raiseException(message, unless=(type(item)==int), exception=TypeError)
Logger.raiseException(message, unless=(item>0), exception=ValueError)
####If we need to append_dim dimensions####
ndim=len(desired_shape)
while ndim>len(arr.shape):
if append_dim: new_shape=arr.shape+(1,)
else: new_shape=(1,)+arr.shape
arr=numpy.reshape(arr,new_shape)
####if we need to remove dimensions####
while ndim<len(arr.shape):
if append_dim: drop_dim=-1
else: drop_dim=0
slicer=[None]*len(arr.shape); slicer[drop_dim]=0 #Retain first element in dimension to drop
arr=get_array_slice(arr,slicer) #now the final dimension is safe to remove
arr=numpy.reshape(arr,arr.shape[:drop_dim])
for i in range(ndim):
input_shape=arr.shape
size=input_shape[i]
desired_size=int(desired_shape[i])
ndiff=desired_size-size
###If there's no difference, do nothing###
if ndiff==0: continue
###If desired size is larger, we expand on either side###
elif ndiff>0:
bottom_slicer=[None]*ndim; bottom_slicer[i]=0
bottom_edge=get_array_slice(arr,bottom_slicer)
top_slicer=[None]*ndim; top_slicer[i]=input_shape[i]-1
top_edge=get_array_slice(arr,top_slicer)
##Iteratively concatenate D=N-1 slices to either side of this axis to fill up to proper size##
for j in range(ndiff):
#Decide whether to concatenate at top or bottom of axis in this iteration#
if j%2==0: to_concatenate=[bottom_edge,arr]
else: to_concatenate=[arr,top_edge]
arr=numpy.concatenate(to_concatenate,axis=i)
###If desired size is smaller, take interior slice###
elif ndiff<0:
bottom_slicer=[None]*ndim; bottom_slicer[i]=[1,None]
top_slicer=[None]*ndim; top_slicer[i]=[None,-1]
slicers=[bottom_slicer,top_slicer]
##Iteratively sub-select array, shaving off top and bottom slices##
for j in range(numpy.abs(ndiff)):
slicer=slicers[j%2]
arr=get_array_slice(arr,slicer)
return arr
def interpolating_resize(arr,desired_shape):
####This function exists because I hate the default behavior of numpy.resize and array.resize, this resize function doesn't mix BETWEEN dimensions####
try: from scipy.interpolate import interp1d
except ImportError:
##Replace interpolator with a home-cooked version?##
##Not yet.##
Logger.raiseException('SciPy module unavailable! Interpolation cannot be used, '+\
'an expanding resize will be used instead.', exception=False)
return expanding_resize(arr,desired_shape)
#####Check inputs#####
Logger.raiseException('*arr* and *desired_shape* must both be iterables.',\
unless=(hasattr(arr,'__len__') and hasattr(desired_shape,'__len__')),\
exception=TypeError)
Logger.raiseException('*desired_shape* cannot be an empty shape.',\
unless=(len(desired_shape)>0), exception=ValueError)
arr=numpy.array(copy.copy(arr))
Logger.raiseException('All dimensions of *arr* must be non-zero.',\
unless=(0 not in arr.shape), exception=ValueError)
for item in desired_shape:
message='Each element of *desired_shape* must be a positive integer.'
Logger.raiseException(message, unless=(type(item)==int), exception=TypeError)
Logger.raiseException(message, unless=(item>0), exception=ValueError)
####If we need to append dimensions####
ndim=len(desired_shape)
while ndim>len(arr.shape): arr=numpy.reshape(arr,arr.shape+(1,))
####if we need to remove dimensions####
while ndim<len(arr.shape):
slicer=[None]*len(arr.shape); slicer[-1]=0 #Take first element in last dimension
arr=get_array_slice(arr,slicer) #now the final dimension is safe to remove
arr=numpy.reshape(arr,arr.shape[:-1])
for i in range(ndim):
input_shape=arr.shape
size=input_shape[i]
desired_size=int(desired_shape[i])
ndiff=desired_size-size
###If there's no difference, do nothing###
if ndiff==0: continue
###If desired size is larger, we have to watch out if we can't interpolate###
#Instead we just replicate with concatenation#
elif size==1 and ndiff>0:
slicer=[None]*ndim; slicer[i]=0
            edge_slice=get_array_slice(arr,slicer)
            for j in range(ndiff): arr=numpy.concatenate((arr,edge_slice),axis=i)
###Otherwise we're free to use interpolation###
else:
x=numpy.linspace(0,size,size)/float(size)
x_new=numpy.linspace(0,desired_size,desired_size)/float(desired_size)
#Swap axis to the end axis - the numpy folks need to fix their code for *interp1d*,
#the returned array is shaped incorrectly if *axis=0* and *ndim>2*,
#only thing that works for sure is *axis=-1*.
arr=arr.swapaxes(i,-1)
interpolator=interp1d(x,arr,axis=-1)
arr=interpolator(x_new)
arr=arr.swapaxes(i,-1)
return arr
def value_to_index(value,x):
#####Check inputs#####
try:
x=numpy.array(x)
if x.ndim>1: raise TypeError
except TypeError:
Logger.raiseException('Axis values list *x* must be a linear numeric array',\
exception=TypeError)
#####Find most appropriate index#####
return numpy.abs(x-value).argmin()
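# Usage sketch (added comment): value_to_index(0.26, numpy.linspace(0,1,11)) -> 3,
# i.e. the index of the axis value (0.3) closest to the requested value.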
def slice_array_by_value(value,x,arr,axis=0,squeeze=True,get_closest=False):
    ##This function is largely deprecated in favor of coordinate slicing on *ArrayWithAxes* instances#
#####Find most appropriate index#####
closest_index=value_to_index(value,x)
axis=axis%arr.ndim
limits=[None]*axis+[closest_index]+[None]*(arr.ndim-axis-1)
#####Slice the array#####
sliced_arr=get_array_slice(arr,limits,squeeze=squeeze)
##Reduce to number if it has no shape##
if len(sliced_arr.shape)==0: return sliced_arr.tolist()
if get_closest==True: return (x[closest_index],sliced_arr)
else: return sliced_arr
#@TODO: remove this legacy re-name
index_array_by_value=slice_array_by_value
def remove_array_poles(arr,window=10,axis=-1):
ndim=arr.ndim
axis=axis%ndim
newarr_slices=[]
#Fill in first element
arr_first=get_array_slice(arr,(0,1),axis=axis,squeeze=False)
newarr_slices.append(arr_first)
#Smooth everything else by windows in between
arr_previous=arr_first
for i in range(arr.shape[axis]-2):
arr_windowed=get_array_slice(arr,(1+i,1+i+window),axis=axis,squeeze=False)
#Compute difference along axis in window
diff=(arr_previous-arr_windowed)**2
for j in range(ndim):
compress_axis=ndim-j-1
if compress_axis==axis: continue
diff=numpy.sum(diff,axis=compress_axis)
#Find index in window of minimal difference
index=numpy.arange(len(diff))[diff==diff.min()]
index=index[0] #pick soonest index of minimum difference
#Populate new array with element from *arr* at this window index
arr_next=get_array_slice(arr_windowed,(index,index+1),axis=axis,squeeze=False)
newarr_slices.append(arr_next)
#Call this slice now the "previous" slice in next iteration, use it for diff reference
arr_previous=arr_next
#Fill in last element
arr_previous=get_array_slice(arr,(arr.shape[axis]-1,arr.shape[axis]),axis=axis,squeeze=False)
newarr_slices.append(arr_previous)
#Make new array
newarr=numpy.concatenate(newarr_slices,axis=axis)
if isinstance(arr,baseclasses.ArrayWithAxes):
newarr=baseclasses.ArrayWithAxes(newarr)
newarr.adopt_axes(arr)
newarr=arr.__class__(newarr)
return newarr
def broadcast_items(*items):
array_items=[numpy.array(item) for item in items]
ndim_front=0
ndim_behind=numpy.sum([array_item.ndim for array_item in array_items])
broadcasted_arrays=[]
for array_item in array_items:
#If item had no shape (e.g. scalar) don't broadcast
if array_item.ndim==0:
broadcasted_arrays.append(array_item)
continue
ndim_behind-=array_item.ndim
broadcasted_shape=(1,)*ndim_front\
+array_item.shape\
+(1,)*ndim_behind
ndim_front+=array_item.ndim
broadcasted=array_item.reshape(broadcasted_shape)
broadcasted_arrays.append(broadcasted)
#For each unsized array item, replace with original item#
#(We don't want unsized arrays in the returned value)#
for i in range(len(broadcasted_arrays)):
if not broadcasted_arrays[i].ndim>=1:
broadcasted_arrays[i]=items[i]
return broadcasted_arrays
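# Usage sketch (added comment): two 1-D inputs of lengths 2 and 3 come back reshaped
# to (2,1) and (1,3), so elementwise operations between them broadcast to (2,3):
#   a, b = broadcast_items(numpy.arange(2), numpy.arange(3))
#   (a + b).shape   # -> (2, 3)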
def differentiate(y,x=None,axis=0,order=1,interpolation='linear'):
"""
Differentiate an array *y* with respect to array *x*
*order* times.
OUTPUTS:
dy/dx(x')
Here *dy/dx* is the computed derivative, and *x'* values
correspond to axis coordinates at which the derivative
is computed. If interpolation is used, the primed
coordinates are identical to the input axis coordinates,
otherwise these are central-difference coordinates.
INPUTS:
*x: values for the axis over which the derivative
should be computed. The length of *x* must be the
same as the dimension of *y* along *axis*.
*axis: the array axis over which to differentiate *y*.
*order: an integer specifying the order of the
derivative to be taken in each component.
DEFAULT: 1 (first derivative)
*interpolation: specify the interpolation to be used
when calculating derivatives. Must be one of
            True, False, 'linear', 'quadratic', 'cubic' or 'wrap'.
If the chosen option is 'wrap', values of *y* are
assumed periodic along *axis*.
If interpolation is used, the returned axis
coordinates are identical to the input axis
coordinates.
DEFAULT: True --> linear interpolation
"""
####Establish interpolation####
if interpolation not in [False,None]:
from scipy.interpolate import interp1d as interpolator_object
#####Check inputs#####
####Check x and y####
Logger.raiseException('*y* and *x* must both be iterables.',\
unless=(hasattr(y,'__len__') and (x is None or hasattr(x,'__len__'))),\
exception=TypeError)
assert isinstance(axis,int) and isinstance(order,int)
if not isinstance(y,numpy.ndarray): y=numpy.array(y)
Logger.raiseException('*y* cannot be an empty array.', unless=(0 not in y.shape), exception=ValueError)
axis=axis%y.ndim #make axis positive
####Verify that x and y are broadcast-able####
if x is None:
if isinstance(y,baseclasses.ArrayWithAxes): x=y.axes[axis]
else: x=numpy.arange(y.shape[axis])
else:
x=numpy.array(x)
Logger.raiseException('*x* must be 1-dimensional and of the same length as the *axis* dimension of *y*.',\
unless=(len(x)==y.shape[axis] and x.ndim==1),\
exception=IndexError)
##If interpolation is not None, then we are asking for interpolation##
    if interpolation is True: interpolation='linear'
    if not interpolation or interpolation=='wrap':
        wrap=(interpolation=='wrap'); interpolation_chosen=False
    else: wrap=False; interpolation_chosen=True
#####Differentiate#####
global diff_x_1d
for i in range(order):
###Step 1. ###
#Differentiate x (Only needed once if we're interpolating back each time)
if interpolation_chosen==False or (interpolation_chosen==True and i==0):
#Wrap differentiation maintains same number of x points
if wrap:
diff_x_1d=numpy.roll(x,1,axis=0)-x
x_reduced=x+diff_x_1d/2.
#Differentiating reduces by 1 data point#
else:
diff_x_1d=numpy.diff(x)
x_reduced=x[:-1]+diff_x_1d/2.
##Get *x* array ready for broadcasting correctly when doing *diff_y/diff_x*##
new_shape=[1 for dim in range(y.ndim)]
new_shape[axis]=len(diff_x_1d)
diff_x=numpy.resize(diff_x_1d,new_shape)
if interpolation_chosen==True:
#First make sure x is in uniformly increasing order#
Logger.raiseException('If interpolating to original values, *x* must be in uniformly increasing order.',\
unless=(diff_x_1d>0).all(), exception=ValueError)
#linearly expand x by 1 point in both directions to permit later interpolation at edges#
#this value only used in interpolation function
#Expand analytically if x_reduced has length
x_enlarged=numpy.concatenate( ([x[0]-diff_x_1d[0]/2.],\
x_reduced,\
[x[-1]+diff_x_1d[-1]/2.]) )
###Step 2. ###
#Differentiate y
#Differentiating reduces by 1 data point
if wrap: diff_y=numpy.roll(y,1,axis=axis)-y
else: diff_y=numpy.diff(y,axis=axis)
dydx_reduced=diff_y/diff_x #broadcasting on *diff_x* works as needed
if interpolation_chosen==True:
###Step 3: ###
#linearly expand derivative *dydx_reduced* by 1 point in both directions along *axis* to permit later interpolation at edges#
#If there literally is no derivative, make one up (zeros)
if len(dydx_reduced)==0:
shape=list(y.shape)
shape[axis]=2
dydx_enlarged=numpy.zeros(shape)
else:
edge_slicer_bottom=[None for dim in range(y.ndim)]
edge_slicer_top=copy.copy(edge_slicer_bottom)
edge_slicer_bottom[axis]=0; edge_slicer_top[axis]=-1
#this value only used in interpolation function
dydx_enlarged=numpy.concatenate((get_array_slice(dydx_reduced,edge_slicer_bottom,squeeze=False),\
dydx_reduced,\
get_array_slice(dydx_reduced,edge_slicer_top,squeeze=False)),\
axis=axis)
###Step 4. ###
#interpolate back to original x values#
try:
dydx_enlarged=numpy.swapaxes(dydx_enlarged,axis,-1) #We swap axes because interp1d has a bug for axis!=-1
interpolator=interpolator_object(x_enlarged,dydx_enlarged,kind=interpolation,axis=-1,\
copy=False,bounds_error=True)
dydx=interpolator(x)
dydx=numpy.swapaxes(dydx,axis,-1)
except:
Logger.logNamespace(locals())
print('X:',x_enlarged)
print('dY/dX:',dydx_enlarged)
raise
#*x* unchanged, likewise *diff_x*
###If not interpolating, keep x, y reduced###
else:
x=x_reduced
dydx=dydx_reduced
if isinstance(y,baseclasses.AWA): axes=y.axes; axis_names=y.axis_names
else: axes=[None]*y.ndim; axis_names=None
axes[axis]=x
return baseclasses.AWA(dydx,axes=axes,axis_names=axis_names)
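# Usage sketch (added comment), assuming scipy is available for the default linear
# interpolation: the derivative is returned on the same grid as the input, e.g.
#   x = numpy.linspace(0, 2*numpy.pi, 200)
#   dydx = differentiate(numpy.sin(x), x=x)   # approximately numpy.cos(x)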
def gradient(y,axes=None,order=1,interpolation='linear'):
"""
Compute the gradient of an array *y* with respect
to each of its axes {*x1,x2,...*}, and provide the
axes values at which the elements of each component
of the gradient is computed.
OUTPUTS:
(g1(x'),g2(x'),...)
Here *g1*, etc. indicates the first, second, and
each component of the gradient. Primed *x'* return values
correspond to axis coordinates at which each component
is computed. If interpolation is used, the primed
coordinates are identical to the input axis coordinates,
otherwise central difference coordinates are used.
INPUTS:
*axes: (xs1, xs2,...)
A 1-D axis iterable should be provided for
each dimension of the array *y*. Alternatively,
omit {*x1,x2,...*} and the index values at each
point in *y* will be used as appropriate.
*order: an integer specifying the order of the
derivative to be taken in each component.
DEFAULT: 1 (first derivative)
*interpolation: specify the interpolation to be used
when calculating derivatives. Must be one of
            True, False, 'linear', 'quadratic', 'cubic'. If
interpolation is used, the returned axis
coordinates are identical to the input axis
coordinates.
DEFAULT: True (linear interpolation)
"""
if not isinstance(y,numpy.ndarray): y=numpy.array(y)
##Determine axes
if axes is None:
if isinstance(y,baseclasses.ArrayWithAxes): axes=y.axes
else: axes=[None]*y.ndim
##If *y* has 1 dimension and *axes* is not a list, interpret as single axis array#
elif y.ndim==1 and isinstance(axes,numpy.ndarray):
if axes.ndim==1: axes=[axes]
##Check each axis##
for dim in range(y.ndim):
if dim>=len(axes): axes.append(numpy.arange(y.shape[dim])); continue
elif axes[dim] is None: axes[dim]=numpy.arange(y.shape[dim]); continue
try: axes[dim]=numpy.array(axes[dim])
except TypeError:
Logger.raiseException('The axis provided for dimension %s of *y* cannot '%dim+\
'be cast as an array.',exception=TypeError)
Logger.raiseException('The axis provided for dimension %s of *y* must be '%dim+\
'one-dimensional with length %s.'%y.shape[dim],\
unless=(axes[dim].shape==(y.shape[dim],)),\
exception=ValueError)
#####Get gradient#####
grad=[]
for i in range(y.ndim):
derivative=differentiate(y,x=axes[i],axis=i,order=order,interpolation=interpolation)
grad.append(derivative)
return tuple(grad)
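# Usage sketch (added comment): for a 2-D array the result is a 2-tuple with one
# derivative per axis, each on the original grid when interpolation is enabled, e.g.
#   dZdy, dZdx = gradient(Z, axes=[ys, xs])   # Z sampled on axes (ys, xs)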
def zeros(y,axes=None):
"""
Compute the position of the zeros of an array. Zeros
are defined as coordinate positions where:
1) The array value is identically zero, or
2) array values cross the zero line (the position
is inferred through linear interpolation)
INPUTS:
*Non-keywords: non-keyword values should be
passed in the following way:
*zeros(x1,x2,...,y)*
A 1-D axis iterable should be provided for
each dimension of the array *y*. Alternatively,
omit {*x1,x2,...*} and the index values at each
point in *y* will be used as appropriate.
OUTPUTS:
A list of point coordinates of the form:
((x1,y1,...), (x2,y2,...), ...)
Each coordinate in the list has as many elements
as the number of dimensions in the input array *y*.
"""
#####Check all inputs#####
if not isinstance(y,numpy.ndarray): y=numpy.array(y)
##Determine axes
if axes is None:
if isinstance(y,baseclasses.ArrayWithAxes): axes=y.axes
else: axes=[None]*y.ndim
##If *y* has 1 dimension and *axes* is not a list, interpret as single axis array#
elif y.ndim==1 and isinstance(axes,numpy.ndarray):
if axes.ndim==1: axes=[axes]
##Check each axis##
for dim in range(y.ndim):
if dim>=len(axes): axes.append(numpy.arange(y.shape[dim])); continue
elif axes[dim] is None: axes[dim]=numpy.arange(y.shape[dim]); continue
try: axes[dim]=numpy.array(axes[dim])
except TypeError:
Logger.raiseException('The axis provided for dimension %s of *y* cannot '%dim+\
'be cast as an array.',exception=TypeError)
Logger.raiseException('The axis provided for dimension %s of *y* must be '%dim+\
'one-dimensional with length %s.'%y.shape[dim],\
unless=(axes[dim].shape==(y.shape[dim],)),\
exception=ValueError)
x_values=axes
####The calculation that follows uses interpolation, which we can't do if
#any dimension is trivially of length 1 - so we fudge it to length 2####
resized_y=copy.copy(y)
resized_x_values=copy.copy(x_values)
for axis in range(y.ndim):
if resized_y.shape[axis]==1:
new_shape=list(copy.copy(resized_y.shape))
new_shape[axis]=2
resized_y=numpy.resize(resized_y,tuple(new_shape))
resized_x_values[axis]=[x_values[axis][0]-.5,x_values[axis][0]+.5]
#Now we're OK for interpolation#
######1) First, identify where zeros should reside based on values that cross the zero line######
#####Calculate derivatives at interpolated grid points#####
#This is necessary to compare "apples to apples", since the only
#way to identify a zero line crossing is to evaluate derivatives
#of *sign(y)* at "in-between" points and compare along each axis.
#However each derivative must be computed at identical points.
#So, we compute a new *y* interpolated at every axis EXCEPT the
#derivative axis, so that in the end every derivative will end
#up being evaluated along an identical grid, good for comparison
derivatives=[]
interp_x_values=[]
for axis in range(y.ndim):
###Compute interpolated grid###
#Iterate over interpolation directions
#and interpolate##
interp_y=copy.copy(y)
for axis2 in range(y.ndim):
#Don't interpolate to "in-between" points along derivative axis#
if axis2==axis: continue
###Compute interpolated y grid###
reducer=[None]*interp_y.ndim
reducer[axis2]=[0,-1] #this will lop off the end point along *axis2*
dy=numpy.diff(interp_y,axis=axis2)
interp_y=get_array_slice(interp_y,reducer) #lopped
interp_y=interp_y+dy/2. #now we extend to midpoint towards the endpoint we lopped off
###Compute interpolated x axis###
current_x=copy.copy(resized_x_values[axis])
interp_x=current_x[0:-1]
dx=numpy.diff(current_x,axis=0)
interp_x=interp_x+dx/2.
###Compute derivative of *sign*###
#Now we have interpolated y grid for this derivative#
        derivative=differentiate(numpy.sign(interp_y),order=1,interpolation=False,axis=axis)
derivatives.append(derivative)
interp_x_values.append(interp_x)
###Find where sign of 1st deriv changes in *any* dimension###
#Iterating through dimensions and adding boolean arrays is logical *or* operation#
for i in range(len(derivatives)): #All derivatives should be of uniform shape, evaluated at the same grid
if i==0: interp_zeros=(numpy.abs(derivatives[i])>1) #changing from a finite value to 0 is insignificant, we need a *delta* of 2
else: interp_zeros+=(numpy.abs(derivatives[i])>1)
###Create array of linear indices###
    unravel_index=numpy.unravel_index
    all_lin_indices=numpy.arange(numpy.prod(interp_zeros.shape)) #a single line of incrementing integers
all_lin_indices.resize(interp_zeros.shape) #reshaped as an array of memory-indices
###Get indices###
lin_indices=all_lin_indices[interp_zeros] #Index by an array of boolean values --> 1-D output
indices=[]
coordinates=[]
####A particular linear index corresponds to a particular point in the array adjacent to where a zero resides####
for lin_index in lin_indices:
###This is the index coordinate for this linear index####
index_set=unravel_index(lin_index,interp_zeros.shape)
coordinate_set=[]
for axis in range(len(index_set)):
axis_index=index_set[axis]
coordinate_set.append(interp_x_values[axis][axis_index])
indices.append(tuple(index_set))
coordinates.append(tuple(coordinate_set))
######2) Next, let's include the indices where the y-value actually is zero#####
    all_lin_indices=numpy.arange(numpy.prod(y.shape)) #a single line of incrementing integers
all_lin_indices.resize(y.shape) #reshaped as an array of memory-indices
lin_indices=all_lin_indices[y==0]
####A particular linear index corresponds to a particular point in the array that is identically zero####
for lin_index in lin_indices:
###This is the index coordinate for this linear index####
index_set=unravel_index(lin_index,y.shape)
coordinate_set=[]
for axis in range(len(index_set)):
axis_index=index_set[axis]
coordinate_set.append(x_values[axis][axis_index])
indices.append(tuple(index_set))
coordinates.append(tuple(coordinate_set))
####At last, we have everything that could be called a zero point#####
    from functools import cmp_to_key
    coordinates.sort(key=cmp_to_key(sequence_cmp)) #sort values in some way (well, at least it's sorted by the first entry)
    indices.sort(key=cmp_to_key(sequence_cmp))
return {'indices':tuple(indices),'coordinates':tuple(coordinates)}
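# Usage sketch (added comment): for y = sin(x) sampled on x in [0, 2*pi], the
# returned dictionary is intended to locate the sign changes, e.g.
#   zeros(numpy.sin(x), axes=[x])['coordinates']   # points near 0, pi and 2*pi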
def extrema(y,axes=None):
global Dy_signs,DDy_signs,where_maxima,where_minima,where_saddle,inner_index_grids
if axes is None and isinstance(y,baseclasses.ArrayWithAxes):
y=y.sort_by_axes()
axes=y.axes
else:
try: y=baseclasses.ArrayWithAxes(y,axes=axes)
except IndexError:
Logger.raiseException('If provided, `axes` must be an iterable of arrays sized to each dimension of `y`.',\
exception=IndexError)
### The only significant dimensions are those where size of `y` is more than two ###
    sig_dims=[dim for dim in range(y.ndim) if y.shape[dim]>2]
Logger.raiseException('`y` must have length greater than 2 along at least one axis.',\
exception=ValueError,unless=sig_dims)
index_grids=y.get_index_grids(broadcast=True) # Shape is 1 except along associated index
inner_index_grids=[baseclasses.get_array_slice(index_grids[dim], [1,-1],\
squeeze=False, axis=dim) for dim in sig_dims]
### Get array values at interior points (where local extrema are defined) ###
inner_y=y
for dim in sig_dims: inner_y=baseclasses.get_array_slice(inner_y, [1,-1],\
squeeze=False, axis=dim)
def collapse_to_interstices(arr,along_axes):
for axis in along_axes:
arr0=baseclasses.get_array_slice(arr, [0,-1], squeeze=False, axis=axis)
arr=arr0+numpy.diff(arr,axis=axis)/2.
return arr
### Evaluate first and second derivatives at interstices ###
Dys=[collapse_to_interstices(numpy.diff(y,axis=i),\
along_axes=set(sig_dims)-set((i,))) \
for i in sig_dims]
    Dy_signs=[numpy.where(Dy>0,1,Dy) for Dy in Dys] #Reduce to 1 where positive
    Dy_signs=[numpy.where(Dy<0,-1,Dy) for Dy in Dy_signs] #Reduce to -1 where negative
    # We've gone from evaluation at interstices to interior points only #
    # Positive where concavity along that axis is positive, negative otherwise, zero where the derivative has not changed sign
    DDy_signs=numpy.array([collapse_to_interstices(numpy.diff(Dy_sign,axis=dim),\
                                                   along_axes=set(sig_dims)-set((dim,))) \
                           for Dy_sign,dim in zip(Dy_signs,sig_dims)])
## Conditions for extrema ##
# local maximum requires DDy simultaneously <0 along all axes
# local minimum requires DDy simultaneously >0 along all axes
# local saddle requires (otherwise) abs(DDy)>0 along all axes
where_maxima=numpy.prod(DDy_signs<0,axis=0).astype(bool)
where_minima=numpy.prod(DDy_signs>0,axis=0).astype(bool)
#Saddle point condition is based on indeterminacy of Hessian matrix,
# but its eigenvalues are not an analytic function of the second
# derivatives for general n-dimensional data.
## Convert conditions into corresponding index, axis, and array values ##
d={}
axes=y.axes
for name,condition in zip(['maxima','minima'],\
[where_maxima,where_minima]):
## Suppose we don't have condition met anywhere. Empty lists. ##
if not condition.any():
d[name]={'values':[],\
'indices':[],\
'coordinates':[]}
continue
#Get partial lists of values, only with entries for significant dimensions
array_values=inner_y[condition]
partial_index_values=[inner_index_grid[condition] for inner_index_grid in inner_index_grids]
N=len(partial_index_values[0])
sig_dim=0
axis_values=[]; index_values=[]
for dim in range(y.ndim):
#If we want the coordinates along insignificant dimension, use 0,
# since the location of extrema is not well defined along such an axis
if dim not in sig_dims:
index_values.append(N*[0])
else:
index_values.append(partial_index_values[sig_dim])
sig_dim+=1
axis_values.append(axes[dim][index_values[-1]])
# Get tuples and sort them by array value #
index_tuples=list(zip(*index_values))
axis_tuples=list(zip(*axis_values))
sorted_values=sorted(zip(array_values,index_tuples,axis_tuples)) #Comparison performed on first entry
if name!='minima': sorted_values.reverse() #Want maximum value first, unless we are sorting minima
        array_values,index_tuples,axis_tuples=list(zip(*sorted_values)) #Unpack the sorted list
d[name]={'values':array_values,\
'indices':index_tuples,\
'coordinates':axis_tuples}
return d
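# Usage sketch (added comment): the result maps 'maxima' and 'minima' to dicts of
# value-sorted 'values', 'indices' and 'coordinates'; e.g. for a 1-D parabola
# y = -(x-1)**2 sampled on a fine grid, extrema(y, axes=[x])['maxima']['coordinates']
# holds a point near x = 1.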
"""
def extrema(y,axes=None,**kws):
from scipy.interpolate import LinearNDInterpolator
####Use the same prescription as in *gradient* to expand y and axes####
if not isinstance(y,numpy.ndarray): y=numpy.array(y)
##Determine axes
if axes is None:
if isinstance(y,baseclasses.ArrayWithAxes): axes=y.axes
else: axes=[None]*y.ndim
##If *y* has 1 dimension and *axes* is not a list, interpret as single axis array#
elif y.ndim==1 and isinstance(axes,numpy.ndarray):
if axes.ndim==1: axes=[axes]
##Check each axis##
for dim in range(y.ndim):
if dim>=len(axes): axes.append(numpy.arange(y.shape[dim])); continue
elif axes[dim] is None: axes[dim]=numpy.arange(y.shape[dim]); continue
try: axes[dim]=numpy.array(axes[dim])
except TypeError:
Logger.raiseException('The axis provided for dimension %s of *y* cannot '%dim+\
'be cast as an array.',exception=TypeError)
Logger.raiseException('The axis provided for dimension %s of *y* must be '%dim+\
'one-dimensional with length %s.'%y.shape[dim],\
unless=(axes[dim].shape==(y.shape[dim],)),\
exception=ValueError)
#Turn y into an
y=baseclasses.ArrayWithAxes(y,axes=axes)
x_values=axes
#####Get gradient#####
#Gradient, no interpolation REDUCES points by 1 on each axis
g=gradient(y,axes,order=1,interpolation=None)
ndim=len(g)
####Decide on dims to include in analysis####
if kws.has_key('scan_dims'):
scan_dims=kws['scan_dims']
###Check input scan_dims###
misc.check_vars(scan_dims,int)
if not hasattr(scan_dims,'__len__'):
try: scan_dims=list(scan_dims)
except: scan_dims=[scan_dims]
###Make all input dims positive###
for i in range(len(scan_dims)):
scan_dims[i]=scan_dims[i]%ndim
else:
###Pick default - all axes###
scan_dims=range(ndim)
dgdx=[]
new_x_values=axes #x-values will be retained
for axis in scan_dims:
###Collect component of gradient for this axis###
s_reduced=numpy.sign(g[axis])
x_reduced=new_x_values[axis]
####Interpolate outwards to two extra points along *axis*####
###If gradient for this component is non-existent:###
if len(x_reduced)==0:
##Enlarge *s*
#Extend outwrds to ENLARGE points by 2 on axis by making new array
shape=list(s_reduced.shape)
shape[axis]+=2
s_enlarged=numpy.zeros(tuple(shape)) #all zeros anyway
##Enlarge *x*
#If we have x-values to start with for this axis:
try: x_enlarged=[x_values[axis][0],x_values[axis][0]+1] #it's meaningless what the second point is, it's interpolated from nothing
#If we don't:
except IndexError: x_enlarged=[0,1]
###Or gradient is full, so compute real values###
else:
##Enlarge *s*##
#Extend outwards to ENLARGE points by 2 on axis##
edge_slicer_bottom,edge_slicer_top=[None]*s_reduced.ndim,[None]*s_reduced.ndim
edge_slicer_bottom[axis]=0; edge_slicer_top[axis]=-1
s_enlarged=numpy.concatenate((get_array_slice(s_reduced,edge_slicer_bottom,squeeze=False),\
s_reduced,\
get_array_slice(s_reduced,edge_slicer_top,squeeze=False)),\
axis=axis)
##Enlarge *x*##
#If x has 2 or more elements:
try:
diff_x_bottom=x_reduced[1]-x_reduced[0]
diff_x_top=x_reduced[-1]-x_reduced[-2]
#Otherwise, go here:
except IndexError:
#Bogus differential values#
diff_x_bottom,diff_x_top=1,1
x_enlarged=numpy.concatenate(([x_reduced[0]-diff_x_bottom],x_reduced,[x_reduced[-1]+diff_x_top]))
#Compute second derivative discretely at dydx=0 points for each x
#no interpolation REDUCES points by 1 on each axis
deriv=differentiate(s_enlarged,x=x_enlarged,axis=axis,interpolation=None)
dgdx.append(deriv) #must diff. with respect to correct axis, could result in sign flip
#new dgdx has same shape as original array, same x values
#extremal points must now ==+/-1 at their locations in this array
#Interpolate y out to `x_enlarged`
#y=y.interpolate_axis(x_enlarged,axis=axis,bounds_error=False,extrapolate=True)
###Find where sign of 2nd deriv changes in *all* dimensions###
#Iteration through dimensions and multiplying boolean arrays is logical *and* operation#
for i in range(len(dgdx)):
if i==0:
minima=(dgdx[i]>0)
maxima=(dgdx[i]<0)
else:
minima*=(dgdx[i]>0)
maxima*=(dgdx[i]<0)
prod=numpy.product(numpy.array(dgdx),axis=0) #multiply components, collected in first "axis" of pseudo-array
#*abs(bool-1)* equivalent to element-wise negation
#multiplication equivalent to element-wise "and" operator
saddle=(prod!=0)*(numpy.abs(minima-1))*(numpy.abs(maxima-1))
#convert saddle points to boolean instead of 0,1 values
saddle=(saddle!=0)
###Create array of linear indices###
from numpy.lib.index_tricks import unravel_index
all_lin_indices=numpy.arange(numpy.product(prod.shape)) #a single line of incrementing integers
all_lin_indices.resize(prod.shape) #reshaped as an array of memory-indices
###Get indices/values at each minimum, maximum, saddle###
all_indices=[]
all_coordinates=[]
all_values=[]
for extrema_type in [minima,maxima,saddle]:
#Index by an array of boolean values --> 1-D output
this_type_lin_indices=all_lin_indices[extrema_type]
this_type_indices=[]
this_type_coordinates=[]
this_type_values=[]
####A particular linear index corresponds to a particular point in the array for this extremum type####
for this_type_lin_index in this_type_lin_indices:
###This is the index coordinate for this linear index####
this_type_index_set=unravel_index(this_type_lin_index,prod.shape)
###Populate axis coordinates###
this_coordinate_set=[]
for axis in range(len(this_type_index_set)):
axis_index=this_type_index_set[axis]-1 #subtract 1 since we enlarged the array by 1
#perhaps we don't have a set of x_values to draw from in *x_values*
try: this_coordinate_set.append(x_values[axis][axis_index])
#indeed we don't, just use axis index
except IndexError: this_coordinate_set.append(axis_index)
try:
this_type_indices.append(this_type_index_set)
this_type_coordinates.append(tuple(this_coordinate_set))
#Interpolate our original y-values back to the coordinate from the expanded, incommensurate grid
yinterp=y
for i in range(y.ndim): yinterp=yinterp.interpolate_axis(this_coordinate_set[-1-i],axis=-1-i,bounds_error=False)
this_type_values.append(yinterp)
except: continue
all_indices.append(this_type_indices)
all_coordinates.append(this_type_coordinates)
all_values.append(this_type_values)
###Sort by value###
min_indices,min_coordinates,min_values=all_indices[0],all_coordinates[0],all_values[0]
max_indices,max_coordinates,max_values=all_indices[1],all_coordinates[1],all_values[1]
saddle_indices,saddle_coordinates,saddle_values=all_indices[2],all_coordinates[2],all_values[2]
#A comparator for ordering the maxima in decreasing order#
def descending(a,b):
if a>b: return -1
elif a==b: return 0
else: return 1
print min_values,min_indices,min_coordinates
max_values,max_indices,max_coordinates=misc.sort_by(max_values,max_indices,max_coordinates,cmp=descending)
min_values,min_indices,min_coordinates=misc.sort_by(min_values,min_indices,min_coordinates,cmp=cmp)
return {'minima':{'indices':min_indices,'coordinates':min_coordinates,'values':min_values},\
'maxima':{'indices':max_indices,'coordinates':max_coordinates,'values':max_values},\
'saddle':{'indices':saddle_indices,'coordinates':saddle_coordinates,'values':saddle_values}}
"""
def convolve_periodically(in1,in2):
"""Perform a periodic (wrapping) convolution of the two 2-D inputs. They
must both be rank-1 real arrays of the same shape."""
from numpy.fft import fft,ifft
s1=fft(fft(in1,axis=0),axis=1)
s2=fft(fft(in2,axis=0),axis=1)
out=ifft(ifft(s1*s2,axis=0),axis=1).real
return out#/numpy.sqrt(numpy.prod(out.shape))
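# Usage sketch (added comment): by the convolution theorem this is a circular
# convolution, so convolving in1 with a kernel that is 1 at index (di, dj) and 0
# elsewhere reproduces numpy.roll(in1, (di, dj), axis=(0, 1)) up to float error.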
def convolve_periodically_new(in1,in2):
"""Perform a periodic (wrapping) convolution of the two 2-D inputs. They
must both be rank-1 real arrays of the same shape over the same manifold.
Result is normalized to represent a convolution sum. To represent an
integral, result should be multiplied by `dx*dy`, where `dx` and `dy` are
respective integral measures of `x` and `y` pixels."""
from numpy.fft import fft,ifft,fftshift,ifftshift
s1=in1; s2=in2
for i in range(2):
s1=fftshift(fft(s1,axis=i),axes=i)
s2=fftshift(fft(s2,axis=i),axes=i)
out=s1*s2
for i in range(2):
out=ifft(ifftshift(out,axes=i),axis=i)
#s1=fftshift(fft(shift(fft(fft(in1,axis=0),axis=1)
#s2=fft(fft(in2,axis=0),axis=1)
#out=ifft(ifft(s1*s2,axis=0),axis=1).real #Directly from the convolution theorem
return out.real#*numpy.prod(out.shape)
def _downcasting_spectrum_method_(old_method):
##We don't want to redefine *view*, since redefinitions call *view*##
    if old_method.__name__ == 'view': return old_method
def new_method(*args,**kwargs):
##Obtain result of bound method##
result=old_method(*args,**kwargs)
##If result is another *Spectrum*, check that we have frequencies##
if isinstance(result,Spectrum):
#No frequencies, so downcast - no longer a true *Spectrum*#
if len(result.get_frequency_dimensions())==0:
return result.view(baseclasses.ArrayWithAxes)
##If result type is appropriate##
return result
##Make new method attributes mirror the input method##
for attr_name in dir(old_method):
try: setattr(new_method,attr_name,getattr(old_method,attr_name))
except: pass
setattr(new_method,'__name__',old_method.__name__)
return new_method
class Spectrum(baseclasses.ArrayWithAxes):
"""Acquire the spectrum of an input N-dimensional `source` along an
axis of your choosing. The result is a subclass of
<common.baseclasses.ArrayWithAxes>, with additional methods added for the
manipulation and display of spectral characteristics. Unlike the conventional
FFT, the resultant object is consistent with Parseval's theorem for the
Fourier transform, which like all unitary transformations satisfies:
integrate(|source|^2*dt) = integrate(|spectrum|^2*df)
In particular, the action of `Spectrum.get_inverse` returns to the original
source (internally, via the IFFT) with appropriate normalization to ensure
complete identity."""
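    # Normalization sketch (added comment), assuming a 1-D real source sampled with
    # spacing dt and no windowing: internally the spectrum is fft(source)*dt, so with
    # df = 1/(n*dt) the discrete Parseval identity
    #   sum(abs(source)**2)*dt == sum(abs(spectrum)**2)*df
    # holds up to floating-point error, as claimed in the docstring above.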
####Overload all inherited methods with downcasting equivalent####
attr_names=dir(baseclasses.ArrayWithAxes)
for attr_name in attr_names:
method=getattr(baseclasses.ArrayWithAxes,attr_name)
##Only decorate callable methods##
        if not isinstance(method,(types.FunctionType,types.MethodType)): continue
locals()[attr_name]=_downcasting_spectrum_method_(method)
def __new__(cls,source,axis=None,duration=None,\
fband=None,n=None,\
power=False,normalized=False,\
verbose=True,axes=None,axis_names=None,\
per_sample=False,\
window=None):
fourier_transform=True
####If new axes or axis names supplied, add to source####
if axes!=None or axis_names!=None:
if isinstance(source,baseclasses.ArrayWithAxes): source.set_axes(axes=axes,axis_names=axis_names)
else: source=baseclasses.ArrayWithAxes(source,axes=axes,axis_names=axis_names)
####If input is already a spectrum class, no need to Fourier xform unless requested####
if isinstance(source,Spectrum) and axis is None:
spectrum=source
fourier_transform=False
####We also accept *ArrayWithAxes* with frequency axes####
elif isinstance(source,baseclasses.ArrayWithAxes):
##If frequency dimensions can be identified##
if (axis is None) and True in ['Frequency' in axis_name for axis_name in source.axis_names]:
spectrum=source.view(Spectrum)
fourier_transform=False
elif 'Frequency' in source.axis_names[axis]:
spectrum=source.view(Spectrum)
fourier_transform=False
####Get spectrum and cast as new type####
if fourier_transform:
if axis is None: axis=0 #We need a default
###Require axis###
Logger.raiseException('Frequency axes can not be discerned for *source* '+\
'in its provided form. Please provide either an '+\
'*ArrayWithAxes* instance with one or more axes labeled '+\
'"... Frequency", or specify an *axis* along which to Fourier '+\
'transform.', unless=isinstance(axis,int),\
exception=TypeError)
###Check inputs###
assert isinstance(duration,number_types+(type(None),))
###See if we need to interpolate for homogeneous *axis*###
axis=axis%source.ndim
interpolate_axis=False
if not isinstance(source,baseclasses.ArrayWithAxes):
source=baseclasses.ArrayWithAxes(source,verbose=False)
else:
#*diff* must be homogeneously increasing/decreasing if we don't have to interpolate
                #We make the margin of error for inhomogeneities 1e-5.
time_values=source.axes[axis]
diff=numpy.diff(time_values)
inhomogeneous=(numpy.abs(diff-diff[0])/diff[0]>=1e-5)
if True in inhomogeneous: interpolate_axis=True
axes=source.axes
axis_names=source.axis_names
###Interpolate axis to equi-spaced points if homogeneity of axis is uncertain###
if interpolate_axis:
try: from scipy.interpolate import interp1d
except ImportError: Logger.raiseException('The module <scipy> is required for interpolation to homogeneous '+\
'samples along *axis*.',exception=ImportError)
Logger.write('Interpolating to evenly-spaced samples along *axis*.')
interpolator=interp1d(axes[axis],source,axis=axis)
##Produce homogeneous axis, interpolate across it, and store it##
homog_axis=numpy.linspace(axes[axis][0],axes[axis][-1],len(axes[axis]))
source=interpolator(homog_axis)
axes[axis]=homog_axis
###Define axis, duration, nsamples###
nsamples=source.shape[axis]
if n is None: n=nsamples
if duration is None: duration=axes[axis][-1]-axes[axis][0]
###Apply window###
if window:
if hasattr(window,'__call__'):
window=window(source.shape[axis])
else:
Logger.raiseException("Window must be one of 'hanning', 'hamming', 'bartlett', 'blackman'.",\
                                          unless=(window in ['hanning', 'hamming', 'bartlett', 'blackman']),\
exception=ValueError)
window=getattr(numpy,window)(nsamples)
window*=nsamples/numpy.sum(window)
window_shape=[1,]*source.ndim
window_shape[axis]=len(window)
window.resize(window_shape)
source=source*window
###Acquire spectrum###
dt=duration/float(nsamples-1)
spectrum=numpy.fft.fft(source,axis=axis,n=n)
frequencies=numpy.fft.fftfreq(n=n,d=dt) #duration specified as a window per-sample
###Unpack "standard" fft packing###
spectrum=numpy.fft.fftshift(spectrum,axes=[axis])
frequencies=numpy.fft.fftshift(frequencies)
###Re-normalize to maintain original total power in power spectrum###
#Before we do anything, default behavior of FFT is to ensure:
# sum(abs(spectrum)**2)=sum(abs(source)**2)*n_FFT
#The following normalization ensures Parseval's theorem applies
# identically between the function and its spectrum inside their
# respective integration domains
spectrum*=cls.parseval_consistent_prefactor(dt)
#This additional normalization will make the spectrum
# describe the amplitude of power spectral density per
# sample of the input
if per_sample: spectrum/=numpy.sqrt(nsamples)
###Cast result as spectrum class###
spectrum=super(cls,cls).__new__(cls,spectrum)
##Set up new axes##
axes[axis]=frequencies
axis_names[axis]+=' Frequency'
spectrum.set_axes(axes,axis_names,verbose=verbose)
##Since we took Fourier transform, it's obvious this is not a power or normalized spectrum##
power=False
normalized=False
###Impose frequency limits if provided###
if fband!=None and axis!=None:
spectrum=spectrum.impose_fband(fband,axis=axis,closest=True)
###Set status as power/normalized/per-sample spectrum##
if power:
spectrum[:]=numpy.real(spectrum)
spectrum._power_=True
if normalized:
spectrum._normalized_=True
if per_sample:
spectrum._per_sample_=True
return spectrum
def __array_finalize__(self,obj):
##First use super class finalization##
super(Spectrum,self).__array_finalize__(obj)
##Include some tags about the character of the spectrum##
if not hasattr(self,'_normalized_'):
##Can inherit tag from parent##
if isinstance(obj,Spectrum): self._normalized_=obj._normalized_
else: self._normalized_=False
if not hasattr(self,'_power_'):
##Can inherit tag from parent##
if isinstance(obj,Spectrum): self._power_=obj._power_
else: self._power_=False
if not hasattr(self,'_per_sample_'):
##Can inherit tag from parent##
if isinstance(obj,Spectrum): self._per_sample_=obj._per_sample_
else: self._per_sample_=False
def __getattribute__(self,attr_name):
method_mapping={'power':'get_power_spectrum',\
'folded':'fold_frequencies',\
'frequency_dims':'get_frequency_dimensions',\
'geometric_dims':'get_geometric_dimensions',\
'frequency_axes':'get_frequency_axes',\
'geometric_axes':'get_geometric_axes',\
'frequency_axis_names':'get_frequency_axis_names',\
'geometric_axis_names':'get_geometric_axis_names',\
'phase':'get_phase',\
'geometric_mean':'get_geometric_mean',\
'geometric_min':'get_geometric_min',\
'geometric_max':'get_geometric_max',\
'geometric_sum':'get_geometric_sum',\
'geometric_integral':'get_geometric_integral',\
'inverse':'get_inverse'}
for key in list(method_mapping.keys()):
if attr_name==key:
method=getattr(self,method_mapping[key])
return method()
return super(Spectrum,self).__getattribute__(attr_name)
##Overload *set_axes* in order to make sure frequency axes are preserved##
def set_axes(self,axes=None,axis_names=None,\
verbose=True,intermediate=False):
##Vet *axis_names* to preserve frequency axes##
if not intermediate and hasattr(axis_names,'__len__'):
axis_names=list(axis_names)
while len(axis_names)<self.ndim: axis_names+=[None]
##Only pay attention to names that correspond with current frequency dim##
frequency_dims=self.get_frequency_dimensions()
for dim in frequency_dims:
##Check axis name##
provided_name=axis_names[dim]
if isinstance(provided_name,str):
if not 'Frequency' in provided_name:
axis_names[dim]+=' Frequency'
if verbose:
Logger.write('%s:\n'%type(self)+\
'\tAxis name "%s" will be coerced to "%s" '%(provided_name,axis_names[dim])+\
'to reflect that dimension %s is a frequency dimension.'%dim)
return super(Spectrum,self).set_axes(axes=axes,axis_names=axis_names,\
verbose=verbose,intermediate=intermediate)
@staticmethod
def parseval_consistent_prefactor(dt=None,fmax=None):
if dt is not None: return dt
elif fmax is not None: return 1/(fmax*2) #twice Nyquist frequency gives original sampling interval `dt`
def is_power_spectrum(self):
return self._power_
def is_normalized(self):
return self._normalized_
def is_per_sample(self):
return self._per_sample_
def adopt_attributes(self,other):
Logger.raiseException('`other` must be another `Spectrum` instance in order to adopt attributes.',\
exception=TypeError,unless=isinstance(other,Spectrum))
self._power_=other.is_power_spectrum()
self._normalized_=other.is_normalized()
self._per_sample_=other.is_per_sample()
def get_frequency_dimensions(self):
axis_names=self.get_axis_names()
frequency_dims=[]
for i in range(self.ndim):
if 'Frequency' in axis_names[i]: frequency_dims.append(i)
return frequency_dims
def get_geometric_dimensions(self):
all_dims=set(range(self.ndim))
frequency_dims=set(self.get_frequency_dimensions())
geometric_dims=list(all_dims-frequency_dims)
return geometric_dims
def get_frequency_axes(self):
frequency_dims=self.get_frequency_dimensions()
axes=self.get_axes()
return [axes[dim] for dim in frequency_dims]
def get_geometric_axes(self):
geometric_dims=self.get_geometric_dimensions()
axes=self.get_axes()
return [axes[dim] for dim in geometric_dims]
def get_frequency_axis_names(self):
frequency_dims=self.get_frequency_dimensions()
axis_names=self.get_axis_names()
return [axis_names[dim] for dim in frequency_dims]
def get_geometric_axis_names(self):
geometric_dims=self.get_geometric_dimensions()
axis_names=self.get_axis_names()
return [axis_names[dim] for dim in geometric_dims]
def get_power_spectrum(self):
if self.is_power_spectrum(): return self
else:
##Get magnitude squared for power
power_spectrum=numpy.abs(self)**2
power_spectrum._power_=True
return power_spectrum
def get_phase(self,unwrap=True,discont=numpy.pi):
phase=numpy.angle(self) #return radians
#Try to make continuous#
if unwrap:
try:
for axis in self.frequency_dims: phase=numpy.unwrap(phase,axis=axis,discont=discont)
except ImportError: pass
#If *numpy.angle* hands the parent to *__array_finalize__* as a plain ndarray,
#attributes are not inherited - so force inheritance from the parent spectrum
if not isinstance(phase,Spectrum): phase=0*self+phase #Inherit attributes from parent
phase._power_=False
phase[numpy.isnan(phase)]=0 #Define nan phase to be zero
return phase.astype(float)
def get_geometric_mean(self):
##Average along each geometric dimension##
geometric_dims=self.get_geometric_dimensions()
geometric_dims.reverse() #Reverse ordering so affecting posterior dimensions won't affect positions of anterior axes
geometric_mean=self
for dim in geometric_dims: geometric_mean=geometric_mean.mean(axis=dim)
return geometric_mean
def get_geometric_max(self):
##Take the maximum along each geometric dimension##
geometric_dims=self.get_geometric_dimensions()
geometric_dims.reverse() #Reverse ordering so affecting posterior dimensions won't affect positions of anterior axes
geometric_max=self
for dim in geometric_dims: geometric_max=geometric_max.max(axis=dim)
return geometric_max
def get_geometric_min(self):
##Take the minimum along each geometric dimension##
geometric_dims=self.get_geometric_dimensions()
geometric_dims.reverse() #Reverse ordering so affecting posterior dimensions won't affect positions of anterior axes
geometric_min=self
for dim in geometric_dims: geometric_min=geometric_min.min(axis=dim)
return geometric_min
def get_geometric_sum(self):
##Sum along each geometric dimension##
geometric_dims=self.get_geometric_dimensions()
geometric_dims.reverse() #Reverse ordering so affecting posterior dimensions won't affect positions of anterior axes
geometric_sum=self
for dim in geometric_dims: geometric_sum=geometric_sum.sum(axis=dim)
##Reset axes##
geometric_sum.set_axes(self.get_frequency_axes(),\
self.get_frequency_axis_names())
geometric_sum=Spectrum(geometric_sum,power=self.is_power_spectrum(),\
normalized=self.is_normalized())
return geometric_sum
def get_geometric_integral(self,integration=None):
##Use default integration scheme##
if integration is None:
from scipy.integrate import trapz as integration
##Integrate along each geometric dimension##
shape=self.shape
geometric_dims=self.get_geometric_dimensions()
geometric_dims.reverse() #Reverse ordering so affecting posterior dimensions won't affect positions of anterior axes
geometric_integral=self
for dim in geometric_dims: geometric_integral=geometric_integral.integrate(axis=dim,integration=integration)
##Reset axes##
#result of integration is likely *ndarray*, so cast as *ArrayWithAxes*#
geometric_integral=baseclasses.ArrayWithAxes(geometric_integral,\
axes=self.get_frequency_axes(),\
axis_names=self.get_frequency_axis_names())
geometric_integral=Spectrum(geometric_integral,power=self.is_power_spectrum(),\
normalized=self.is_normalized())
return geometric_integral
def get_inverse(self,axis=None,n=None,origin=None,offset=None):
"""
`offset` will be applied to the axis values along the inverted dimension.
`origin` will be taken as the origin of the IFFT along the axis with "offset"
axis values. This means that if `origin!=offset`, the IFFT output will be
rolled along the inverted axis to coincide with the desired `origin` position.
"""
Logger.raiseException('This is a power spectrum. Power spectra cannot be '+\
'deterministically inverse-Fourier-transformed!',\
unless=(not self.is_power_spectrum()),\
exception=ValueError)
##Interpret axis along which to inverse transform##
frequency_dims=self.get_frequency_dimensions()
frequency_names=self.get_frequency_axis_names()
axes=self.get_axes()
if isinstance(axis,int):
axis=axis%self.ndim
Logger.raiseException('If provided as an integer, *axis* must be one of the '+\
'following frequency dimensions:\n'+\
'\t%s'%repr(frequency_dims),\
unless=(axis in frequency_dims),\
exception=IndexError)
elif isinstance(axis,str):
Logger.raiseException('If provided as a string, *axis* must be one of the '+\
'following frequency axis names:\n'+\
'\t%s'%repr(frequency_names),\
unless=(axis in frequency_names),\
exception=IndexError)
axis=self.get_axis_names().index(axis)
elif axis is None and len(frequency_dims)==1: axis=frequency_dims[0]
else: Logger.raiseException('This spectrum has more than a single frequency axis! '+\
'Please provide a valid specific frequency *axis* integer dimension '+\
'or axis name.', exception=ValueError)
###First interpolate back to FFT-consistent axis values###
freq_window=axes[axis].max()-axes[axis].min()
df=numpy.min(numpy.abs(numpy.diff(axes[axis])))
nsamples=int(numpy.round(freq_window/df))+1
if n is None: n=nsamples
dt=1/float(freq_window)*nsamples/float(n) #duration of each sampling in putative inverse transform
freqs=sorted(numpy.fft.fftfreq(n=n,d=dt)) #duration specified as a window per-sample
fftconsistent_self=self.interpolate_axis(freqs,axis=axis,fill_value=0,bounds_error=False)
##Just to see how the FFT-consistent version looks
#self.fftconsistent_self=fftconsistent_self
###Pack up into "standard" fft packing###
shifted_self=numpy.fft.ifftshift(fftconsistent_self,axes=[axis])
###Re-normalize to maintain the original normalization condition of FFT: ###
# sum(abs(spectrum)**2)=sum(abs(source)**2)*n_FFT
shifted_self/=self.parseval_consistent_prefactor(dt) #We multiplied this earlier, now remove
if self.is_per_sample(): shifted_self*=numpy.sqrt(nsamples)
###Acquire inverse spectrum###
if offset is None: offset=0
inverse=numpy.fft.ifft(shifted_self,axis=axis,n=n)
positions=numpy.linspace(0,n*dt,n)+offset
if origin is not None:
offset=origin-positions[0] #if `offset` is positive, positions[0] is too small and positions need to be up-shifted
nshift=int(offset/dt)# if the desired origin is more positive than the minimum position, offset is positive and we roll forward
inverse=numpy.roll(inverse,nshift,axis=axis)
###Re-assign axes and axis names###
axes=self.get_axes()
axis_names=self.get_axis_names()
axis_name=axis_names[axis]
frequency_exp=re.compile(r'\s+Frequency$')
new_axis_name=re.sub(frequency_exp,'',axis_name)
axis_names[axis]=new_axis_name
axes[axis]=positions
#Result of integration is likely *ndarray*, so cast as *ArrayWithAxes*#
inverse=baseclasses.ArrayWithAxes(inverse,axes=axes,axis_names=axis_names)
#If frequency axes remain, re-cast as spectrum#
if len(frequency_dims)>=2: inverse=Spectrum(inverse,normalized=self.is_normalized())
return inverse
def bandpass(self,fband,axis,closest=False):
##Check fband##
message='*fband* must be an iterable of length 2, both entries positive frequency values.'
Logger.raiseException(message,unless=hasattr(fband,'__len__'), exception=TypeError)
Logger.raiseException(message,unless=(len(fband)==2), exception=IndexError)
Logger.raiseException(message,unless=(fband[0]>=0 and fband[1]>=0), exception=ValueError)
##Check axis, make sure it's a frequency axis##
frequency_dims=self.frequency_dims
if isinstance(axis,str):
axis_names=self.axis_names
Logger.raiseException('If a string, *axis* must be a frequency axis among %s'%axis_names,\
unless=(axis in axis_names), exception=IndexError)
axis=axis_names.index(axis)
else:
assert isinstance(axis,int)
axis=axis%self.ndim
Logger.raiseException('*axis* must be one of the following frequency dimensions for '+\
'this spectrum: %s'%frequency_dims, unless=(axis in frequency_dims),\
exception=IndexError)
frequencies=self.get_axes()[axis]
abs_frequencies=numpy.abs(frequencies)
limiting_indices=(abs_frequencies>=numpy.min(fband))*(abs_frequencies<=numpy.max(fband))
if not limiting_indices.any() and closest:
diff_lower=numpy.abs(abs_frequencies-numpy.min(fband))
diff_upper=numpy.abs(abs_frequencies-numpy.max(fband))
closest_lower=abs_frequencies[diff_lower==diff_lower.min()].min()
closest_upper=abs_frequencies[diff_upper==diff_upper.min()].max()
Logger.warning('No frequency channels in the range of %s were '%repr(fband)+\
'found, so the closest available range %s will '%repr((closest_lower,closest_upper))+\
'instead be imposed.')
limiting_indices=(abs_frequencies>=closest_lower)*(abs_frequencies<=closest_upper)
##Multiply spectrum by a mask which removes the unwanted frequency channels##
mask_shape=[1 for dim in range(self.ndim)]; mask_shape[axis]=len(limiting_indices)
mask=limiting_indices.reshape(mask_shape)
spectrum=self.copy()
spectrum*=mask
return spectrum
def reduce_to_fband(self,fband,axis,closest=False):
##Check fband##
message='*fband* must be an iterable of length 2, both entries positive frequency values.'
Logger.raiseException(message,unless=hasattr(fband,'__len__'), exception=TypeError)
Logger.raiseException(message,unless=(len(fband)==2), exception=IndexError)
Logger.raiseException(message,unless=(fband[0]>=0 and fband[1]>=0), exception=ValueError)
##Check axis, make sure it's a frequency axis##
frequency_dims=self.frequency_dims
if isinstance(axis,str):
axis_names=self.axis_names
Logger.raiseException('If a string, *axis* must be a frequency axis among %s'%axis_names,\
unless=(axis in axis_names), exception=IndexError)
axis=axis_names.index(axis)
else:
assert isinstance(axis,int)
axis=axis%self.ndim
Logger.raiseException('*axis* must be one of the following frequency dimensions for '+\
'this spectrum: %s'%frequency_dims, unless=(axis in frequency_dims),\
exception=IndexError)
frequencies=self.get_axes()[axis]
abs_frequencies=numpy.abs(frequencies)
limiting_indices=(abs_frequencies>=numpy.min(fband))*(abs_frequencies<=numpy.max(fband))
if not limiting_indices.any() and closest:
diff_lower=numpy.abs(abs_frequencies-numpy.min(fband))
diff_upper=numpy.abs(abs_frequencies-numpy.max(fband))
closest_lower=abs_frequencies[diff_lower==diff_lower.min()].min()
closest_upper=abs_frequencies[diff_upper==diff_upper.min()].max()
Logger.warning('No frequency channels in the range of %s were '%repr(fband)+\
'found, so the closest available range %s will '%repr((closest_lower,closest_upper))+\
'instead be imposed.')
limiting_indices=(abs_frequencies>=closest_lower)*(abs_frequencies<=closest_upper)
##See if frequency limits are valid##
Logger.raiseException('Frequency channels in the range of %s are '%repr(fband)+\
'not available for the obtained spectrum. Instead, *fband* '+\
'must span some (wider) subset in the range of %s. '%repr((numpy.min(abs_frequencies),\
numpy.max(abs_frequencies)))+\
'Alternatively, use *closest=True* to accept frequency channels '+\
'closest to the desired frequency band.',\
unless=limiting_indices.any(), exception=IndexError)
##Boolean slicing would inevitably destroy axes information##
#We must be careful to record the axes so we can re-apply after slicing#
#Prepare a new set of axes with the correct frequencies#
axis_names=self.axis_names
new_axes=self.axes
limited_frequencies=frequencies[limiting_indices]
new_axes[axis]=limited_frequencies
##Perform slicing on vanilla array##
limiting_slice=[None for i in range(self.ndim)]
limiting_slice[axis]=limiting_indices
spectrum=get_array_slice(numpy.asarray(self), limiting_slice, squeeze=False)
##Re-set axes and transform back into spectrum##
spectrum=baseclasses.ArrayWithAxes(spectrum,axes=new_axes,axis_names=axis_names) #Re-set axes
spectrum=Spectrum(spectrum)
return spectrum
impose_fband=reduce_to_fband
def fold_frequencies(self):
####We'll need interpolation####
try: from scipy.interpolate import interp1d
except ImportError: Logger.raiseException('The module <scipy.interpolate> is required for \
this operation, but is not available.',\
exception=ImportError)
####Prepare container for folded frequencies spectrum####
if self.is_power_spectrum(): s=self.copy()
else: s=self.get_power_spectrum()
####Store parameters describing spectrum####
axes=s.axes
axis_names=s.axis_names
normalized=s.is_normalized()
power=s.is_power_spectrum()
##Add negative frequency channels to positive ones, keep only positive frequencies##
for dim in self.get_frequency_dimensions():
fs=axes[dim]
where_pos=fs>=0
where_neg=fs<0
pos_half=get_array_slice(s,where_pos,axis=dim)
neg_half=get_array_slice(s,where_neg,axis=dim)
pos_fs=pos_half.axes[dim]
neg_fs=neg_half.axes[dim]
##If no negative frequencies to fold here, move on!##
if not len(neg_fs): continue
##Determine overlap range##
pos_fband=[pos_fs.min(),pos_fs.max()]
where_neg_overlap=(neg_fs>=-pos_fband[1])*(neg_fs<=-pos_fband[0])
neg_overlap_fs=neg_fs[where_neg_overlap]
overlap_fband=[numpy.abs(neg_overlap_fs).min(),\
numpy.abs(neg_overlap_fs).max()]
where_pos_overlap=(pos_fs>=overlap_fband[0])*(pos_fs<=overlap_fband[1])
##Get positive and negative halves in this range only##
neg_half=get_array_slice(neg_half,where_neg_overlap,axis=dim)
pos_half=get_array_slice(pos_half,where_pos_overlap,axis=dim)
neg_half_mirror=neg_half.coordinate_slice([None,None,-1],axis=dim)
##Reverse the negative half by index and give it positive frequencies, then coincide it with positive half##
neg_half_mirror_axes=neg_half_mirror.axes
neg_half_mirror_axes[dim]*=-1
neg_half_mirror.set_axes(neg_half_mirror_axes)
neg_half_mirror_coinciding=pos_half.get_coinciding_spectrum(neg_half_mirror)
##Make s the positive part, and add negative contribution##
new_s=pos_half
new_s+=neg_half_mirror_coinciding
#If original spectrum was normalized, our folded result only really makes sense as a
#normalized spectrum if we had folded both s1 and s2, THEN normalized s1 to s2.
#Equivalently, we can divide by 2 right now so that it's as if we had treated
#s1 and s2 on the same footing.
if s.is_normalized(): new_s/=2.
#Concatenate the DC channel, if present
if 0 in fs:
where_DC=[numpy.argmin(numpy.abs(fs))]
DC_slice=get_array_slice(s,where_DC,axis=dim)
#Concatenating would destroy axes info, so record the axes and re-apply them after concatenation
these_axes=new_s.axes; these_axis_names=new_s.axis_names
these_axes[dim]=[0]+list(these_axes[dim])
new_s=numpy.concatenate((DC_slice,new_s),axis=dim)
new_s=Spectrum(new_s,axes=these_axes,axis_names=these_axis_names,axis=dim)
new_s.adopt_attributes(s)
s=new_s
return s
def spectral_overlap(self,spectrum):
assert isinstance(spectrum,Spectrum)
try: overlap_spectrum=numpy.real(self*numpy.conj(spectrum))
except:
Logger.raiseException('To obtain an overlap spectrum, *spectrum*, '+\
'must be of shape %s.'%repr(self.shape),\
exception=IndexError)
##Define inner product result as a power spectrum##
overlap_spectrum._power_=True
return overlap_spectrum
def get_coinciding_spectrum(self,spectrum,geometric_resize='interpolate',copy=True):
try: from scipy.interpolate import interp1d
except ImportError:
Logger.raiseException('The module <scipy> is required for spectral interpolation, but it is not available! '+\
'This function is unusable until <scipy> is added to the library of Python modules.',\
exception=ImportError)
####Check input####
assert isinstance(spectrum,Spectrum)
Logger.raiseException('*spectrum* must have the same number of '+\
'frequency dimensions (%s) as the present spectrum.'%len(self.frequency_dims),\
unless=(len(spectrum.frequency_dims)==len(self.frequency_dims)),\
exception=IndexError)
geometric_resize_options=['interpolate','expand']
Logger.raiseException('*geometric_resize* must be one of %s.'%geometric_resize_options,\
unless=geometric_resize in geometric_resize_options,\
exception=ValueError)
####Copy normalizing spectrum, we will change some of its characteristics####
if copy: spectrum=spectrum.copy()
else: spectrum=spectrum.view()
###Expand out normalizing spectrum to correct number of geometric dimensions###
self_geometric_dims=self.geometric_dims
self_axes=self.axes
spectrum_axes=spectrum.axes
dummy_shape=list(spectrum.shape)
##Start inserting fake geometric dimensions##
geometric_dims=
|
numpy.array(spectrum.geometric_dims)
|
numpy.array
|
""" Functions used by the ccc_calculator.py """
import numpy as np
constants = {
"Na": 6.02214e23, # Avogadro number
"kb": 1.38065e-23, # Boltzmann constant
"e0": 1.60217e-19, # elementary charge in As
"eps0": 8.854e-12, # vacuum dielectric permittivity
}
def find_roots(y, x):
"""
Finds all roots (zeros) of the y vs. x function.
It finds the roots by looking for where y crosses 0 and then
linearly interpolates between the two bracketing points.
Parameters
----------
y : array of n elements
y-values of a function
x : array of n elements
x-values of a function
Returns
-------
roots: array
Array which holds all the found roots.
"""
roots = []
previous_y = y[0]
previous_x = x[0]
for ex, why in zip(x, y):
if previous_y * why < 0:
delta_y = why - previous_y
delta_x = ex - previous_x
result = previous_x + delta_x / delta_y * (0 - previous_y)
roots.append(result)
previous_y = why
previous_x = ex
return
|
np.array(roots)
|
numpy.array
|
import numpy as np
import glob, os, pickle, json, copy, sys, h5py
from statistics import mode
import plyfile
from pyntcloud import PyntCloud
from plyfile import PlyData, PlyElement
from collections import Counter
MTML_VOXEL_SIZE = 0.1 # size for voxel
def make_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def read_label_ply(filename):
plydata = PlyData.read(filename)
x = np.asarray(plydata.elements[0].data['x'])
y = np.asarray(plydata.elements[0].data['y'])
z = np.asarray(plydata.elements[0].data['z'])
label = np.asarray(plydata.elements[0].data['label'])
return
|
np.stack([x,y,z], axis=1)
|
numpy.stack
|
# Set Strips
# Set the strip leds to preset levels.
import sys, traceback, random
from numpy import array, full
class strip_animation():
def __init__(self,datastore):
self.datastore=datastore
def emit_row(self):
try:
row_arr=
|
full((self.datastore.LED_COUNT,4),0)
|
numpy.full
|
import sys
import unittest
import numpy as np
import pandas as pd
import itertools
from pathlib import Path
sys.path.append('../src/')
from sensorutils.datasets.unimib import UniMib, load, load_raw, reformat
class UniMibTest(unittest.TestCase):
path = None
@classmethod
def setUpClass(cls) -> None:
if cls.path is None:
raise RuntimeError('dataset path is not specified')
def setUp(self):
self.loader = UniMib(self.path)
def tearDown(self):
pass
def test_load_fn(self):
def _check_common(self, data, meta, dtype):
# compare between data and meta
self.assertEqual(len(data), len(meta))
# meta
## type check
self.assertIsInstance(meta, pd.DataFrame)
## shape and column check
if dtype in ['full', 'adl', 'fall']:
self.assertSetEqual(set(meta.columns), set(['activity', 'subject', 'trial_id']))
elif dtype == 'raw':
self.assertSetEqual(set(meta.columns), set(['activity', 'subject', 'trial_id', 'gender', 'age', 'height', 'weight']))
else:
self.fail(f'Unexpected case, dtype: {dtype}')
## data type check
# flags_dtype = [dt == np.dtype(np.int8) or dt == np.dtype(np.int16) or dt == np.dtype(np.int32) or dt == np.dtype(np.int64) for dt in meta.dtypes]
flags_dtype = [dt == np.dtype(np.int8) for dt in meta.dtypes]
self.assertTrue(all(flags_dtype))
## data check
if dtype == 'full':
self.assertSetEqual(set(np.unique(meta['activity'])), set(range(1, 17+1)))
self.assertSetEqual(set(np.unique(meta['subject'])), set(range(1, 30+1)))
self.assertSetEqual(set(np.unique(meta['trial_id'])), set(range(1, 6+1)))
elif dtype == 'adl':
self.assertSetEqual(set(np.unique(meta['activity'])), set(range(1, 9+1)))
self.assertSetEqual(set(np.unique(meta['subject'])), set(range(1, 30+1)))
self.assertSetEqual(set(
|
np.unique(meta['trial_id'])
|
numpy.unique
|
import numpy as np
import time
import tkinter as tk
UNIT = 100
DUNGEON_LENGTH = 5
ORIGIN =
|
np.array([UNIT / 2, UNIT / 2])
|
numpy.array
|
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import camog._cfastcsv as cfastcsv
def _do_parse_headers(csv_str, sep=',', nthreads=4):
return cfastcsv.parse_csv(csv_str, sep, nthreads, 0, 1)[0]
def _do_parse_both(csv_str, sep=',', nthreads=4, nheaders=1):
return cfastcsv.parse_csv(csv_str, sep, nthreads, 0, nheaders)
def test_headers1():
res = _do_parse_headers('123,456,789')
assert isinstance(res, list)
assert res == ['123', '456', '789']
def test_empty_headers1():
res = _do_parse_headers(',123,')
assert isinstance(res, list)
assert res == ['', '123', '']
def test_headers_data():
headers, data = _do_parse_both('123,456,789\n123,456,789\n')
assert headers == ['123', '456', '789']
assert np.all(data[0] == np.array([123]))
assert np.all(data[1] == np.array([456]))
assert np.all(data[2] ==
|
np.array([789])
|
numpy.array
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import nose
import copy
import numpy as np
from nose.tools import assert_raises
from numpy.testing import (assert_allclose, assert_array_equal,
assert_almost_equal)
from binet import op
import pycuda.gpuarray as gpuarray
op.init_gpu()
def test_togpu():
X = np.random.randn(3, 5)
Xd = op.to_gpu(X)
assert type(Xd) == gpuarray.GPUArray
assert Xd.shape == X.shape
Xd2 = op.to_gpu(Xd)
assert Xd2 is Xd
def test_tonumpy():
X = np.random.randn(3, 5)
Xd = op.to_gpu(X)
Xh = op.to_cpu(Xd)
assert type(Xh) == np.ndarray
assert Xh.shape == X.shape
assert_allclose(Xh, X)
X2 = op.to_cpu(X)
assert X2 is X
def test_togpu_class():
class MyTest:
def __init__(self):
self.X = np.random.randn(3, 5)
t = MyTest()
Td = op.to_gpu(t)
assert type(Td.X) == gpuarray.GPUArray, "type is %s" % type(Td.X)
assert Td.X.shape == (3, 5)
def test_tonumpy_class():
class MyTest:
def __init__(self):
self.X = np.random.randn(3, 5)
t = MyTest()
Td = op.to_gpu(t)
Th = op.to_cpu(Td)
assert type(Th.X) == np.ndarray
assert Th.X.shape == (3, 5)
def test_tognumpy_list():
X = [np.random.randn(3, 5), "teststring"]
Xd = op.to_gpu(X)
Xh = op.to_cpu(Xd)
assert type(Xh[0]) == np.ndarray
assert Xh[0].shape == X[0].shape
assert_array_equal(Xh[0], X[0])
def test_togpu_list():
X = [np.random.randn(3, 5), "teststring"]
X_orig = copy.deepcopy(X)
Xd = op.to_gpu(X)
assert type(Xd[0]) == op.gpuarray.GPUArray
assert Xd[0].shape == X_orig[0].shape
Xh = op.to_cpu(Xd[0])
assert_allclose(Xh, X_orig[0])
def test_togpu_dict():
X = {'arr': np.random.randn(3, 5), 'str': "teststring"}
X_orig = copy.deepcopy(X)
Xd = op.to_gpu(X)
assert type(Xd['arr']) == op.gpuarray.GPUArray
assert Xd['arr'].shape == X_orig['arr'].shape
Xh = op.to_cpu(Xd['arr'])
assert_allclose(Xh, X_orig['arr'])
def run_function(X, Y_expected, func, rtol=1e-6, with_inplace_test=True, **kwargs):
# CPU, with target argument
Y = np.empty_like(Y_expected)
Yhr = func(X, out=Y, **kwargs)
assert_allclose(Y_expected, Yhr, err_msg="CPU with target", rtol=rtol)
assert Yhr is Y
# CPU, no target argument
Yhr = func(X, **kwargs)
assert_allclose(Y_expected, Yhr, err_msg="CPU, no target", rtol=rtol)
if with_inplace_test:
X2 = X.copy()
Yhr = func(X2, out=X2, **kwargs)
assert_allclose(Y_expected, Yhr, err_msg="CPU, inplace target", rtol=rtol)
assert Yhr is X2
kwargs = op.to_gpu(kwargs)
# GPU, with target
Xd = op.to_gpu(X)
Yd = gpuarray.empty_like(op.to_gpu(Y_expected))
Ydr = func(Xd, out=Yd, **kwargs)
assert_allclose(Y_expected, op.to_cpu(Ydr), err_msg="GPU with target", rtol=rtol)
assert Ydr is Yd
# GPU, no target
Ydr = func(Xd, **kwargs)
assert_allclose(Y_expected, op.to_cpu(Ydr), err_msg="GPU, no target", rtol=rtol)
if with_inplace_test:
Ydr = func(Xd, out=Xd, **kwargs)
assert_allclose(Y_expected, op.to_cpu(Ydr), err_msg="GPU, inplace target", rtol=rtol)
assert Ydr is Xd
def run_function_with_axis(X, ax0_expected, ax1_expected, noax_expected, func, rtol=1e-6):
# CPU, no target argument
ah0 = func(X, axis=0)
assert_allclose(ax0_expected, ah0, err_msg="CPU, axis=0", rtol=rtol)
ah1 = func(X, axis=1)
assert_allclose(ax1_expected, ah1, err_msg="CPU, axis=1", rtol=rtol)
if noax_expected is not None:
ah = func(X)
assert_allclose(noax_expected, ah, err_msg="CPU, no axis", rtol=rtol)
Xd = op.to_gpu(X)
# GPU, no target
ad0 = func(Xd, axis=0)
assert_allclose(ax0_expected, op.to_cpu(ad0), err_msg="GPU, axis=0", rtol=rtol)
ad1 = func(Xd, axis=1)
assert_allclose(ax1_expected, op.to_cpu(ad1), err_msg="GPU, axis=1", rtol=rtol)
if noax_expected is not None:
ad = func(Xd)
assert_allclose(noax_expected, op.to_cpu(ad), err_msg="GPU, no axis", rtol=rtol)
def test_relu():
X = np.random.randn(3, 5).astype(np.float32)
Y_expected = X.copy()
Y_expected[Y_expected <= 0.0] = 0.0
run_function(X, Y_expected, op.relu)
def test_abs():
X = np.random.randn(3, 5).astype(np.float32)
Y_expected = X.copy()
Y_expected[Y_expected <= 0.0] *= -1
run_function(X, Y_expected, op.abs)
def test_sigmoid():
X = np.random.randn(3, 5).astype(np.float32)
Y_expected = 1.0 / (1.0 + np.exp(-X))
run_function(X, Y_expected, op.sigmoid, rtol=1e-4)
def test_tanh():
X = np.random.randn(3, 5).astype(np.float32)
Y_expected = 2 * (1.0 / (1.0 + np.exp(-2*X))) - 1.0
run_function(X, Y_expected, op.tanh, rtol=1e-4)
def test_drelu_delta():
X = np.random.randn(3, 5).astype(np.float32)
A = 5*np.random.randn(3, 5).astype(np.float32)
D = 5*np.random.randn(3, 5).astype(np.float32)
D_expected = D * (A > 0)
Dd = op.to_gpu(D)
Yh = op.drelu_delta(D, A, X)
assert_allclose(D_expected, D, rtol=1e-5, err_msg="CPU")
Ad = op.to_gpu(A)
Xd = op.to_gpu(X)
op.drelu_delta(Dd, Ad, Xd)
assert_allclose(D_expected, op.to_cpu(Dd), rtol=1e-5, err_msg="GPU")
def test_dtanh_delta():
X = np.random.randn(3, 5).astype(np.float32)
A = 5*np.random.randn(3, 5).astype(np.float32)
D = 5*np.random.randn(3, 5).astype(np.float32)
D_expected = D * (1 - A*A)
Dd = op.to_gpu(D)
Yh = op.dtanh_delta(D, A, X)
assert_allclose(D_expected, D, rtol=1e-5, err_msg="CPU")
Ad = op.to_gpu(A)
Xd = op.to_gpu(X)
op.dtanh_delta(Dd, Ad, Xd)
assert_allclose(D_expected, op.to_cpu(Dd), rtol=1e-5, err_msg="GPU")
def test_dsigmoid_delta():
X = np.random.randn(3, 5).astype(np.float32)
A = 5*np.random.randn(30, 50).astype(np.float32)
D = 5*np.random.randn(30, 50).astype(np.float32)
D_expected = D * A*(1 - A)
Dd = op.to_gpu(D)
Yh = op.dsigmoid_delta(D, A, X)
assert_allclose(D_expected, D, rtol=1e-5, err_msg="CPU")
Ad = op.to_gpu(A)
Xd = op.to_gpu(X)
op.dsigmoid_delta(Dd, Ad, Xd)
assert_allclose(D_expected, op.to_cpu(Dd), rtol=1e-5, err_msg="GPU")
def test_toplayer_delta():
X = np.random.randn(3, 5).astype(np.float32)
A = 5*np.random.randn(30, 50).astype(np.float32)
D = 5*np.random.randn(30, 50).astype(np.float32)
D_expected = D.copy()
D_expected = A - D_expected
Dd = op.to_gpu(D)
Yh = op.toplayer_delta(A, D, X)
assert_allclose(D_expected, Yh, rtol=1e-5, err_msg="CPU")
Ad = op.to_gpu(A)
Xd = op.to_gpu(X)
Yhd = op.toplayer_delta(Ad, Dd, Xd)
assert_allclose(D_expected, op.to_cpu(Yhd), rtol=1e-5, err_msg="GPU")
def test_softmax():
X = np.random.randn(30, 50).astype(np.float32)
E = np.exp(X)
Y_expected = E / np.sum(E, axis=1).reshape(-1, 1)
run_function(X, Y_expected, op.softmax, rtol=1e-4)
X = 10000*np.random.randn(30, 50).astype(np.float32)
Y = op.softmax(X)
assert np.all(np.isfinite(Y))
Y = op.softmax(op.to_gpu(X))
assert np.all(np.isfinite(op.to_cpu(Y)))
def test_add_matvec():
X = np.random.randn(3, 4).astype(np.float32)
b1 = np.random.randn(4, 1).astype(np.float32)
b2 = np.random.randn(3, 1).astype(np.float32)
Y_expected1 = X + b1.T
Y_expected2 = X + b2
assert_allclose(Y_expected1, op.add_matvec(X, b1, 1))
assert_allclose(Y_expected2, op.add_matvec(X, b2, 0))
Xd = op.to_gpu(X)
b1d = op.to_gpu(b1)
b2d = op.to_gpu(b2)
assert_allclose(Y_expected1, op.to_cpu(op.add_matvec(Xd, b1d, 1)))
assert_allclose(Y_expected2, op.to_cpu(op.add_matvec(Xd, b2d, 0)))
def test_rand():
X =
|
np.empty((1000, 1000), dtype=np.float32)
|
numpy.empty
|
# -*- coding: utf-8 -*-
"""
Utilities for working with related individuals (crosses, families, etc.).
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/ped.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# third party dependencies
import numpy as np
import numexpr as ne
import pandas
# internal dependencies
import anhima.gt
# constants to represent inheritance states
INHERIT_UNDETERMINED = 0
INHERIT_PARENT1 = 1
INHERIT_PARENT2 = 2
INHERIT_NONSEG_REF = 3
INHERIT_NONSEG_ALT = 4
INHERIT_NONPARENTAL = 5
INHERIT_PARENT_MISSING = 6
INHERIT_MISSING = 7
INHERITANCE_STATES = range(8)
INHERITANCE_LABELS = ('undetermined', 'parent1', 'parent2', 'non-seg ref',
'non-seg alt', 'non-parental', 'parent missing',
'missing')
def diploid_inheritance(parent_diplotype, gamete_haplotypes):
"""
Determine the transmission of parental alleles to a set of gametes.
Parameters
----------
parent_diplotype : array_like, shape (n_variants, 2)
An array of phased genotypes for a single diploid individual, where
each element of the array is an integer corresponding to an allele
index (-1 = missing, 0 = reference allele, 1 = first alternate allele,
2 = second alternate allele, etc.).
gamete_haplotypes : array_like, shape (n_variants, n_gametes)
An array of haplotypes for a set of gametes derived from the given
parent, where each element of the array is an integer corresponding
to an allele index (-1 = missing, 0 = reference allele, 1 = first
alternate allele, 2 = second alternate allele, etc.).
Returns
-------
inheritance : ndarray, uint8, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance, where 1 =
inheritance from first parental haplotype, 2 = inheritance from second
parental haplotype, 3 = inheritance of reference allele from parent
that is homozygous for the reference allele, 4 = inheritance of
alternate allele from parent that is homozygous for the alternate
allele, 5 = non-parental allele, 6 = parental genotype is missing,
7 = gamete allele is missing.
"""
# normalise inputs
parent_diplotype = np.asarray(parent_diplotype)
assert parent_diplotype.ndim == 2
assert parent_diplotype.shape[1] == 2
gamete_haplotypes = np.asarray(gamete_haplotypes)
assert gamete_haplotypes.ndim == 2
# convenience variables
parent1 = parent_diplotype[:, 0, np.newaxis] # noqa
parent2 = parent_diplotype[:, 1, np.newaxis] # noqa
gamete_is_missing = gamete_haplotypes < 0
parent_is_missing = np.any(parent_diplotype < 0, axis=1)
parent_is_hom_ref = anhima.gt.is_hom_ref(parent_diplotype)[:, np.newaxis] # noqa
parent_is_het = anhima.gt.is_het(parent_diplotype)[:, np.newaxis] # noqa
parent_is_hom_alt = anhima.gt.is_hom_alt(parent_diplotype)[:, np.newaxis] # noqa
# need this for broadcasting, but also need to retain original for later
parent_is_missing_bc = parent_is_missing[:, np.newaxis] # noqa
# N.B., use numexpr below where possible to avoid temporary arrays
# utility variable, identify allele calls where inheritance can be
# determined
callable = ne.evaluate('~gamete_is_missing & ~parent_is_missing_bc') # noqa
callable_seg = ne.evaluate('callable & parent_is_het') # noqa
# main inheritance states
inherit_parent1 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent1)'
)
inherit_parent2 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent2)'
)
nonseg_ref = ne.evaluate(
'callable & parent_is_hom_ref & (gamete_haplotypes == parent1)'
)
nonseg_alt = ne.evaluate(
'callable & parent_is_hom_alt & (gamete_haplotypes == parent1)'
)
nonparental = ne.evaluate(
'callable & (gamete_haplotypes != parent1)'
' & (gamete_haplotypes != parent2)'
)
# record inheritance states
# N.B., order in which these are set matters
inheritance = np.zeros_like(gamete_haplotypes, dtype='u1')
inheritance[inherit_parent1] = INHERIT_PARENT1
inheritance[inherit_parent2] = INHERIT_PARENT2
inheritance[nonseg_ref] = INHERIT_NONSEG_REF
inheritance[nonseg_alt] = INHERIT_NONSEG_ALT
inheritance[nonparental] = INHERIT_NONPARENTAL
inheritance[parent_is_missing] = INHERIT_PARENT_MISSING
inheritance[gamete_is_missing] = INHERIT_MISSING
return inheritance
def diploid_mendelian_error_biallelic(parental_genotypes, progeny_genotypes):
"""Implementation of function to find Mendelian errors optimised for
biallelic variants.
"""
# recode genotypes for convenience
parental_genotypes_012 = anhima.gt.as_012(parental_genotypes)
progeny_genotypes_012 = anhima.gt.as_012(progeny_genotypes)
# convenience variables
p1 = parental_genotypes_012[:, 0, np.newaxis] # noqa
p2 = parental_genotypes_012[:, 1, np.newaxis] # noqa
o = progeny_genotypes_012 # noqa
# enumerate all possible combinations of Mendel error genotypes
ex = '((p1 == 0) & (p2 == 0) & (o == 1))' \
' + ((p1 == 0) & (p2 == 0) & (o == 2)) * 2' \
' + ((p1 == 2) & (p2 == 2) & (o == 1))' \
' + ((p1 == 2) & (p2 == 2) & (o == 0)) * 2' \
' + ((p1 == 0) & (p2 == 2) & (o == 0))' \
' + ((p1 == 0) & (p2 == 2) & (o == 2))' \
' + ((p1 == 2) & (p2 == 0) & (o == 0))' \
' + ((p1 == 2) & (p2 == 0) & (o == 2))' \
' + ((p1 == 0) & (p2 == 1) & (o == 2))' \
' + ((p1 == 1) & (p2 == 0) & (o == 2))' \
' + ((p1 == 2) & (p2 == 1) & (o == 0))' \
' + ((p1 == 1) & (p2 == 2) & (o == 0))'
errors = ne.evaluate(ex).astype('u1')
return errors
def diploid_mendelian_error_multiallelic(parental_genotypes, progeny_genotypes,
max_allele):
"""Implementation of function to find Mendelian errors generalised for
multiallelic variants.
"""
# transform genotypes into per-call allele counts
alleles = range(max_allele + 1)
p = anhima.gt.as_allele_counts(parental_genotypes, alleles=alleles)
o = anhima.gt.as_allele_counts(progeny_genotypes, alleles=alleles)
# detect nonparental and hemiparental inheritance by comparing allele
# counts between parents and progeny
ps = p.sum(axis=1)[:, np.newaxis] # add allele counts for both parents
ac_diff = (o - ps).astype('i1')
ac_diff[ac_diff < 0] = 0
# sum over all alleles
errors = np.sum(ac_diff, axis=2).astype('u1')
# detect uniparental inheritance by finding cases where no alleles are
# shared between parents, then comparing progeny allele counts to each
# parent
pc1 = p[:, 0, np.newaxis, :]
pc2 = p[:, 1, np.newaxis, :]
# find variants where parents don't share any alleles
is_shared_allele = (pc1 > 0) & (pc2 > 0)
no_shared_alleles = ~np.any(is_shared_allele, axis=2)
# find calls where progeny genotype is identical to one or the other parent
errors[
no_shared_alleles
& (np.all(o == pc1, axis=2)
| np.all(o == pc2, axis=2))
] = 1
# retrofit where either or both parent has a missing call
is_parent_missing = anhima.gt.is_missing(parental_genotypes)
errors[np.any(is_parent_missing, axis=1)] = 0
return errors
def diploid_mendelian_error(parental_genotypes, progeny_genotypes):
"""Find impossible genotypes according to Mendelian inheritance laws.
Parameters
----------
parental_genotypes : array_like, int
An array of shape (n_variants, 2, 2) where each element of the array
is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
progeny_genotypes : array_like, int
An array of shape (n_variants, n_progeny, 2) where each element of the
array is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
Returns
-------
errors : ndarray, uint8
An array of shape (n_variants, n_progeny) where each element counts
the number of non-Mendelian alleles in a progeny genotype call.
See Also
--------
count_diploid_mendelian_error
Notes
-----
Not applicable to polyploid genotype calls.
Applicable to multiallelic variants.
Assumes that genotypes are unphased.
"""
# check inputs
parental_genotypes = np.asarray(parental_genotypes)
progeny_genotypes = np.asarray(progeny_genotypes)
assert parental_genotypes.ndim == 3
assert progeny_genotypes.ndim == 3
# check the number of variants is equal in parents and progeny
assert parental_genotypes.shape[0] == progeny_genotypes.shape[0]
# check the number of parents
assert parental_genotypes.shape[1] == 2
# check the ploidy
assert parental_genotypes.shape[2] == progeny_genotypes.shape[2] == 2
# determine which implementation to use
max_allele = max(
|
np.amax(parental_genotypes)
|
numpy.amax
|
from __future__ import division, print_function
import cmath
import time
from copy import copy
import os
import argparse
import inspect
from collections import OrderedDict
from timeit import default_timer as timer
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import numpy as np
from numpy import pi, radians, sin, cos, sqrt, clip
from numpy.random import poisson, uniform, randn, rand
from numpy.polynomial.legendre import leggauss
from scipy.integrate import simps
from scipy.special import j1 as J1
try:
from numba import njit, prange
# SAS_NUMBA: 0=None, 1=CPU, 2=GPU
SAS_NUMBA = int(os.environ.get("SAS_NUMBA", "1"))
USE_NUMBA = SAS_NUMBA > 0
USE_CUDA = SAS_NUMBA > 1
except ImportError:
USE_NUMBA = USE_CUDA = False
# Definition of rotation matrices comes from wikipedia:
# https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(angle):
"""Construct a matrix to rotate points about *x* by *angle* degrees."""
a = radians(angle)
R = [[1, 0, 0],
[0, +cos(a), -sin(a)],
[0, +sin(a), +cos(a)]]
return np.matrix(R)
def Ry(angle):
"""Construct a matrix to rotate points about *y* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), 0, +sin(a)],
[0, 1, 0],
[-sin(a), 0, +cos(a)]]
return np.matrix(R)
def Rz(angle):
"""Construct a matrix to rotate points about *z* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), -sin(a), 0],
[+sin(a), +cos(a), 0],
[0, 0, 1]]
return np.matrix(R)
def pol2rec(r, theta, phi):
"""
Convert from 3D polar coordinates to rectangular coordinates.
"""
theta, phi = radians(theta), radians(phi)
x = +r * sin(theta) * cos(phi)
y = +r * sin(theta)*sin(phi)
z = +r * cos(theta)
return x, y, z
def rotation(theta, phi, psi):
"""
Build the combined rotation matrix for the jitter angles (theta, phi, psi).
Angles are in degrees; the result is a 3 x 3 numpy matrix.
"""
return Rx(phi)*Ry(theta)*Rz(psi)
def apply_view(points, view):
"""
Apply the view transform (theta, phi, psi) to a set of points.
Points are stored in a 3 x n numpy array.
View angles are in degrees.
"""
theta, phi, psi = view
return np.asarray((Rz(phi)*Ry(theta)*Rz(psi))*np.matrix(points.T)).T
def invert_view(qx, qy, view):
"""
Return (qa, qb, qc) for the (theta, phi, psi) view angle at detector
pixel (qx, qy).
View angles are in degrees.
"""
theta, phi, psi = view
q = np.vstack((qx.flatten(), qy.flatten(), 0*qx.flatten()))
return np.asarray((Rz(-psi)*Ry(-theta)*Rz(-phi))*np.matrix(q))
class Shape:
rotation = np.matrix([[1., 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0., 0., 0.])[:, None]
r_max = None
is_magnetic = False
def volume(self):
# type: () -> float
raise NotImplementedError()
def sample(self, density):
# type: (float) -> np.ndarray[N], np.ndarray[N, 3]
raise NotImplementedError()
def dims(self):
# type: () -> float, float, float
raise NotImplementedError()
def rotate(self, theta, phi, psi):
self.rotation = rotation(theta, phi, psi) * self.rotation
return self
def shift(self, x, y, z):
self.center = self.center + np.array([x, y, z])[:, None]
return self
def _adjust(self, points):
points = np.asarray(self.rotation * np.matrix(points.T)) + self.center
return points.T
def r_bins(self, q, over_sampling=1, r_step=None):
return r_bins(q, r_max=self.r_max, r_step=r_step,
over_sampling=over_sampling)
class Composite(Shape):
def __init__(self, shapes, center=(0, 0, 0), orientation=(0, 0, 0)):
self.shapes = shapes
self.rotate(*orientation)
self.shift(*center)
# Find the worst case distance between any two points amongst a set
# of shapes independent of orientation. This could easily be a
# factor of two worse than necessary, e.g., a pair of thin rods
# end-to-end vs the same pair side-by-side.
distances = [((s1.r_max + s2.r_max)/2
+ sqrt(np.sum((s1.center - s2.center)**2)))
for s1 in shapes
for s2 in shapes]
self.r_max = max(distances + [s.r_max for s in shapes])
self.volume = sum(shape.volume for shape in self.shapes)
def sample(self, density):
values, points = zip(*(shape.sample(density) for shape in self.shapes))
return np.hstack(values), self._adjust(np.vstack(points))
class Box(Shape):
def __init__(self, a, b, c,
value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.a, self.b, self.c = a, b, c
self._scale = np.array([a/2, b/2, c/2])[None, :]
self.r_max = sqrt(a**2 + b**2 + c**2)
self.dims = a, b, c
self.volume = a*b*c
def sample(self, density):
num_points = poisson(density*self.volume)
points = self._scale*uniform(-1, 1, size=(num_points, 3))
values = self.value.repeat(points.shape[0])
return values, self._adjust(points)
class EllipticalCylinder(Shape):
def __init__(self, ra, rb, length,
value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.ra, self.rb, self.length = ra, rb, length
self._scale = np.array([ra, rb, length/2])[None, :]
self.r_max = sqrt(4*max(ra, rb)**2 + length**2)
self.dims = 2*ra, 2*rb, length
self.volume = pi*ra*rb*length
def sample(self, density):
# randomly sample from a box of side length 2*r, excluding anything
# not in the cylinder
num_points = poisson(density*4*self.ra*self.rb*self.length)
points = uniform(-1, 1, size=(num_points, 3))
radius = points[:, 0]**2 + points[:, 1]**2
points = points[radius <= 1]
values = self.value.repeat(points.shape[0])
return values, self._adjust(self._scale*points)
class EllipticalBicelle(Shape):
def __init__(self, ra, rb, length,
thick_rim, thick_face,
value_core, value_rim, value_face,
center=(0, 0, 0), orientation=(0, 0, 0)):
self.rotate(*orientation)
self.shift(*center)
self.value = value_core
self.ra, self.rb, self.length = ra, rb, length
self.thick_rim, self.thick_face = thick_rim, thick_face
self.value_rim, self.value_face = value_rim, value_face
# reset cylinder to outer dimensions for calculating scale, etc.
ra = self.ra + self.thick_rim
rb = self.rb + self.thick_rim
length = self.length + 2*self.thick_face
self._scale = np.array([ra, rb, length/2])[None, :]
self.r_max = sqrt(4*max(ra, rb)**2 + length**2)
self.dims = 2*ra, 2*rb, length
self.volume = pi*ra*rb*length
def sample(self, density):
# randomly sample from a box of side length 2*r, excluding anything
# not in the cylinder
ra = self.ra + self.thick_rim
rb = self.rb + self.thick_rim
length = self.length + 2*self.thick_face
num_points = poisson(density*4*ra*rb*length)
points = uniform(-1, 1, size=(num_points, 3))
radius = points[:, 0]**2 + points[:, 1]**2
points = points[radius <= 1]
# set all to core value first
values = np.ones_like(points[:, 0])*self.value
# then set value to face value where scaled |z| exceeds length/(length + 2*thick_face), i.e. in the face caps
values[abs(points[:, 2]) > self.length/(self.length + 2*self.thick_face)] = self.value_face
# finally set value to rim value if outside the core ellipse
radius = (points[:, 0]**2*(1 + self.thick_rim/self.ra)**2
+ points[:, 1]**2*(1 + self.thick_rim/self.rb)**2)
values[radius>1] = self.value_rim
return values, self._adjust(self._scale*points)
class TruncatedSphere(Shape):
"""
Sphere of radius r, with points z < -h truncated.
"""
def __init__(self, r, h, value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.r, self.h = r, h
# Max distance between points in the shape is the maximum diameter
self.r_max = 2*r if h >= 0 else 2*
|
sqrt(r**2 - h**2)
|
numpy.sqrt
|
#load data
import numpy as np
data = np.load("GOH-derivs.npz")
invs = data['invs']
dWdI1 = data['dWdI1']
dWdI4 = data['dWdI4']
stretches = data['stretches']
stress = data['stress']
nprotocol = 8
def stress_from_inv(dWdI1,dWdI4):
n = len(dWdI1)
S = []
M = np.array([1.,0.,0.])
for i in range(n):
F = np.array([[stretches[i,0],0,0],[0,stretches[i,1],0],[0,0,1./stretches[i,0]/stretches[i,1]]])
s = 2*dWdI1[i]*F@(F.T) + 2*dWdI4[i]*F@(np.outer(M,M))@(F.T)
s -= s[2,2]*np.eye(3)
S += [s[0,0],s[1,1]]
return S
################## GP Part ####################
import torch
import sys
from os.path import dirname, abspath
sys.path.insert(0,dirname(dirname(dirname(abspath(__file__)))))
import gpytorch
train_x = invs.copy()
train_x[:,0] -= 3.
train_x[:,1] -= 1.
#train_x[:,1] *= 10.
#train_x[:,0] *= 10.
train_x = torch.from_numpy(train_x).float()
train_y = torch.vstack((torch.atleast_2d(torch.from_numpy(dWdI1)), torch.atleast_2d(torch.from_numpy(dWdI4)))).T.reshape(-1).float()
train_y += 0.02 * torch.randn(train_y.shape) #add noise
ndata,ndim = train_x.shape
train_index = torch.empty(ndata,ndim+1,dtype=bool)
train_index[:,0]=False
train_index[:,1:]=True
class LinearMeanGrad(gpytorch.means.Mean):
def __init__(self, input_size, batch_shape=torch.Size(), bias=True):
super().__init__()
self.dim = input_size
self.register_parameter(name="weights", parameter=torch.nn.Parameter(torch.randn(*batch_shape, input_size, 1)))
if bias:
self.register_parameter(name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1)))
else:
self.bias = None
def forward(self, x):
res = x.matmul(self.weights)
if self.bias is not None:
res = res + self.bias
dres = self.weights.expand(self.dim,x.shape[-2]).T #will not work for batches
return torch.hstack((res,dres))
class GPModelWithDerivatives(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPModelWithDerivatives, self).__init__(train_x, train_y, likelihood)
#self.mean_module = gpytorch.means.ConstantMeanGrad()
self.mean_module = LinearMeanGrad(2,bias=False)
self.base_kernel = gpytorch.kernels.RBFKernelGrad(ard_num_dims=2)
self.covar_module = gpytorch.kernels.ScaleKernel(self.base_kernel)
def forward(self, x, index):
index = index.reshape(-1)
mean_x = self.mean_module(x).reshape(-1)[index]
full_kernel = self.covar_module(x)
covar_x = full_kernel[..., index,:][...,:,index]
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood() # Value + x-derivative + y-derivative
model = GPModelWithDerivatives((train_x,train_index), train_y, likelihood)
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iter = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.05) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iter):
optimizer.zero_grad()
output = model(train_x,train_index)
loss = -mll(output, train_y)
loss.backward()
print("Iter %d/%d - Loss: %.3f lengthscales: %.3f, %.3f noise: %.3f" % (
i + 1, training_iter, loss.item(),
model.covar_module.base_kernel.lengthscale.squeeze()[0],
model.covar_module.base_kernel.lengthscale.squeeze()[1],
model.likelihood.noise.item()
))
optimizer.step()
torch.save(model.state_dict(), 'model_state.pth')
# Set into eval mode
model.eval()
likelihood.eval()
predictions = likelihood(model(train_x,train_index))
means = predictions.mean.detach().numpy()
dWdI1p = means[::2]
dWdI4p = means[1::2]
stressp = np.array(stress_from_inv(dWdI1p,dWdI4p)).reshape(-1,2)
################# Plotting to compare ##################
from matplotlib import pyplot as plt
#plt.plot(invs[:,0]-3,dWdI1,'o')
#plt.plot(invs[:,1]-1,dWdI4,'o')
#plt.plot(invs[:,0].reshape(nprotocol,-1).T-3,dWdI1p.reshape(nprotocol,-1).T)
#plt.plot(invs[:,1].reshape(nprotocol,-1).T-1,dWdI4p.reshape(nprotocol,-1).T)
#plt.show()
#plt.plot(stress[:,0].reshape(nprotocol,-1).T)
#plt.plot(stressp[:,0].reshape(nprotocol,-1).T)
#plt.show()
#plt.plot(stress[:,1].reshape(nprotocol,-1).T)
#plt.plot(stressp[:,1].reshape(nprotocol,-1).T)
#plt.show()
from mpl_toolkits.mplot3d import Axes3D
# Initialize plot
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(1,2, 1, projection='3d')
ax2 = fig.add_subplot(1,2, 2, projection='3d')
color_idx =
|
np.linspace(0, 1, nprotocol)
|
numpy.linspace
|
import numpy as np
import pandas as pd
from scipy.fftpack import fft, ifft, fftfreq
import itertools
import copy
from pathlib import Path
from ..core import split_using_sliding_window, split_using_target
from typing import Union, List, Dict, Tuple
from .base import BaseDataset
__all__ = ['HHAR', 'load']
# Meta Info
# ATTRIBUTES = ['acc','gyro']
DEVICE_TYPES = ['Phone', 'Watch']
SENSOR_TYPES = ['accelerometer', 'gyroscope']
SUBJECTS = dict(zip(['a','b','c','d','e','f','g','h','i'], list(range(9))))
ACTIVITIES = {
'bike': 1,
'sit': 2,
'stand': 3,
'walk': 4,
'stairsup': 5,
'stairsdown': 6,
'null': 0,
}
PHONE_DEVICES = {
'nexus4_1': 0, 'nexus4_2': 1,
's3_1': 2, 's3_2': 3,
's3mini_1': 4,'s3mini_2': 5,
'samsungold_1': 6, 'samsungold_2': 7,
}
WATCH_DEVICES = {
'gear_1': 8, 'gear_2': 9,
'lgwatch_1': 10, 'lgwatch_2': 11,
}
Column = [
'Index',
'Arrival_Time',
'Creation_Time',
'x',
'y',
'z',
'User',
'Model', 'Device', 'gt',
]
def __id2act(act_id:int) -> str:
    return {i: act for act, i in ACTIVITIES.items()}[act_id]  # invert the name->id mapping
def __act2id(act:str) -> int:
if act in ACTIVITIES:
return ACTIVITIES[act]
raise ValueError(f'Unknown activity ({act})')
def __name2id(name:str, name_list:Dict[str, int]) -> int:
if name in name_list:
return name_list[name]
raise ValueError(f'Unknown name ({name})')
class HHAR(BaseDataset):
"""HHAR
Loader class for activity classification on the HHAR <https://archive.ics.uci.edu/ml/datasets/Heterogeneity+Activity+Recognition> dataset
"""
def __init__(self, path:Union[str,Path]):
"""
Parameters
----------
path : Union[str,Path]
Directory Path that include csv file: Phones_[accelerometer,gyroscope].csv, Watch_[accelerometer,gyroscope].csv
"""
if type(path) is str:
path = Path(path)
super().__init__(path)
def load(self, sensor_types:Union[List[str], str], device_types:Union[List[str], str], window_size:int, stride:int, subjects:Union[list, None]=None):
"""HHARの読み込みとsliding-window
Parameters
----------
sensor_type:
センサタイプ."acceleromter" or "gyroscope" or ["acceleromter", "gyroscope"]
[Caution!!!]
"accelerometer"と"gyroscope"の同時指定は非推奨.
device_type:
デバイスタイプ."Phone" or "Watch"
window_size: int
フレーム分けするサンプルサイズ
stride: int
ウィンドウの移動幅
subjects:
ロードする被験者を指定
Returns
-------
(x_frames, y_frames): tuple
sliding-windowで切り出した入力とターゲットのフレームリスト
y_framesはデータセット内の値をそのまま返すため,分類で用いる際はラベルの再割り当てが必要となることに注意
y_framesのshapeは(*, 4)であり,axis=1ではUser, Model, Device, Activityの順でデータが格納されている.
"""
if isinstance(sensor_types, str):
sensor_types = [sensor_types]
if not isinstance(sensor_types, list):
raise TypeError('expected type of "sensor_types" is str or list, but got {}'.format(type(sensor_types)))
if isinstance(device_types, str):
device_types = [device_types]
if not isinstance(device_types, list):
raise TypeError('expected type of "device_types" is str or list, but got {}'.format(type(device_types)))
segments = []
for dev_type in device_types:
segments += load(self.path, sensor_type=sensor_types, device_type=dev_type)
n_ch = len(sensor_types)
segments = [seg.reshape(-1, n_ch*10) for seg in segments]
frames = []
for seg in segments:
fs = split_using_sliding_window(
np.array(seg), window_size=window_size, stride=stride,
ftrim=0, btrim=0,
return_error_value=None)
if fs is not None:
frames += [fs]
else:
# print('no frame')
pass
frames = np.concatenate(frames)
N, ws, _ = frames.shape
frames = frames.reshape([N, ws, n_ch, 10])
x_frames = frames[:, :, :, 3:6].reshape([N, ws, -1])
y_frames = frames[:, 0, 0, 6:]
# subject filtering
if subjects is not None:
flags = np.zeros(len(x_frames), dtype=bool)
for sub in subjects:
flags = np.logical_or(flags, y_frames[:, 0] == SUBJECTS[sub])
x_frames = x_frames[flags]
y_frames = y_frames[flags]
return x_frames, y_frames
def _load_as_dataframe(path:Path, device_type:str):
df = pd.read_csv(path)
df['gt'] = df['gt'].fillna('null')
df['gt'] = df['gt'].map(__act2id)
df['User'] = df['User'].map(lambda x: __name2id(x, SUBJECTS))
dev_list = copy.deepcopy(PHONE_DEVICES) if device_type == DEVICE_TYPES[0] else copy.deepcopy(WATCH_DEVICES)
df['Device'] = df['Device'].map(lambda x: __name2id(x, dev_list))
return df
def _load_segments(path:Path, sensor_type:str, device_type:str):
"""
In the HHAR dataset, subjects act according to a predefined script while the accompanying sensor data are collected.
Since every subject records data on every device used in HHAR,
splitting into segments over all combinations of subject and device, as in the code below,
makes it easy to concatenate the accelerometer and gyroscope data.
"""
df = _load_as_dataframe(path, device_type)
if device_type == DEVICE_TYPES[0]:
n_dev, base = 8, 0
elif device_type == DEVICE_TYPES[1]:
n_dev, base = 4, 8
segments = {}
# split by subjects
splited_sub = split_using_target(df.to_numpy(), df['User'].to_numpy())
for sub in range(9):
segments[sub] = {}
if sub in splited_sub:
assert len(splited_sub[sub]) == 1, 'detect not expected pattern'
seg = splited_sub[sub][0]
# split by device
splited_sub_dev = split_using_target(seg, seg[:, -2])
else:
assert True, 'detect not expected pattern'
splited_sub_dev = {}
# The smallest device ID differs between Phone and Watch
for dev in range(base, base+n_dev):
segments[sub][dev] = {}
if dev in splited_sub_dev:
assert len(splited_sub_dev[dev]) == 1, 'detect not expected pattern, ({})'.format(len(splited_sub_dev[dev]))
seg = splited_sub_dev[dev][0]
splited_sub_dev_act = split_using_target(seg, seg[:, -1])
else:
assert True, 'detect not expected pattern'
splited_sub_dev_act = {}
for act in range(0, 7):
if act in splited_sub_dev_act:
segments[sub][dev][act] = splited_sub_dev_act[act]
else:
segments[sub][dev][act] = None
return segments
def _lpf(y:np.ndarray, fpass:int, fs:int) -> np.ndarray:
"""low pass filter
Parameters
----------
y: np.ndarray
source data
fpass: float
cutoff frequency
fs: int
sampling frequency
Returns
-------
np.ndarray:
filtered data
"""
yf = fft(y.copy())
freq = fftfreq(len(y), 1./fs)
idx = np.logical_or(freq > fpass, freq < -fpass)
yf[idx] = 0.
yd = ifft(yf)
yd = np.real(yd)
return yd
def _align_creation_time(seg_acc, seg_gyro):
if seg_acc[0, 2] == seg_gyro[0, 2]:
return seg_acc, seg_gyro
elif seg_acc[0, 2] < seg_gyro[0, 2]:
fst, snd = 0, 1
elif seg_acc[0, 2] > seg_gyro[0, 2]:
fst, snd = 1, 0
segs = [seg_acc, seg_gyro]
if segs[snd][0, 2] >= segs[fst][-1, 2]:
raise RuntimeError('This is a bug. The correspondence between acc and gyro is probably invalid.')
for i in range(1, len(segs[fst])):
if segs[fst][i, 2] == segs[snd][0, 2]:
segs[fst] = segs[fst][i:]
return segs
elif segs[fst][i, 2] > segs[snd][0, 2]:
if segs[fst][i-1, 2] - segs[snd][0, 2] < segs[fst][i, 2] - segs[snd][0, 2]:
segs[fst] = segs[fst][i-1:]
else:
segs[fst] = segs[fst][i:]
return segs
def load(path:Path, sensor_type:Union[str, List[str]], device_type:str='Watch') -> List[np.ndarray]:
"""HHAR の加速度センサデータの読み込み関数
Parameters
----------
path: Path
HHARデータセットのディレクトリ
sensor_type:
"accelerometer" or "gyroscope" or ["accelerometer", "gyroscope"]
[Caution!!!]
"accelerometer"と"gyroscope"の同時指定は非推奨.
device_type:
"Watch" or "Phone"
Returns
-------
segments: list
被験者・デバイス・行動ラベルをもとにセグメンテーションされたデータ
See Also
--------
HHARデータセットは加速度センサデータとジャイロセンサデータを同時に入力として用いることをあまり想定していない(っぽい).
そのため,厳密に加速度とジャイロセンサデータの連結しよとすると非常にコストが高い.
そこで今回は精度を犠牲にして計算コストを下げている.
"""
if (device_type[0] == 'w') or (device_type[0] == 'W'):
device_type = DEVICE_TYPES[1]
else:
device_type = DEVICE_TYPES[0] # default
print('Device: {}'.format(device_type))
if isinstance(sensor_type, (list, tuple, np.ndarray)):
if len(sensor_type) == 0:
raise ValueError('specified at least one type')
if not (set(sensor_type) <= set(SENSOR_TYPES)):
raise ValueError('include unknown sensor type, {}'.format(sensor_type))
elif isinstance(sensor_type, str):
if sensor_type not in SENSOR_TYPES:
raise ValueError('unknown sensor type, {}'.format(sensor_type))
else:
raise TypeError('expected type of "sensor_type" is list, tuple, numpy.ndarray or str, but got {}'.format(type(sensor_type)))
print('Sensor type: {}'.format(sensor_type))
# prepare csv path
sensor_type_list = [sensor_type] if isinstance(sensor_type, str) else sensor_type
sensor_type_list = sorted(sensor_type_list) # guarantee the order: accelerometer, then gyroscope
if device_type == DEVICE_TYPES[0]:
csv_files = list(path / (f'Phones_{sensor_type}.csv') for sensor_type in sensor_type_list)
elif device_type == DEVICE_TYPES[1]:
csv_files = list(path / (f'Watch_{sensor_type}.csv') for sensor_type in sensor_type_list)
if len(sensor_type_list) == 1:
df = _load_as_dataframe(csv_files[0], device_type)
domains = (df['gt'] + df['User']*10 + df['Device']*100).to_numpy()
segments = split_using_target(df.to_numpy(), domains)
segments = list(itertools.chain(*list(segments.values())))
elif set(sensor_type_list) == set(SENSOR_TYPES):
segs = [_load_segments(csv_path, sensor_type, device_type) for sensor_type, csv_path in zip(sensor_type_list, csv_files)]
segs_acc_sub_dev_act, segs_gyro_sub_dev_act = segs
if device_type == DEVICE_TYPES[0]:
n_dev, base = 8, 0
elif device_type == DEVICE_TYPES[1]:
n_dev, base = 4, 8
# concat acc and gyro
segments = []
patterns = list(itertools.product(range(9), range(base, base+n_dev), range(7))) # subject, device, activity
for sub, dev, act in patterns:
s_accs, s_gyros = segs_acc_sub_dev_act[sub][dev][act], segs_gyro_sub_dev_act[sub][dev][act]
# this case is mainly hit because of missing data
if s_accs is None or s_gyros is None:
# print(' > [skip] ({})-({})-({}), seg_acc = {}, seg_gyro = {}'.format(sub, dev, act, type(s_accs), type(s_gyros)))
continue
# this case is hit when the number of segments differs between acc and gyro
# however, the mismatch comes from slight labeling offsets, so most of the correspondence should still hold
if len(s_accs) != len(s_gyros):
# print(' > [Warning] length of s_accs and s_gyros are different, {}, {}'.format(len(s_accs), len(s_gyros)))
continue
for s_acc, s_gyro in zip(s_accs, s_gyros):
# for s3_1 and s3_2 the accelerometer and gyroscope sampling frequencies differ, which would require downsampling
# the creation_time gap is large for this case, so whether to accept it is open to discussion
if device_type == DEVICE_TYPES[0] and s_acc[0, -2] in [2, 3]:
assert s_gyro[0, -2] in [2, 3], 'this is bug'
# s_gyro[:, 3] = _lpf(s_gyro[:, 3], fpass=150, fs=200)
# s_gyro[:, 4] = _lpf(s_gyro[:, 4], fpass=150, fs=200)
# s_gyro[:, 5] = _lpf(s_gyro[:, 5], fpass=150, fs=200)
# s_gyro = s_gyro[::2]
continue
try:
s_acc, s_gyro = _align_creation_time(s_acc, s_gyro)
except RuntimeError as e:
# for Watch, many segments hit this error for some reason
print(f'>>> {e}')
continue
segs = [s_acc, s_gyro]
min_seg_idx = 0 if len(s_acc) - len(s_gyro) <= 0 else 1
other_idx = (min_seg_idx + 1) % 2
min_len_seg = len(segs[min_seg_idx])
# compare the segment lengths
# print('diff of length of segments: {}, {} ns'.format(len(segs[0]) - len(segs[1]), (segs[0][0, 2]-segs[1][0, 2])*1e-9))
# the creation times at the head of each segment differ only slightly,
# so we align the heads and truncate to the length of the shorter segment
segs[other_idx] = segs[other_idx][:min_len_seg]
# compare the creation time at the head
# d = (segs[min_seg_idx][0, 2] - segs[other_idx][0, 2]) * 1e-9
# if abs(d) > 1e-3:
# print('diff of creation time in front: {} ns'.format(d))
segs = np.concatenate([
|
np.expand_dims(segs[0], 1)
|
numpy.expand_dims
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 14:46:21 2021
@author: michael
"""
# Environment
from abc import ABC
import gym
from gym import spaces
import numpy as np
from scipy.integrate import solve_ivp
from scipy.stats import bernoulli
from collections import deque
from gym.utils import seeding
import torch
import argparse
class CancerControl(gym.Env, ABC):
def __init__(self, patient, t=0.):
# patient is a dictionary: the patient specific parameters:
# A, alpha, K, pars, initial states and the terminal states of original drug administration scheme
# time step: one day
self.t = t
self.A, self.K, self.pars, self.init_states, self.terminate_states, self.weight, \
self.base, self.m1, self.m2, drug_decay, drug_length = patient
self.terminate_states[0] = 5e+8
# the terminal state of sensitive cancer cell is replaced by the capacity of sensitive cancer cell
# note that the terminal state of the AI cancer cell is the main focus of our problem
self.gamma = 0.99 # RL discount factor
# observation space is a continuous space
low = np.array([0., 0., 0., -1, -1, -1, 0], dtype=np.float32)
high = np.array([1, 1, 1, 1, 1, 1, 1], dtype=np.float32)
self.observation_space = spaces.Box(low=low, high=high)
self.treatOnOff = 1 # default On and changes in every step function
self.cpa = np.array([0, 50, 100, 150, 200])
self.leu = np.array([0, 7.5])
self._action_set = np.stack((np.tile(self.cpa, 2), np.sort(self.leu.repeat(5))), axis=1)
self.action_space = spaces.Discrete(10)
self.steps = 0
self.penalty_index = np.zeros(2, dtype=float)
self.reward_index = 0
# the first two denotes the drug dosage, the last two denotes the duration time of each drug's treatment,
# the longest duration for the first line drug is 300 weeks,
# for second line drug, the longest duration is 12 months
# Note that for LEU, the total dosage should be 7.5*8 ml for one treatment duration
pp = (self.A, self.K, self.pars)
self.cancerode = CancerODEGlv(*pp)
self.dose = np.zeros(2)
self.normalized_coef = np.append(self.K, self.K[0] / (1.1 * 5) * self.cancerode.cell_size * 22.1).reshape(-1)
# self._dose = np.zeros(2)
self.dosage_arr = []
self.leu_on = False
self._action = None
self.drug_penalty_decay = drug_decay
self.drug_penalty_length = drug_length
self.drug_penalty_index = 0
self.rest_decay = 0.95
self.max_episodes_steps = 120
self.metastasis_ai_deque = deque(maxlen=121)
self.metastasis_ad_deque = deque(maxlen=121)
def CancerEvo(self, dose):
# get the cell counts and the PSA level of each day
# 28 days for one action
###################
ts = np.linspace(start=self.t, stop=self.t + 28 - 1, num=28, dtype=int)
dose_leu = dose[1]
temp = np.zeros(ts.shape[0], dtype=float)
if dose_leu != 0:
if not self.leu_on:
temp[0:7 * 1] = - 3.75 / 6 * np.linspace(0, 7, 7, endpoint= False)
temp[(7 * 1):(7 * 3)] = (7.5 + 3.75) / (20 - 6) * np.linspace(7, 21, 14, endpoint= False) + (
7.5 - (7.5 + 3.75) / (20 - 6) * 20)
temp[(7 * 3):] = 7.5
else:
temp[:] = 7.5
else: # current dosage is 0
temp[:] = 0
drug = np.repeat(dose.reshape(1, -1), ts.shape[0], axis=0)
drug[:, 1] = temp
self.cancerode.ts = ts
# normalization the drug concentration
self.cancerode.drug = drug * np.array([1 / 200, 1 / 7.5])
y0 = self.states.copy()
# dose = torch.from_numpy(dose_)
t_interval = (int(self.t), int(self.t) + 28 - 1)
out = solve_ivp(self.cancerode.forward, t_span=t_interval, y0=y0, t_eval=ts, method="DOP853") #, atol=1e-7, rtol=1e-5)
# out = Solve_ivp.solver(self.cancerode.forward, ts = ts, y0 = y0, params = (), atol=1e-08, rtol = 1e-05)
dy = self.cancerode.forward(t = int(self.t) + 28 - 1, y = out.y[:,-1].reshape(-1))
return out.y, dy
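# (descriptive note, not part of the original code) one RL step covers 28 days:
# the CPA dose is held constant over the window, the LEU dose follows the piecewise
# ramp built in `temp` when LEU is newly switched on, and the CancerODEGlv system is
# integrated over those 28 days with solve_ivp (DOP853); the last column of the
# solution becomes the next state.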
def step(self, action):
if self.steps == 0:
self.states = self.init_states.copy()
self.penalty_index = np.zeros(2, dtype=float)
self.reward_index = 0
x0, _ = self.init_states[0:2].copy(), self.init_states[2].copy()
self.leu_on = False
# By taking action into next state, and obtain new state and reward
# the action is the next month's dosage
# update states
phi0, _ = self._utilize(self.t, action)
dose_ = self._action_set[action]
self.dose = dose_
_dose = self.dosage_arr[-1] if self.steps > 0 else np.zeros(2, dtype=float)
_dose_leu = _dose[1]
self.leu_on = bool(_dose_leu)
# penalty index for the continuous drug administration, and reward index for no-drug administration
if (dose_ == 0).all() and (self.penalty_index >= 1).all():
self.reward_index += 1
self.penalty_index -= np.ones(2, dtype=float)
if dose_[0] != 0 and dose_[1] == 0:
self.penalty_index[0] += 1.
if self.penalty_index[1] >= 1:
self.penalty_index[1] -= 1.
self.reward_index = 0
if dose_[1] != 0 and dose_[0] == 0:
self.penalty_index[1] += 1.
if self.penalty_index[0] >= 1:
self.penalty_index[0] -= 1
self.reward_index = 0
if (dose_ != 0).all():
self.reward_index = 0
self.penalty_index += np.ones(2, dtype=float)
if bool(action):
self.drug_penalty_index += 1
else:
self.drug_penalty_index -= 1 if self.drug_penalty_index > 0 else 0
evolution, df = self.CancerEvo(dose_)
self.states = evolution[:, -1].clip(
|
np.array([10, 10, 0])
|
numpy.array
|
import os
import numpy as np
from load_error_files import print_error
rtol = 1e-6
atol = 1e-10
def updater(err_dict, err, filename=None, mech_info={}):
def __get_size(name):
if 'rop' in name:
if 'net' in name or 'fwd' in name:
return mech_info['n_reactions']
else:
return mech_info['n_reversible']
elif 'wdot' in name:
return mech_info['n_species']
elif 'phi' in name:
return mech_info['n_species'] + 1
for name in err:
if 'value' in name or 'component' in name or 'store' in name:
continue
errs = err[name]
values = err[name + '_value']
errs = errs / (atol + rtol * np.abs(values))
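# (descriptive note) this is the usual mixed absolute/relative error weighting:
# err_i / (atol + rtol * |value_i|), so a value above 1 means the error exceeds
# the combined tolerance for that component.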
if ('rop_fwd' == name or 'rop_rev' == name) and 'store' in name and np.any(
errs > 1e-4):
from time import ctime
print(filename, ctime(os.path.getmtime(filename)))
print('Bad data detected...')
precs = None
if 'rop_net' in name:
# calculate the precision norms
precs = err['rop_component'] / (atol + rtol *
|
np.abs(values)
|
numpy.abs
|
import json
import math
from collections import deque
from typing import List
import numpy as np
import requests
from multi_agents.plastic.plastic_client.policy import PolicyClient
from multi_agents.dqn_agent.replay_buffer import Transition
from multi_agents import config
class BehaviourDistClient:
""" Map from probabilities to different policies"""
INITIAL_VALUE = 1.0
def __init__(self, policies: List[PolicyClient], num_features: int,
agent_type: str, history_len: int = 1,
correct_policy: str = None, model_type: str = "stochastic"):
# Attributes:
self.num_teams = len(policies)
self._policies = np.array(policies)
self._team_names = np.array([policy.team_name for policy in policies])
self._probabilities = np.array([self.INITIAL_VALUE for _ in policies])
self.agent_type = agent_type
# Memory Bounded Agent:
if agent_type == config.AGENT_TYPE_MEMORY_BOUNDED:
self._transitions_history = deque(maxlen=history_len)
# n - bounds the maximum allowed loss
self.n = 1 / history_len
# Plastic Agent:
elif agent_type == config.AGENT_TYPE_PLASTIC:
self._transitions_history = None
# n - bounds the maximum allowed loss (Plastic used 0.1)
self.n = 0.1
# Random Policy:
elif agent_type == config.AGENT_TYPE_RANDOM_POLICY:
self._transitions_history = None
self.n = 0
# Correct Policy
else:
self._transitions_history = None
self.n = None
self._probabilities = np.array(
[1 if t == correct_policy else 0 for t in self._team_names])
print(self._probabilities)
print(correct_policy, self._team_names)
# Max variation of features (used as norm):
max_arr = np.array([1]*num_features)
min_arr = np.array([-1]*num_features)
self.features_max_variation = np.linalg.norm(max_arr-min_arr)
# Beliefs mode:
if model_type in ["stochastic", "adversarial"]:
self.model_type = model_type
else:
raise ValueError("Mode:type. Expected (stochastic, adversarial)")
@property
def team_names(self) -> np.ndarray:
return self._team_names
def _is_stochastic(self) -> bool:
return self.model_type == "stochastic"
def _is_adversarial(self) -> bool:
return self.model_type == "adversarial"
def _set_policy(self, team_name: str, policy: PolicyClient):
idx = np.where(self._team_names == team_name)[0][0]
self._policies[idx] = policy
def _request_similarity(self, transition: Transition) -> dict:
data = {"state": transition.obs.tolist(),
"next_state": transition.new_obs.tolist()}
response = requests.post(url=config.URL_POST_SIMILARITY,
json=json.dumps(data))
return response.json()
def _normalize_probabilities(self):
# Convert values to the sum of 1:
self._probabilities /= self._probabilities.sum()
def _calc_similarity_array(self, transition: Transition) -> np.ndarray:
"""
Returns the similarity between each known model and the one the agent
is interacting with.
Each similarity is a float value in [0, 1]; the lower the value, the
more similar the model is.
"""
dist_list = []
sim_dict = self._request_similarity(transition=transition)
for team in self.team_names:
dist_list.append(sim_dict[team])
similarity_array = np.array(dist_list)
# Normalize values:
similarity_array = similarity_array / self.features_max_variation
return similarity_array
def _baysian_update(self, transition: Transition, n: float = 0.1):
""" polynomial weights algorithm from regret minimization
function UpdateBeliefs(BehDistr, s, a):
    for (π, m) ∈ BehDistr do
        loss = 1 − P(a|m, s)
        BehDistr(m) *= (1 − η·loss)
    Normalize BehDistr
    return BehDistr
"""
# Get similarity between all the available policies:
similarity_array = self._calc_similarity_array(transition)
# Invert values: (the most similar is near 1, else near 0)
likelihood_array = 1 - similarity_array
# Re-calculate probabilities:
for idx in range(len(self._probabilities)):
# loss = 1 −P(a|m, s)
loss = 1 - likelihood_array[idx]
# BehaviorDistr(m) *= (1 − η·loss):
self._probabilities[idx] *= (1 - (n * loss))
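# Illustrative example of the polynomial-weights update above (numbers are made up):
# with n = 0.1 and two models whose current weights are [1.0, 1.0], a transition that
# matches model 0 well (loss 0.1) and model 1 poorly (loss 0.9) gives
#   w0 = 1.0 * (1 - 0.1 * 0.1) = 0.99
#   w1 = 1.0 * (1 - 0.1 * 0.9) = 0.91
# so, after normalization, the belief shifts toward model 0.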
def _adversarial_update(self, transition: Transition, n: float = 0.1):
""" Adversarial Plastic Policy """
# Get similarity between models. The lowest value, the similar it is:
similarity_array = self._calc_similarity_array(transition)
# Re-calculate probabilities:
for idx, prob in enumerate(self._probabilities):
# η·d_i
var = n * similarity_array[idx]
# W_i(t+1) = W_i(t) · exp(-η·d_i):
self._probabilities[idx] = prob * math.exp(-var)
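# (descriptive note) the adversarial variant uses exponential weights instead:
# w_i <- w_i * exp(-n * d_i), where d_i is the normalized distance of model i to the
# observed transition, so the least similar models decay fastest.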
def _calc_new_prob(self, transition: Transition):
"""
@param transition:
"""
if self._is_stochastic():
return self._baysian_update(transition)
elif self._is_adversarial():
return self._adversarial_update(transition)
else:
raise ValueError()
def _select_stochastic_action(self, s: np.ndarray, legal_actions: list):
policy: PolicyClient = self.get_best_policy()
q_predict = policy.dqn.predict(s)[0]
# Set illegal actions to zero:
for i in range(len(q_predict)):
if i not in legal_actions:
q_predict[i] = -2000
# Greedy choice:
max_list = np.where(q_predict == q_predict.max())
if len(max_list[0]) > 1:
action = np.random.choice(max_list[0])
else:
action = np.argmax(q_predict)
return int(action)
def _select_adversarial_action(self, s: np.ndarray, legal_actions: list):
""" Adversarial Plastic Policy """
# Get advice vectors (per-team Q-value predictions) for each team:
advice_vectors = []
for policy in self._policies:
q_predict = policy.dqn.predict(s)[0]
# Add to advice vectors:
advice_vectors.append(q_predict)
# Get Wt:
total_probabilities = self._probabilities.sum()
# Calculate actions probabilities:
num_actions = len(advice_vectors[0])
act_probs = np.array([-2000] * num_actions)
for action in legal_actions:
action_teams_prob = 0
for t_idx, t_prob in enumerate(self._probabilities):
action_prob = t_prob * advice_vectors[t_idx][action]
action_teams_prob += (action_prob / total_probabilities)
act_probs[action] = action_teams_prob
# Greedy choice:
max_list = np.where(act_probs == act_probs.max())
if len(max_list[0]) > 1:
action = np.random.choice(max_list[0])
else:
action = np.argmax(act_probs)
return int(action)
def get_probability(self, team_name: str) -> float:
idx = np.where(self._team_names == team_name)[0][0]
return self._probabilities[idx]
def get_probabilities_dict(self) -> dict:
aux_dict = {}
for team_name in self.team_names:
aux_dict[team_name] = self.get_probability(team_name)
return aux_dict
def update_beliefs(self, transition: Transition):
"""
@param transition: Transition
@return behaviour_dist: updated probability distr
"""
num_teams = len(self._team_names)
# RANDOM Policy:
if self.agent_type == config.AGENT_TYPE_RANDOM_POLICY:
self._probabilities = np.random.random(num_teams)
# Correct Policy:
elif self.agent_type == config.AGENT_TYPE_CORRECT_POLICY:
pass
# Memory Bounded:
elif self.agent_type == config.AGENT_TYPE_MEMORY_BOUNDED:
self._transitions_history.append(transition)
self._probabilities =
|
np.array([self.INITIAL_VALUE] * num_teams)
|
numpy.array
|
'''Class JuMEG_Epocher_Events
Class to extract event/epoch information and save to hdf5
extract mne-events per condition, save to HDF5 file
----------------------------------------------------------------
Author:
--------
<NAME> <<EMAIL>>
Updates:
----------------------------------------------------------------
update: 19.06.2018
complete new, support for IOD and eyetracking events
----------------------------------------------------------------
Example:
--------
#--- example via obj:
from jumeg.epocher.jumeg_epocher import jumeg_epocher
from jumeg.epocher.jumeg_epocher_epochs import JuMEG_Epocher_Epochs
from jumeg.jumeg_base import jumeg_base as jb
#--
jumeg_epocher.template_path ='.'
jumeg_epocher.verbose = verbose
#---
jumeg_epocher_epochs = JuMEG_Epocher_Epochs()
#---
fname = "test.fif"
raw = None
condition_list = ["Cond1","Condi2"]
#--- events: finding events, store into pandas dataframe and save as hdf5
#--- parameter for apply_events_to_hdf
evt_param = { "condition_list":condition_list,
"template_path": template_path,
"template_name": template_name,
"verbose" : verbose
}
(_,raw,epocher_hdf_fname) = jumeg_epocher.apply_events(fname,raw=raw,**evt_param)
#--- epochs
ep_param={
"condition_list": condition_list,
"template_path" : template_path,
"template_name" : template_name,
"verbose" : verbose,
"parameter":{
"event_extention": ".eve",
"save_condition":{"events":True,"epochs":True,"evoked":True}
}}
#---
print "---> EPOCHER Epochs"
print " -> File : "+ fname
print " -> Epocher Template: "+ template_name+"\n"
jumeg_epocher.apply_epochs(fname=fname,raw=raw,**ep_param)
'''
import sys,logging
import numpy as np
import pandas as pd
import mne
from copy import deepcopy
from jumeg.base.jumeg_base import jumeg_base,JuMEG_Base_Basic
from jumeg.epocher.jumeg_epocher_hdf import JuMEG_Epocher_HDF
logger = logging.getLogger('jumeg')
__version__="2020.01.07.001"
pd.set_option('display.precision', 3)
class JuMEG_Epocher_Channel_Baseline(object):
"""
base class for baseline dict definitions & properties
"baseline" :{"method":"avg","type_input":"iod_onset","baseline": [null,0]}
ToDO: use __slots__?
"""
def __init__(self,parameter=None,label="baseline"):
super(JuMEG_Epocher_Channel_Baseline,self).__init__()
self._param = parameter
self._label = label
#---
def _get_param(self,key=None):
try:
if key in self._param[self._label]:
return self._param[self._label][key]
except:
pass
return None
#---
def _set_param(self,key=None,val=None):
self._param[self._label][key] = val
#---baseline type
@property
def method(self): return self._get_param("method")
@method.setter
def method(self,v): self._set_param("method",v)
#---baseline type
@property
def type_input(self): return self._get_param("type_input")
@type_input.setter
def type_input(self,v): self._set_param("type_input",v)
#---baseline
@property
def baseline(self): return self._get_param("baseline")
@baseline.setter
def baseline(self,v): self._set_param("baseline",v)
#---baseline
@property
def onset(self):
if type( self._get_param("baseline") ) is list: return self.baseline[0]
#---baseline
@property
def offset(self):
if type( self._get_param("baseline") ) is list: return self.baseline[1]
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
class JuMEG_Epocher_Basic(JuMEG_Base_Basic):
"""
base class for definitions & properties
"""
def __init__(self):
super().__init__()
self._rt_type_list = ['MISSED', 'TOEARLY', 'WRONG', 'HIT']
self._data_frame_stimulus_cols = ['id','onset','offset']
self._data_frame_response_cols = ['rt_type','rt','rt_id','rt_onset','rt_offset','rt_index','rt_counts','bads','selected','weighted_selected']
self._stat_postfix = '-epocher-stats.csv'
self._idx_bad = -1
#---
@property
def idx_bad(self): return self._idx_bad
#---
@property
def data_frame_stimulus_cols(self): return self._data_frame_stimulus_cols
@data_frame_stimulus_cols.setter
def data_frame_stimulus_cols(self,v): self._data_frame_stimulus_cols = v
#---
@property
def data_frame_response_cols (self): return self._data_frame_response_cols
@data_frame_response_cols.setter
def data_frame_response_cols(self,v): self._data_frame_response_cols = v
#--- rt_type list: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
@property
def rt_type_list(self): return self._rt_type_list
#--- rt type index: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
def rt_type_as_index(self,s):
return self._rt_type_list.index( s.upper() )
@property
def idx_missed(self): return self._rt_type_list.index( 'MISSED')
@property
def idx_toearly(self): return self._rt_type_list.index( 'TOEARLY')
@property
def idx_wrong(self): return self._rt_type_list.index( 'WRONG')
@property
def idx_hit(self): return self._rt_type_list.index( 'HIT')
#---
@property
def data_frame_stimulus_cols(self): return self._data_frame_stimulus_cols
@data_frame_stimulus_cols.setter
def data_frame_stimulus_cols(self,v): self._data_frame_stimulus_cols = v
#---
@property
def data_frame_response_cols(self): return self._data_frame_response_cols
@data_frame_response_cols.setter
def data_frame_response_cols(self,v): self._data_frame_response_cols = v
#--- events stat file (output as csv)
@property
def stat_postfix(self): return self._stat_postfix
@stat_postfix.setter
def stat_postfix(self, v): self._stat_postfix = v
class JuMEG_Epocher_Events_Channel_BaseBase(object):
""" base class to handel epocher template channel parameter
Parameter:
----------
label : first-level key in dictionary <None>
parameter: epocher template parameter as dictionary <None>
Example:
--------
iod_parameter= {"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}
}
response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=iod_parameter)
print(response.channel)
>> IOD
"""
def __init__(self,label=None,parameter=None):
self.label = label
self._param = parameter
#---
def get_channel_parameter(self,key=None,prefix=None):
try:
if prefix:
k = prefix + '_' + key
return self._param[self.label][k]
else:
return self._param[self.label][key]
except:
pass
return None
#return self._param
#---
def set_channel_parameter(self,key=None,val=None,prefix=None):
if key:
if prefix:
self._param[self.label][prefix + '_' + key] = val
else:
self._param[self.label][key] = val
#---
@property
def matching(self):
return self.get_channel_parameter(key="matching")
@matching.setter
def matching(self,v):
self.get_channel_parameter(key="matching",val=v)
#---
@property
def channel(self):
return self.get_channel_parameter(key="channel")
@channel.setter
def channel(self,v):
self.set_channel_parameter(key="channel",val=v)
#---
@property
def prefix(self): return self.get_channel_parameter(key="prefix")
@prefix.setter
def prefix(self,v): self.set_channel_parameter(key="prefix",val=v)
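#--- illustrative example (not part of the original code): with label="response" and
#--- prefix="resp", the prefix-aware accessors read/write the prefixed keys, e.g.
#   obj.get_channel_parameter(key="onset", prefix="resp")         # -> parameter["response"]["resp_onset"]
#   obj.set_channel_parameter(key="onset", val=100, prefix="resp")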
class JuMEG_Epocher_Events_Channel_Base(JuMEG_Epocher_Events_Channel_BaseBase):
""" base class to handel epocher template channel parameter
Parameter:
----------
label : first-level key in dictionary <None>
parameter: epocher template parameter as dictionary <None>
Example:
--------
iod_parameter= {"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}
}
response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=iod_parameter)
print(response.channel)
>> IOD
"""
def __init__(self,label=None,parameter=None):
super().__init__()
self.label = label
self._param = parameter
#---
@property
def matching_type(self): return self.get_channel_parameter(key="matching_type")
@matching_type.setter
def matching_type(self,v): self.set_channel_parameter(key="matching_type",val=v)
#---
@property
def type_input(self): return self.get_channel_parameter(key="type_input")
@type_input.setter
def type_input(self,v): self.set_channel_parameter(key="type_input",val=v)
#---
@property
def type_offset(self): return self.get_channel_parameter(key="type_offset")
@type_offset.setter
def type_offset(self,v): self.set_channel_parameter(key="type_offset",val=v)
#---
@property
def type_output(self): return self.get_channel_parameter(key="type_output")
@type_output.setter
def type_output(self,v): self.set_channel_parameter(key="type_output",val=v)
#---type_result: "hit","wrong","missed"
@property
def type_result(self): return self.get_channel_parameter(key="type_result")
@type_result.setter
def type_result(self,v): self.set_channel_parameter(key="type_result",val=v)
#---
@property
def channel_parameter(self): return self._param[self.channel]
#---
@property
def parameter(self): return self._param
@parameter.setter
def parameter(self,v): self._param = v
#---
def get_value_with_prefix(self,v): return self.get_channel_parameter(key=v,prefix=self.prefix)
#---
def get_parameter(self,k): return self._param[k]
#---
def set_parameter(self,k,v): self._param[k]=v
#---
@property
def time_pre(self): return self.get_parameter("time_pre")
@time_pre.setter
def time_pre(self,v): self.set_parameter("time_pre",v)
#---
@property
def time_post(self): return self.get_parameter("time_post")
@time_post.setter
def time_post(self,v): self.set_parameter("time_post",v)
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
class JuMEG_Epocher_Events_Channel_IOD(object):
"""class to handel epocher template IOD parameter
Parameter:
----------
label : first-level key in dictionary <iod>
parameter: epocher template parameter as dictionary <None>
Return:
--------
None
Example:
--------
input json dictionary
parameter={
"default":{
"Stim":{ "events":{"stim_channel":"STI 014","output":"onset","consecutive":true,"min_duration":0.0005,"shortest_event":1,"mask":0},
"event_id":84,"and_mask":255,"system_delay_ms":0.0,"early_ids_to_ignore":null},
"IOD":{ "events":{ "stim_channel":"STI 013","output":"onset","consecutive":true,"min_duration":0.0005,"shortest_event":1,"mask":0},
"and_mask":128,"window":[0.0,0.2],"counts":"first","system_delay_ms":0.0,"early_ids_to_ignore":null,"event_id":128,"and_mask":255}
},
"cond1":{
"postfix":"cond1",
"info" :" my comments",
"iod" :{"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}},
"StimImageOnset" : {"event_id":94},
"IOD" : {"event_id":128}
}
}
iod = JuMEG_Epocher_Events_Channel_IOD(label="response",parameter=parameter["cond1"])
print(iod.response.channel)
>> IOD
"""
def __init__(self,label="iod",parameter=None):
#super(JuMEG_Epocher_Events_Channel_IOD,self).__init__(label="iod",meter=None)
self._info = None
self.label = label
self._param = parameter
self.response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=parameter["iod"])
self.marker = JuMEG_Epocher_Events_Channel_Base(label="marker", parameter=parameter["iod"])
#---
@property
def iod_matching(self): return self.response.matching
@iod_matching.setter
def iod_matching(self,v): self.response.matching = v
#---
@property
def info(self): return self._info
@info.setter
def info(self,v): self._info = v
#---
@property
def parameter(self): return self._param
@parameter.setter
def parameter(self,v):
self._param = v
self.response.parameter = v["iod"]
self.marker.parameter = v["iod"]
#---
@property
def response_channel_parameter(self): return self._param[self.response.channel]
#---
@property
def marker_channel_parameter(self): return self._param[self.marker.channel]
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
class JuMEG_Epocher_Events_Channel(JuMEG_Epocher_Events_Channel_Base):
'''
class for marker and response channel
'''
def __init__(self,label=None,parameter=None):
super().__init__(label=label,parameter=parameter)
self._info = None
#---
@property
def info(self): return self._info
@info.setter
def info(self,v): self._info = v
#---
@property
def stim_channel(self): return self._param[ self.channel ]["events"]["stim_channel"]
@property
def stim_output(self): return self._param[ self.channel ]["events"]["output"]
class JuMEG_Epocher_Events_Window(JuMEG_Epocher_Events_Channel):
"""
sub class, wrapper for this dict
"window_matching":{
"matching": true,
"channel": "ET_Events",
"window_onset": "iod_onset",
"window_offset": "resp_onset",
"event_type": "onset",
"prefix": "wet"
},
Parameter:
----------
param: label
param: parameter
"""
def __init__(self,label=None,parameter=None):
super().__init__(label=label,parameter=parameter)
@property
def window_onset(self): return self.get_channel_parameter(key="window_onset")
@property
def window_offset(self): return self.get_channel_parameter(key="window_offset")
@property
def event_type(self): return self.get_channel_parameter(key="event_type")
#---
class JuMEG_Epocher_ResponseMatching(JuMEG_Epocher_Basic):
"""
CLS to do response matching
for help refer to JuMEG_Epocher_ResponseMatching.apply() function
"""
#---
def __init__(self,raw=None,stim_df=None,stim_param=None,stim_type_input="onset",stim_prefix="stim",resp_df=None,
resp_param=None,resp_type_input="onset",resp_type_offset="offset",resp_prefix="resp",verbose=False,debug=False):
super().__init__()
self.column_name_list_update = ['div','type','index','counts']
self.column_name_list_extend = ['bads','selected','weighted_selected']
self.raw = raw
self.verbose = verbose
self.debug = debug
self.stim_df_orig = stim_df
self.stim_df = None
self.stim_param = stim_param
self.stim_type_input = stim_type_input
self.stim_prefix = stim_prefix
#---
self.resp_df_orig = resp_df
self.resp_df = None
self.resp_param = resp_param
self.resp_type_input = resp_type_input
self.resp_type_offset = resp_type_offset
self.resp_prefix = resp_prefix
#---
self.DataFrame = None
#---
@property
def div_column(self): return self.resp_prefix+"-div"
def reset_dataframe(self,max_rows):
"""
reset output pandas dataframe
add stimulus,response data frame columns and extend with prefix
init with zeros x MAXROWS
Parameters
----------
max_rows: number of dataframe rows
Returns
-------
dataframe[ zeros x MaxRows ]
"""
col=[]
col.extend( self.stim_df.columns.tolist() )
col.extend( self.resp_df.columns.tolist() )
for key in self.column_name_list_update:
if self.resp_prefix:
k = self.resp_prefix +'_'+ key
else: k = key
if k not in col:
col.append( k )
for k in self.column_name_list_extend:
if k not in col:
col.append(k)
return pd.DataFrame(0,index=range(max_rows),columns=col)
def update(self,**kwargs):
""" update CLS parameter
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
stim_df : pandas.DataFrame [None]
stimulus channel data frame
stim_param : dict() [None]
stimulus parameter from template
stim_type_input : string ["onset"]
data frame column name to process as stimulus input
stim_prefix : string ["iod"]
stimulus column name prefix e.g. to distinguish between different "onset" columns
resp_df : pandas.DataFrame [None]
response channel data frame
resp_param : dict() [None]
response parameter from template
resp_type_input : string ["onset"]
data frame column name to process as response input
resp_prefix : string ["iod"]
response column name prefix e.g. to distinguish between different "onset" columns
verbose : bool [False]
printing information debug
"""
self.raw = kwargs.get("raw",self.raw)
self.stim_param = kwargs.get("stim_param",self.stim_param)
self.stim_type_input = kwargs.get("stim_type_input",self.stim_type_input)
self.stim_prefix = kwargs.get("stim_prefix",self.stim_prefix)
self.resp_param = kwargs.get("resp_param",self.resp_param)
self.resp_type_input = kwargs.get("resp_type_input",self.resp_type_input)
self.resp_type_offset= kwargs.get("resp_type_offset",self.resp_type_offset)
self.resp_prefix = kwargs.get("resp_prefix",self.resp_prefix)
if "verbose" in kwargs.keys():
self.verbose = kwargs.get("verbose")
if "stim_df" in kwargs.keys():
self.stim_df = None
self.stim_df_orig = kwargs.get("stim_df") # df
if "resp_df" in kwargs.keys():
self.resp_df = None
self.resp_df_orig = kwargs.get("resp_df") # df
self.DataFrame = None
if not self.resp_type_offset:
self.resp_type_offset = self.resp_type_input
def _ck_errors(self):
""" checking for errors
Returns:
--------
False if error
"""
#--- ck errors
err_msg =[]
if (self.raw is None):
err_msg.append("ERROR no RAW obj. provided")
if (self.stim_df_orig is None):
err_msg.append("ERROR no Stimulus-Data-Frame obj. provided")
if (self.stim_param is None):
err_msg.append("ERROR no stimulus parameter obj. provided")
if (self.stim_type_input is None):
err_msg.append("ERROR no stimulus type input provided")
if (self.resp_df_orig is None):
err_msg.append("ERROR no Response-Data-Frame obj. provided")
if (self.resp_param is None):
err_msg.append("ERROR no response parameter obj. provided")
if (self.resp_type_input is None):
err_msg.append("ERROR no response type input provided")
try:
if err_msg :
raise(ValueError)
except:
logger.exception(jumeg_base.pp_list2str(err_msg,"JuMEG Epocher Response Matching ERROR check"))
return False
return True
def calc_max_rows(self,tsl0=None,tsl1=None,resp_event_id=None,early_ids_to_ignore=None):
"""
counting the necessary number of rows for dataframe in advance
deprecated
Parameter
---------
tsl0 : response window start in tsls <None>
tsl1 : response window end in tsls <None>
resp_event_id : response event ids <None>
early_ids_to_ignore : ignore these response ids if they are pressed too early <None>
Returns
-------
number of rows to setup the dataframe
"""
max_rows = 0
#--- get the rt-relevant part of the response df
resp_tsls = self.resp_df[ self.resp_type_input ]
for idx in self.stim_df.index :
# st_tsl_onset = self.stim_df[ self.stim_type_input ][idx]
st_window_tsl0 = self.stim_df[ self.stim_type_input ][idx] + tsl0
st_window_tsl1 = self.stim_df[ self.stim_type_input ][idx] + tsl1
if (st_window_tsl0 < 0) or (st_window_tsl1 < 0) : continue
#--- ck for toearly responses
if tsl0 > 0:
resp_index = self.find_toearly(tsl1=tsl0,early_ids_to_ignore=early_ids_to_ignore)
if isinstance(resp_index, np.ndarray):
max_rows+=1
continue
#--- find index of responses from window-start till end of res_event_type array [e.g. onset / offset]
resp_in_index = self.resp_df[ ( st_window_tsl0 <= resp_tsls ) & ( resp_tsls <= st_window_tsl1 ) ].index
#--- MISSED response
if resp_in_index.empty:
max_rows+=1
continue
#---count == all
#--- no response count limit e.g. eye-tracking saccades
#--- count defined resp ids ignore others
if self.resp_param['counts'] == 'all':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
max_rows+=idx_isin_true.size
#--- ck if first resp is True/False e.g. IOD matching
elif self.resp_param['counts'] == 'first':
max_rows+=1
#--- ck for response count limit
elif self.resp_param['counts']:
#--- found responses are <= allowed resp counts
max_rows+=resp_in_index.size
else:
#--- Wrong: found response counts > counts
max_rows+=resp_in_index.size
return max_rows
#---
def info(self):
"""
print info
Parameter
---------
Return
--------
prints statistic from column <response-prefix> -div
e.g. prints differences in tsls between stimulus and IOD onset
"""
logger.info("---> Info Response Matching:\n{}".format(self.DataFrame.to_string()))
ddiv = self.DataFrame[ self.resp_prefix + "_div" ]
zero_ep = ddiv[ddiv == 0 ]
n_zeros = ( ddiv == 0 ).sum()
tsldiv = abs( ddiv.replace(0,np.NaN) )
dmean = tsldiv.mean()
dstd = tsldiv.std()
dmin = tsldiv.min()
dmax = tsldiv.max()
tdmean,tdstd,tdmin,tdmax = 0,0,0,0
if self.raw:
if not np.isnan(dmean):
tdmean = self.raw.times[ int(dmean)]
if not np.isnan(dstd):
tdstd = self.raw.times[ int(dstd )]
if not np.isnan(dmin):
tdmin = self.raw.times[ int(dmin )]
if not np.isnan(dmax):
tdmax = self.raw.times[ int(dmax )]
logger.info("\n".join(["\n --> Response Matching time difference [ms]",
" -> bad epochs count : {:d}".format(n_zeros),
" -> bad epochs : {}\n".format(zero_ep),
" -> mean [ s ]: {:3.3f} std: {:3.3f} max: {:3.3f} min: {:3.3f}".format(tdmean,tdstd,tdmin,tdmax),
" -> mean [tsl]: {:3.3f} std: {:3.3f} max: {:3.3f} min: {:3.3f}".format(dmean,dstd,dmin,dmax),"-"*50]))
#---
def _set_stim_df_resp(self,df,stim_idx=None,df_idx=None,resp_idx=None,resp_type=0,counts=0):
"""set dataframe row
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe
"""
for col in self.stim_df.columns:
df[col][df_idx] = self.stim_df[col][stim_idx]
if self.is_number( resp_idx ):
for col in self.resp_df.columns:
df[col][df_idx] = self.resp_df[col][resp_idx]
df[self.resp_prefix +'_index'][df_idx] = resp_idx
df[self.resp_prefix + '_type'][df_idx] = resp_type
df[self.resp_prefix + "_div"][df_idx] = df[self.resp_type_input][df_idx] - df[self.stim_type_input][df_idx]
else:
for col in self.resp_df.columns:
df[col][df_idx] = 0
df[self.resp_prefix +'_index'][df_idx] = -1 # None/nan needs change to np.float
df[self.resp_prefix +'_type'][df_idx] = self.idx_missed # resp_type
df[self.resp_prefix + "_div"][df_idx] = 0 # None
df[self.resp_prefix + "_counts"][df_idx] = counts
return df
def _set_hit(self,df,stim_idx=None,df_idx=None,resp_idx=None):
""" set dataframe row for correct responses
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe index
"""
cnt = 1
if isinstance(resp_idx,(list)):
for ridx in resp_idx:
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=ridx,resp_type=self.idx_hit,counts=cnt)
cnt += 1
else:
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=resp_idx,resp_type=self.idx_hit,counts=cnt)
return df_idx
def _set_wrong(self,df,stim_idx=None,df_idx=None,resp_idx=None):
""" set dataframe row for wrong responses
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe index
"""
cnt = 0
for ridx in resp_idx:
#df_idx += 1
cnt += 1
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=ridx,resp_type=self.idx_wrong,counts=cnt)
return df_idx
#---
def find_toearly(self,tsl0=0,tsl1=None,early_ids_to_ignore=None):
""" look for part of to early response in dataframe
Parameters
----------
tsl0 : start tsl range <None>
tsl1 : end tsl range <None>
early_ids_to_ignore : ignore these ids in the window tsl-onset <= tsl < tsl0 <None>
Return
------
array Int64Index([ number of toearly responses ], dtype='int64')
"""
if self.resp_param["early_ids_to_ignore"] == 'all':
return
early_idx = self.resp_df[ ( tsl0 <= self.resp_df[ self.resp_type_input ] ) & ( self.resp_df[ self.resp_type_input ] < tsl1 ) ].index
#--- check both button press onset and release (offset)
if self.resp_type_input != self.resp_type_offset:
early_idx_off = self.resp_df[ ( tsl0 <= self.resp_df[ self.resp_type_offset] ) & ( self.resp_df[ self.resp_type_offset ] < tsl1 ) ].index
early_idx = np.unique( np.concatenate((early_idx,early_idx_off), axis=0) )
if early_idx.any():
if self.resp_param['early_ids_to_ignore']:
if early_ids_to_ignore.any():
evt_found = self.resp_df[self.resp_prefix + "_id"][ early_idx ].isin( early_ids_to_ignore ) # true or false
if evt_found.all():
return
found = np.where( evt_found == False )[0]
return found
else:
return early_idx
return None
#---
def apply(self,*kargs, **kwargs):
"""
apply response matching
matching correct responses with respect to <stimulus channel> <output type> (onset,offset)
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
stim_df : pandas.DataFrame [None]
stimulus channel data frame
stim_param : dict() [None]
stimulus parameter from template
stim_type_input : string ["onset"]
data frame column name to process as stimulus input
stim_prefix : string ["iod"]
stimulus column name prefix e.g. to distinguish between different "onset" columns
resp_df : pandas.DataFrame [None]
response channel data frame
resp_param : dict() [None]
response parameter from template
resp_type_input : string ["onset"]
data frame column name to process as response input
resp_prefix : string ["iod"]
response column name prefix e.g. to distinguish between different "onset" columns
verbose : bool [False]
printing information debug
Returns
-------
pandas.DataFrame
"""
self.update(*kargs,**kwargs)
if not self._ck_errors():
return
#--- ck RT window range
if ( self.resp_param['window'][0] >= self.resp_param['window'][1] ):
logger.error(" --> ERROR in response parameter window range: start: {} > end: {}".format(self.resp_param['window'][0],self.resp_param['window'][1]))
return
(r_window_tsl_start, r_window_tsl_end ) = self.raw.time_as_index( self.resp_param['window'] );
#--- get response code -> event_id [int or string] as np array
resp_event_id = jumeg_base.range_to_numpy( self.resp_param['event_id'] )
#logger.info("events: {} stim_prefix: {} res_prefix: {}".format(resp_event_id,self.stim_prefix,self.resp_prefix))
#logger.info("stim_df : {}".format(self.stim_df_orig.columns))
#--- ck/get STIMULUS/MARKER channel event ids to ignore
if self.stim_param.get("event_ids_to_ignore"):
event_ids_to_ignore = jumeg_base.range_to_numpy( self.stim_param["event_ids_to_ignore"] )
evt_label = self.stim_param.get("event_prefix","stim")+"_id"
idx = np.where( ~self.stim_df_orig[evt_label].isin(event_ids_to_ignore) )[0]
self.stim_df = self.stim_df_orig.loc[idx,:]
else:
self.stim_df = self.stim_df_orig
#--- get response ids to ignore
if self.resp_param.get("event_ids_to_ignore"):
event_ids_to_ignore = jumeg_base.range_to_numpy( self.resp_param["event_ids_to_ignore"] )
resp_label = self.resp_param.get("event_prefix","resp" )+"_id"
idx = np.where( ~self.resp_df_orig[resp_label].isin(event_ids_to_ignore) )[0]
self.resp_df = self.resp_df_orig.loc[idx,:]
else:
self.resp_df = self.resp_df_orig
#--- ck if any toearly-id is defined, returns None if not
early_ids_to_ignore = None
if self.resp_param.get("early_ids_to_ignore"):
if self.resp_param["early_ids_to_ignore"] != 'all':
early_ids_to_ignore = jumeg_base.range_to_numpy( self.resp_param['early_ids_to_ignore'] )
#--- loop for all stim events
ridx = 0
df_idx = -1
#--- get the rt-relevant part of the response df
resp_tsls = self.resp_df[ self.resp_type_input ]
max_rows = self.calc_max_rows(tsl0=r_window_tsl_start,tsl1=r_window_tsl_end,resp_event_id=resp_event_id,early_ids_to_ignore=early_ids_to_ignore)
df = self.reset_dataframe( max_rows ) #len(self.stim_df.index)) #max_rows)
for idx in self.stim_df.index :
st_window_tsl0 = self.stim_df[ self.stim_type_input ][idx] + r_window_tsl_start
st_window_tsl1 = self.stim_df[ self.stim_type_input ][idx] + r_window_tsl_end
if (st_window_tsl0 < 0) or (st_window_tsl1 < 0) : continue
#logger.info(" -> resp param wtsl0: {} wtsl1:{} idx:{}".format(st_window_tsl0,st_window_tsl1,idx))
#--- too-early responses, e.g. response window [0.01,1.0] => 0 <= too-early window < 0.01
if r_window_tsl_start > 0:
resp_index = self.find_toearly(tsl1=st_window_tsl0,early_ids_to_ignore=early_ids_to_ignore)
if isinstance(resp_index, np.ndarray):
df_idx +=1
self._set_stim_df_resp(df,stim_idx=idx,df_idx=idx,resp_idx=ridx,resp_type=self.idx_toearly,counts=resp_index.size )
if self.debug:
logger.debug("--->ToEarly : {}\n --> stim df: {}".format(df_idx,self.stim_df))
continue
#--- find index of responses from window-start till end of res_event_type array [e.g. onset / offset]
resp_in_index = self.resp_df[ ( st_window_tsl0 <= resp_tsls ) & ( resp_tsls <= st_window_tsl1) ].index
if self.debug:
logger.debug(" -> resp in idx : {} \n -> tsls:\n{}\n -> {}".format(resp_in_index,resp_tsls,self.resp_df.loc[ resp_in_index,:]))
#--- MISSED response
if resp_in_index.empty:
df_idx += 1
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
continue
#---count == all
#--- no response count limit e.g. eye-tracking saccades
#--- count defined resp ids ignore others
#--- e.g.: find 11 in seq starting with 5 => 5,7,8,11 => resp_event_id =[11]
if self.resp_param['counts'] == 'all':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
#--- get index of True Hits
resp_in_idx_hits = resp_in_index[idx_isin_true]
if self.debug:
logger.debug("---> <counts == all>: idx:{}\n -> {}".format(idx_isin_true,resp_in_idx_hits))
df_idx += 1
if idx_isin_true.shape[0]:
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_idx_hits)
else:
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED in <counts == all>: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
elif self.resp_param['counts'] == 'any':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
logger.info("ANY:\n {}\n event id: {}\n isin: {}".format(self.resp_df[self.resp_prefix + "_id"][ resp_in_index ],resp_event_id,idx_isin_true ))
df_idx += 1
if idx_isin_true.shape[0]:
#--- get index of True Hits
resp_in_idx_hits = resp_in_index[ idx_isin_true[0] ]
# if self.debug:
logger.info("---> <counts == any>: idx:{}\n -> {}".format(idx_isin_true,resp_in_idx_hits))
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_idx_hits)
else:
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED in <counts == all>: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
#--- ck if first resp is True/False e.g. IOD matching
elif self.resp_param['counts'] == 'first':
if ( self.resp_df[self.resp_prefix + "_id"][ resp_in_index[0] ] in resp_event_id ):
df_idx += 1
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=[ resp_in_index[0] ] )
else:
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=[resp_in_index[0]] )
#--- ck for response count limit
elif self.resp_param['counts']:
#--- found responses are <= allowed resp counts
if ( resp_in_index.size <= self.resp_param['counts'] ):
#--- HITS: all responses are in response event id
if np.all( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) ) :
df_idx += 1
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index )
else:
#--- Wrong: not all responses are in response event id =>found responses are > allowed resp counts
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index)
#--- Wrong: found response counts > counts
else:
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index)
self.DataFrame = df
if self.debug:
self.info()
return df
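#--- usage sketch for JuMEG_Epocher_ResponseMatching (illustrative only; the data frames
#--- and parameters would normally come from events_find_events and the epocher template):
#   rm = JuMEG_Epocher_ResponseMatching()
#   df = rm.apply(raw=raw,
#                 stim_df=stim_df,stim_param=stim_param,stim_type_input="onset",stim_prefix="stim",
#                 resp_df=resp_df,resp_param=resp_param,resp_type_input="onset",resp_prefix="resp")
#   # df holds one row per stimulus with <resp_prefix>_type set to HIT/WRONG/MISSED/TOEARLY
#   # and <resp_prefix>_div holding the stimulus-to-response latency in samples (tsl).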
class JuMEG_Epocher_WindowMatching(JuMEG_Epocher_Basic):
"""
find events from the events dataframe
which occur in a window between <marker_onset> and <marker_offset> given by a marker window
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
marker_df : pandas.DataFrame [None]
data frame with onset/offset columns
window_onset : string ["onset"]
window-DataFrame column window onset
window_offset : string ["offset"]
window-DataFrame column window offset
event_df : pandas.DataFrame events
e.g. event/response codes from channel in <stim> <resp> group e.g.: STI 014/STI 013
event_type : DataFrame column label e.g: <prefix>onset
verbose : bool [False]
printing information debug
debug : bool [False]
Example Template:
-----------------
"SeResp": {
"postfix": "SeResp",
"time_pre": -0.2,
"time_post": 6.0,
"info": "search task IODonset and first buton press",
"marker": {
"channel": "StimImageOnset",
"type_input": "iod_onset",
"type_output": "iod_onset",
"prefix": "iod",
"type_result": "hit"
},
"response": {
"matching": true,
"channel": "RESPONSE",
"type_input": "resp_onset",
"type_offset": "resp_offset",
"prefix": "resp"
},
"window_matching":{
"matching": true,
"channel": "ETevents",
"window_onset": "iod_onset",
"window_offset": "resp_onset",
"event_type": "stim_onset",
"prefix": "winET"
},
"StimImageOnset": {
"event_id": 84
},
"RESPONSE": {
"events": {
"stim_channel": "STI 013",
"output": "onset",
"consecutive": true,
"min_duration": 0.0005,
"shortest_event": 1,
"initial_event": true,
"mask": null
},
"window": [
0.0,
6.0
],
"counts": "first",
"system_delay_ms": 0.0,
"early_ids_to_ignore": null,
"event_id": "1,2",
"and_mask": 3
},
"ETevents": {
"event_id": "250-260"
}
}
"""
def __init__(self,**kwargs):
super().__init__()
self.raw = None
self.window_onset = None
self.window_offset = None
self.marker_df = None
self.event_df = None
self.event_type = None
self.DataFrame = None
self.verbose = False
self.debug = False
def update(self,**kwargs):
"""
update CLS parameter
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
marker_df : pandas.DataFrame [None]
data frame with onset/offset columns
window_onset : string ["onset"]
window-DataFrame column window onset
window_offset : string ["offset"]
window-dataframe column window offset
events_df : pandas.DataFrame events
e.g. event/response codes from channel in <stim> <resp> group e.g.: STI 014/STI 013
verbose : bool [False]
printing information debug
debug : bool [False]
"""
self.raw = kwargs.get("raw",self.raw)
self.window_onset = kwargs.get("window_onset",self.window_onset)
self.window_offset = kwargs.get("window_offset",self.window_offset)
self.event_type = kwargs.get("event_type",self.event_type)
if "verbose" in kwargs.keys():
self.verbose = kwargs.get("verbose")
if "marker_df" in kwargs.keys():
self.marker_df = kwargs.get("marker_df") # df
if "event_df" in kwargs.keys():
self.event_df = kwargs.get("event_df") # df
self.DataFrame = None
#---
def info(self):
"""
print info
Parameter
---------
Return
--------
prints DataFrame
"""
logger.info("---> Info Window Matching:\n"+
" --> window onset : {}".format(self.window_onset) +
" --> window offset: {}".format(self.window_onset) +
" --> event type : {}".format(self.event_type) +
" --> DataFrame:\n{}".format(self.DataFrame.to_string()))
def apply(self,**kwargs):
self.update(**kwargs)
#--- get onset or offsets from events
evt = self.event_df[self.event_type]
cl1 = self.event_df.columns.tolist()
cl2 = self.marker_df.columns.tolist()
cl = []
cl.extend(cl1)
cl.extend(cl2)
dfs = []
for idx in self.marker_df.index:
wdf = self.marker_df.iloc[idx] # get series
c1 = self.event_df[ self.event_type ] >= wdf[ self.window_onset ]
c2 = self.event_df[ self.event_type ] < wdf[ self.window_offset ]
df = self.event_df[c1 & c2]
if df.empty: continue
#--- numpy
d = np.zeros([len(df),len(cl)],dtype=np.int32)
d[:,0:len(cl1)] += df.get_values()
d[:,len(cl1): ] += wdf.get_values()
dfs.append(pd.DataFrame(d,columns=cl,index=df.index.get_values()))
# print("HITS df last:\n{}".format(dfs[-1]))
self.DataFrame = pd.concat(dfs)
self.DataFrame.reset_index(drop=False,inplace=True)
self.DataFrame["selected"]=1
#---
if self.debug:
self.info()
return self.DataFrame
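#--- usage sketch for JuMEG_Epocher_WindowMatching (illustrative; the column names follow
#--- the template prefixes, e.g. "iod_onset"/"resp_onset" as in the class docstring above):
#   wm = JuMEG_Epocher_WindowMatching()
#   df = wm.apply(raw=raw,marker_df=marker_df,event_df=event_df,
#                 window_onset="iod_onset",window_offset="resp_onset",event_type="stim_onset")
#   # for every marker row, df contains the events whose <event_type> falls inside
#   # [window_onset, window_offset), merged with the marker columns and flagged selected=1.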
class JuMEG_Epocher_Events(JuMEG_Epocher_HDF,JuMEG_Epocher_Basic):
'''
Main class to find events
-> reading epocher event template file
-> for each condition find events using mne.find_events function
-> looking for IOD and response matching
-> store results into pandas dataframes
-> save as hdf5 for later to generate epochs,averages
Example
--------
from jumeg.epocher.jumeg_epocher import jumeg_epocher
from jumeg.epocher.jumeg_epocher_epochs import JuMEG_Epocher_Epochs
jumeg_epocher_epochs = JuMEG_Epocher_Epochs()
jumeg_epocher.template_path = '.'
condition_list = ['test01','test02']
fname = "./test01.fif"
param = { "condition_list":condition_list,
"do_run": True,
"template_name": "TEST01",
"save": True
}
(_,raw,epocher_hdf_fname) = jumeg_epocher.apply_events_to_hdf(fname,**param)
'''
#---
def __init__(self):
super().__init__()
self.parameter= None
self.iod = None
self.stimulus = None
self.response = None
self.window = None
self.event_data_parameter={"events":{
"stim_channel" : "STI 014",
"output" : "onset",
"consecutive" : True,
"min_duration" : 0.0001,
"shortest_event" : 1,
"initial_event" : True,
"mask" : None
},
"event_id" : None,
"and_mask" : None,
"system_delay_ms" : 0.0
}
self.ResponseMatching = JuMEG_Epocher_ResponseMatching()
self.WindowMatching = JuMEG_Epocher_WindowMatching()
#---
@property
def event_data_stim_channel(self): return self.event_data_parameter["events"]["stim_channel"]
@event_data_stim_channel.setter
def event_data_stim_channel(self,v): self.event_data_parameter["events"]["stim_channel"]=v
#---
def channel_events_to_dataframe(self):
"""
find events from groups
mne.pick_types(stim=True)
stimulus group <stim> [STI 014 TRIGGER, ET_events, ...]
mne.pick_types(resp=True)
response group [STI 013 RESPONSE,...]
store event information as pandas dataframe and save in hdf5 obj key: /events
"""
#--- stimulus channel group
for ch_idx in jumeg_base.picks.stim(self.raw):
#print(ch_idx)
ch_label = jumeg_base.picks.picks2labels(self.raw,ch_idx)
#print(ch_label)
self.event_data_stim_channel = ch_label
self._channel_events_dataframe_to_hdf(ch_label,"stim")
#--- response channel group
for ch_idx in jumeg_base.picks.response(self.raw):
ch_label = jumeg_base.picks.picks2labels(self.raw,ch_idx)
self.event_data_stim_channel = ch_label
self._channel_events_dataframe_to_hdf(ch_label,"resp")
#---
def _channel_events_dataframe_to_hdf(self,ch_label,prefix):
""" save channel event dataframe to HDF obj
Parameter
---------
string : channel label e.g.: 'STI 014'
pd dataframe:
dict : info dict with parameter
Results
-------
None
"""
self.event_data_stim_channel = ch_label
#print(self.event_data_parameter)
found = self.events_find_events(self.raw,prefix=prefix,**self.event_data_parameter)
#print(found)
if found:
df = found[0]
info = found[1]
#if type(df) == "<class 'pandas.core.frame.DataFrame'>":
key = self.hdf_node_name_channel_events +"/"+ ch_label
storer_attrs = {'info_parameter': info}
self.hdf_obj_update_dataframe(df.astype(np.int64),key=key,**storer_attrs )
# https://stackoverflow.com/questions/17468878/pandas-python-how-to-count-the-number-of-records-or-rows-in-a-dataframe
if self.verbose:
ids = pd.unique(df.iloc[:,0])
label = df.columns[0]
ids.sort()
msg = [ "Events in DataFrame column: {}".format(label) ]
for id in ids:
df_id = df[ df[ label ] == id ]
msg.append(" -> id: {:4d} counts: {:5d}".format(id,len(df_id.index)) )
logger.info("\n".join(msg))
#---
def apply_iod_matching(self,raw=None):
'''
apply image-onset-detection (IOD),
generate pandas dataframe with columns for iod
e.g. from template parameter for a condition
"iod" :{"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}},
Parameters
----------
raw: obj [None]
mne.raw obj
Returns
--------
pandas dataframe
columns with
marker-prefix => id,onset,offset
response-prefix => type,div,id,onset,offset,index,counts
additional columns => bads,selected,weighted_selected
marker info
'''
if not self.iod.iod_matching: return None,None
#--- marker events .e.g. STIMULUS
logger.info(self.iod.marker_channel_parameter)
try:
mrk_df,mrk_info = self.events_find_events(raw,prefix=self.iod.marker.prefix,**self.iod.marker_channel_parameter)
except:
logger.warning("WARNING: IOD Matching: no events found: \n{}\n ".format(self.event_data_parameter))
return None,None
#--- response events e.g. IOD
resp_df,resp_info = self.events_find_events(raw,prefix=self.iod.response.prefix,**self.iod.response_channel_parameter)
if resp_info.get("system_delay_is_applied"):
mrk_info["system_delay_is_applied"] = True
df = self.ResponseMatching.apply(raw=raw,stim_df=mrk_df,resp_df=resp_df,
stim_param = deepcopy(self.iod.marker_channel_parameter),
stim_type_input = self.iod.marker.type_input,
stim_prefix = self.iod.marker.prefix,
resp_param = deepcopy(self.iod.response_channel_parameter),
resp_type_input = self.iod.response.type_input,
resp_prefix = self.iod.response.prefix,
verbose = self.verbose,
debug = self.debug
)
if self.verbose:
if "iod_div" in df.columns:
logger.info("Stimulus Onset and IOD div [tsl] mean: {0:3.1f} std:{1:3.1f}".format(df["iod_div"].mean(),df["iod_div"].std()))
return df,mrk_info
#---
def update_parameter(self,param=None):
'''update parameter
-> init with default parameter
-> merge and overwrite defaults with the parameters defined for the condition
-> init special objs (marker,response,iod) if defined
Parameter
---------
param: <None>
'''
self.parameter = None
self.parameter = deepcopy( self.template_data['default'] )
self.parameter = self.template_update_and_merge_dict( self.parameter,param )
#---
self.marker = JuMEG_Epocher_Events_Channel(label="marker",parameter=self.parameter)
self.response = JuMEG_Epocher_Events_Channel(label="response",parameter=self.parameter)
self.window = JuMEG_Epocher_Events_Window(label="window_matching",parameter=self.parameter)
self.iod = JuMEG_Epocher_Events_Channel_IOD(label="iod",parameter=self.parameter)
#---
def events_store_to_hdf(self,fname=None,raw=None,condition_list=None,overwrite_hdf=False,use_yaml=True,
template_path=None,template_name=None,hdf_path=None,verbose=False,debug=False):
"""
find & store epocher data to hdf5:
-> reading parameters from the epocher template file
-> find events from raw-obj using mne.find_events
-> apply response matching if true
-> save results in pandas dataframes & HDF fromat
Parameter
---------
fname : string, fif file name <None>
fname : string, fif file name <None>
raw : raw obj <None>
raw : raw obj <None>
condition_list: list of conditions to process
select special conditions from epocher template
default: <None> , will process all defined in template
overwrite_hdf : flag for overwriting output HDF file <False>
template_path : path to jumeg epocher templates
template_name : name of template e.g: experimnet name
use_yaml : True; yaml format or json
hdf_path : path to hdf file <None> if None use fif-file path
verbose : flag, <False>
debug : flag, <False>
Results
-------
raw obj
string: FIF file name
"""
#--- read template file
self.use_yaml = use_yaml
if template_name:
self.template_name = template_name
if template_path:
self.template_path = template_path
if verbose:
self.verbose = verbose
if debug:
self.debug = debug
self.template_update_file()
self.raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
#--- init obj
self.hdf_obj_init(raw=self.raw,hdf_path=hdf_path,overwrite=overwrite_hdf)
self.channel_events_to_dataframe()
if not condition_list :
condition_list = self.template_data.keys()
#--- condi loop
# for condi, param, in self.template_data.items():
for condi in condition_list:
param = self.template_data.get(condi)
#--- check if condi is defined
if not param:
msg = "---> no condition key found in template data\n"
msg+= " -> condition: {}\n".format(condi)
msg+= " -> template file: {}\n".format(self.template_name)
if self.debug:
msg+=" -> template data:\n"+ self.pp_list2str(self.template_data)
logger.exception(msg)
#if self.exit_error_in_condition:
#sys.exit()
#--- check for real condition
if condi == 'default': continue
#--- check for condition in list
if condi not in self.template_data.keys(): continue
#--- update & merge condi self.parameter with defaults
self.update_parameter(param=param)
iod_data_frame = None
logger.info("Epocher start store events into HDF\n --> condition: "+ condi)
if not self.marker.channel_parameter: continue
#--- stimulus init dict's & dataframes
marker_info = dict()
marker_data_frame = pd.DataFrame()
response_data_frame = pd.DataFrame()
response_info = dict()
window_data_frame = pd.DataFrame()
window_info = dict()
if self.verbose:
logger.info('EPOCHER Template: %s Condition: %s' %(self.template_name,condi)+
'\n -> find events and epochs, save epocher output in HDF5 format')
#self.pp(self.parameter,head=" -> parameter")
            #--- iod matching: check if enabled and if the channel == stimulus channel
if self.iod.iod_matching:
iod_data_frame,iod_info = self.apply_iod_matching(raw=self.raw)
if iod_data_frame is None: continue
marker_data_frame = iod_data_frame
marker_info = iod_info
#--- copy iod df to res or stim df
# if self.response.matching:
# if ( self.iod.marker.channel != self.marker.channel ):
# response_data_frame = iod_data_frame
# response_info = iod_info
            #--- check if marker_data_frame is still empty
if marker_data_frame.empty :
logger.info("MARKER CHANNEL -> find events => condition: "+ condi +"\n ---> marker channel: "+ self.marker.channel)
if self.verbose:
logger.info(self.pp_list2str( self.marker.parameter,head=" -> Marker Channel parameter:"))
marker_data_frame,marker_info = self.events_find_events(self.raw,prefix=self.marker.prefix,**self.marker.channel_parameter)
#---
if marker_data_frame.empty: continue
marker_data_frame['bads'] = 0
marker_data_frame['selected'] = 0
marker_data_frame['weighted_selected']= 0
if self.verbose:
logger.info("Marker Epocher Events Data Frame [marker channel]: "+ condi)
#--- Marker Matching task
#--- match between stimulus and response or vice versa
            #--- get all response events for the condition, e.g. button press 4
            #--- apply window matching: find first/all responses in the window
if self.response.matching :
logger.info("Marker Matching -> matching marker & response channel: {}\n".format(condi)+
" -> marker channel : {}\n".format(self.marker.channel)+
" -> response channel : {}".format(self.response.channel) )
#--- look for all responses => 'event_id' = None
if response_data_frame.empty:
res_channel_param = deepcopy(self.response.channel_parameter)
res_channel_param['event_id'] = None
response_data_frame,response_info = self.events_find_events(self.raw,prefix=self.response.prefix,**res_channel_param)
if self.verbose:
logger.info(self.pp_list2str(self.response.parameter,
head="---> Response Epocher Events Data Frame [response channel] : " + self.response.channel))
#logger.info(marker_data_frame)
#--- update stimulus epochs with response matching
marker_data_frame = self.ResponseMatching.apply(raw = self.raw,
stim_df = marker_data_frame,
stim_param = deepcopy(self.marker.channel_parameter),
stim_type_input = self.marker.type_input,
stim_prefix = self.marker.prefix,
#---
resp_df = response_data_frame,
resp_param = deepcopy(self.response.channel_parameter),
resp_type_input = self.response.type_input,
resp_type_offset= self.response.type_offset,
resp_prefix = self.response.prefix,
#---
verbose = self.verbose
)
#--- window matching, find events in window
if self.window.matching:
logger.info("window matching => marker data frame:\n{}".format( marker_data_frame.to_string() ))
event_df = self.hdf_obj_get_channel_dataframe(self.window.stim_channel)
logger.info("window matching => event type: {}\n -> DataFrame:\n{}".format(self.window.event_type,event_df.to_string()))
                if not event_df.empty:
window_data_frame = self.WindowMatching.apply(raw=self.raw,verbose=self.verbose,
marker_df = marker_data_frame,
window_onset = self.window.window_onset,
window_offset = self.window.window_offset,
event_df = event_df,
event_type = self.window.event_type
)
if self.verbose:
                    resp_type = self.response.prefix + '_type'
                    hits      = marker_data_frame[resp_type]
                    idx       = np.where( hits == self.rt_type_as_index( self.marker.type_result ) )[0]
                    msg=["Response Matching DataFrame : " + condi,
                         " -> correct : {:d} / {:d}".format(len(idx),len(marker_data_frame.index)),
                         " -> marker type : {}".format(resp_type),
"-"*40,
"{}".format( marker_data_frame.to_string() )
]
logger.info("\n".join(msg))
else:
                #--- no response matching: all markers are treated as e.g. hits
mrk_type = self.marker.prefix +'_type'
if mrk_type not in marker_data_frame :
marker_data_frame[ mrk_type ] = self.rt_type_as_index( self.marker.type_result )
if self.verbose:
mrk_type = self.marker.prefix +'_type'
hits = marker_data_frame[mrk_type]
idx = np.where( hits == self.rt_type_as_index( self.marker.type_result ) )[0]
msg=["Marker Matching DataFrame : " + condi,
" -> correct : {:d} / {:d}".format(len(idx),len(marker_data_frame.index)),
" -> marker type : {}".format(mrk_type),
"-"*40,
"{}".format( marker_data_frame.to_string() )
]
logger.info("\n".join(msg))
key = self.hdf_node_name_epocher +'/'+condi
storer_attrs = {'epocher_parameter': self.parameter,'info_parameter':marker_info}
if self.window.matching:
self.hdf_obj_update_dataframe(window_data_frame.astype(np.int32),key=key,**storer_attrs)
else:
#--- marker dataframe
self.hdf_obj_update_dataframe(marker_data_frame.astype(np.int32),key=key,**storer_attrs )
self.HDFobj.close()
        logger.info("DONE: saved epocher data into HDF5 : " + self.hdf_filename)
return self.raw,fname
#---
def events_find_events(self,raw,prefix=None,**param):
"""find events with <mne.find_events()>
Parameters
---------
raw : raw obj
prefix: prefix for columns <None>
param : parameter like <**kwargs>
{'event_id': 40, 'and_mask': 255,
'events': {'consecutive': True, 'output':'step','stim_channel': 'STI 014',
'min_duration':0.002,'shortest_event': 2,'mask': 0}
}
Returns
--------
pandas data-frame with epoch event structure for e.g. stimulus, response channel
id : event id
offset : np array with TSL event code offset
onset : np array with TSL event code onset
if <prefix> columns are labeled with <prefix>
e.g.: prefix=img => img_onset
dict() with event structure for stimulus or response channel
sfreq : sampling frequency => raw.info['sfreq']
duration : {mean,min,max} in TSL
system_delay_is_applied : True/False
--> if true <system_delay_ms> converted to TSLs and added to the TSLs in onset,offset
(TSL => timeslices,samples)
"""
if raw is None:
            logger.error("ERROR in <events_find_events>: raw obj is None")
            return None,None
#---
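        # Usage sketch (illustrative only; the parameter values are assumptions taken from the docstring above):
        #   param = {'event_id': 40, 'and_mask': 255,
        #            'events': {'consecutive': True, 'output': 'step', 'stim_channel': 'STI 014',
        #                       'min_duration': 0.002, 'shortest_event': 2, 'mask': 0}}
        #   df, info = self.events_find_events(raw, prefix='img', **param)
        #   # -> df columns: img_id, img_onset, img_offset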
df = pd.DataFrame(columns = self.data_frame_stimulus_cols)
ev_id_idx = np.array([])
ev_onset = np.array([])
ev_offset = np.array([])
#---add prefix to col name
if prefix:
for k in self.data_frame_stimulus_cols:
df.rename(columns={k: prefix+'_'+k},inplace=True )
col_id = prefix+"_id"
col_onset = prefix+"_onset"
col_offset= prefix+"_offset"
else:
col_id = "id"
col_onset = "onset"
col_offset= "offset"
#---
events = deepcopy(param['events'])
events['output'] = 'step'
# self.pp( events )
logger.info(events)
#--- check if channel label in raw
if not jumeg_base.picks.labels2picks(raw,events["stim_channel"]):
return df,dict()
if self.verbose:
logger.debug("mne.find_events:\n"+ self.pp_list2str(events))
ev = mne.find_events(raw, **events) #-- return int64
# self.pp(ev)
        #--- apply and_mask, e.g. 255, to keep only the first 8 bits of the trigger channel
if param['and_mask']:
            ev[:, 1:] = np.bitwise_and(ev[:, 1:], param['and_mask'])
"""
This is the main script for predicting a segmentation of an input MRA image. Segmentations can be predicted for
multiple models, either on a rough grid (the parameters are then read from the Unet/models/tuned_params.csv file)
or on a fine grid.
"""
import os
from scipy.ndimage.filters import convolve
import numpy as np
import helper
import time
class Predictor():
def __init__(self, model, train_metadata, prob_dir, error_dir, patients, patients_dir, label_filename, threshold=0.5):
self.model = model
self.train_metadata = train_metadata
self.PROB_DIR = prob_dir
self.ERROR_DIR = error_dir
self.patients = patients
self.PATIENTS_DIR = patients_dir
self.threshold = threshold
self.label_filename = label_filename
return
# where to save probability map from validation as nifti
def get_probs_filepath(self, patient):
return os.path.join(self.PROB_DIR, 'probs_' + patient + '_.nii')
# where to save error mask
def get_errormasks_filepath(self, patient):
return os.path.join(self.ERROR_DIR, 'error_mask_' + patient + '_.nii')
def predict(self, patch_size, data_dir, patch_size_z=None):
print('________________________________________________________________________________')
print('patient dir:', data_dir)
# -----------------------------------------------------------
# LOADING MODEL, IMAGE AND MASK
# -----------------------------------------------------------
print('> Loading image...')
img_mat = helper.load_nifti_mat_from_file(
os.path.join(data_dir, '001.nii')).astype(np.float32)
print('> Loading mask...')
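        # If no mask file is stored next to the image, a rough foreground mask is derived from the
        # image itself below: a 16x16x16 box filter (ones/4096) computes the local mean intensity,
        # which is then thresholded at 10 to separate tissue from background.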
if not os.path.exists(os.path.join(data_dir, 'mask.nii')):
avg_mat = convolve(img_mat.astype(dtype=float), np.ones((16,16,16), dtype=float)/4096, mode='constant', cval=0)
mask_mat = np.where(avg_mat > 10.0, 1, 0)
helper.create_and_save_nifti(mask_mat, os.path.join(data_dir, 'mask.nii'))
else:
mask_mat = helper.load_nifti_mat_from_file(
os.path.join(data_dir, 'mask.nii'))
# -----------------------------------------------------------
# PREDICTION
# -----------------------------------------------------------
# the segmentation is going to be saved in this probability matrix
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
import os
import numpy as np
import nibabel as nib
import torch
from torch.nn import functional as F
import niclib as nl
### Relevant paths
data_path = 'path/to/campinas-mini' # Change this to point to the dataset in your filesystem
data_path = '/media/user/dades/DATASETS/campinas-mini' # Change this to point to the dataset in your filesystem
checkpoints_path = nl.make_dir('checkpoints/')
results_path = nl.make_dir('results/')
metrics_path = nl.make_dir('metrics/')
log_path = nl.make_dir('log/')
### 1. Dataset load
case_paths = [f.path for f in os.scandir(data_path) if f.is_dir()]
case_paths, test_case_paths = case_paths[:-3], case_paths[-3:] # Set aside 3 images for testing
print("Loading training dataset with {} images...".format(len(case_paths)))
def load_case(case_path):
t1_nifti = nib.load(os.path.join(case_path, 't1.nii.gz'))
t1_img = np.expand_dims(t1_nifti.get_data(), axis=0) # Add single channel modality
tissue_filepaths = [os.path.join(case_path, 'fast_3c/fast_pve_{}.nii.gz'.format(i)) for i in range(3)]
tissue_probabilties = np.stack([nib.load(tfp).get_data() for tfp in tissue_filepaths], axis=0)
return {'nifiti': t1_nifti, 't1': t1_img, 'probs': tissue_probabilties}
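# Note on the returned dict (derived from the code above): 't1' is the T1 volume with an added
# channel axis, i.e. shape (1, X, Y, Z); 'probs' stacks the three FAST tissue probability maps
# along axis 0, i.e. shape (3, X, Y, Z); 'nifiti' keeps the original nibabel image, presumably
# so its header/affine can be reused when saving results.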
dataset = nl.parallel_load(load_func=load_case, arguments=case_paths, num_workers=12)
dataset_train, dataset_val = nl.split_list(dataset, fraction=0.8) # Split images into train and validation
print('Training dataset with {} train and {} val images'.format(len(dataset_train), len(dataset_val)))
### 2. Create training and validation patch generators
train_sampling = nl.generator.BalancedSampling(
    labels=[np.argmax(case['probs'], axis=0)
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from collections import deque
import pickle
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units, upgrades
from absl import app
import torch
from torch.utils.tensorboard import SummaryWriter
from s10336.skdrl.pytorch.model.mlp import NaiveMultiLayerPerceptron
from s10336.skdrl.common.memory.memory import ExperienceReplayMemory
from s10336.skdrl.pytorch.model.dqn import DQN, prepare_training_inputs
DATA_FILE_QNET = 's10336_rlagent_with_vanilla_dqn_qnet'
DATA_FILE_QNET_TARGET = 's10336_rlagent_with_vanilla_dqn_qnet_target'
SCORE_FILE = 's10336_rlagent_with_vanilla_dqn_score'
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#writer = SummaryWriter()
class TerranAgentWithRawActsAndRawObs(base_agent.BaseAgent):
    # define the action set and its helper methods (hierarchically)
actions = ("do_nothing",
"train_scv",
"harvest_minerals",
"harvest_gas",
"build_commandcenter",
"build_refinery",
"build_supply_depot",
"build_barracks",
"train_marine",
"build_factorys",
"build_techlab_factorys",
"train_tank",
"build_armorys",
"build_starports",
"build_techlab_starports",
"train_banshee",
"attack",
"attack_all",
"tank_control"
)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_my_units_by_type(self, obs, unit_type):
        if unit_type == units.Neutral.VespeneGeyser:  # only for gas (vespene geysers)
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type]
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.alliance == features.PlayerRelative.SELF]
def get_enemy_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.alliance == features.PlayerRelative.ENEMY]
def get_my_completed_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.build_progress == 100
and unit.alliance == features.PlayerRelative.SELF]
def get_enemy_completed_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.build_progress == 100
and unit.alliance == features.PlayerRelative.ENEMY]
def get_distances(self, obs, units, xy):
units_xy = [(unit.x, unit.y) for unit in units]
return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)
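    # Worked example (illustrative): for units at (10, 10) and (13, 14) and xy=(10, 10),
    # np.linalg.norm([[0, 0], [3, 4]], axis=1) -> array([0., 5.]), i.e. the per-unit
    # Euclidean distances to the target point.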
def step(self, obs):
super(TerranAgentWithRawActsAndRawObs, self).step(obs)
if obs.first():
command_center = self.get_my_units_by_type(
obs, units.Terran.CommandCenter)[0]
self.base_top_left = (command_center.x < 32)
self.top_left_gas_xy = [(14, 25), (21, 19), (46, 23), (39, 16)]
self.bottom_right_gas_xy = [(44, 43), (37, 50), (12, 46), (19, 53)]
self.cloaking_flag = 1
self.TerranVehicleWeaponsLevel1 = False
self.TerranVehicleWeaponsLevel2 = False
self.TerranVehicleWeaponsLevel3 = False
def do_nothing(self, obs):
return actions.RAW_FUNCTIONS.no_op()
def train_scv(self, obs):
completed_commandcenterses = self.get_my_completed_units_by_type(
obs, units.Terran.CommandCenter)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
if (len(completed_commandcenterses) > 0 and obs.observation.player.minerals >= 100
and len(scvs) < 35):
commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
ccs = [commandcenter for commandcenter in commandcenters if commandcenter.assigned_harvesters < 18]
if ccs:
ccs = ccs[0]
if ccs.order_length < 5:
return actions.RAW_FUNCTIONS.Train_SCV_quick("now", ccs.tag)
return actions.RAW_FUNCTIONS.no_op()
def harvest_minerals(self, obs):
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)  # optimal worker allocation across command centers
cc = [commandcenter for commandcenter in commandcenters if commandcenter.assigned_harvesters < 18]
if cc:
cc = cc[0]
idle_scvs = [scv for scv in scvs if scv.order_length == 0]
if len(idle_scvs) > 0 and cc.assigned_harvesters < 18:
mineral_patches = [unit for unit in obs.observation.raw_units
if unit.unit_type in [
units.Neutral.BattleStationMineralField,
units.Neutral.BattleStationMineralField750,
units.Neutral.LabMineralField,
units.Neutral.LabMineralField750,
units.Neutral.MineralField,
units.Neutral.MineralField750,
units.Neutral.PurifierMineralField,
units.Neutral.PurifierMineralField750,
units.Neutral.PurifierRichMineralField,
units.Neutral.PurifierRichMineralField750,
units.Neutral.RichMineralField,
units.Neutral.RichMineralField750
]]
scv = random.choice(idle_scvs)
distances = self.get_distances(obs, mineral_patches, (scv.x, scv.y))
mineral_patch = mineral_patches[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Harvest_Gather_unit(
"now", scv.tag, mineral_patch.tag)
return actions.RAW_FUNCTIONS.no_op()
def harvest_gas(self, obs):
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
refs = self.get_my_units_by_type(obs, units.Terran.Refinery)
refs = [refinery for refinery in refs if refinery.assigned_harvesters < 3]
if refs:
ref = refs[0]
if len(scvs) > 0 and ref.ideal_harvesters:
scv = random.choice(scvs)
distances = self.get_distances(obs, refs, (scv.x, scv.y))
ref = refs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Harvest_Gather_unit(
"now", scv.tag, ref.tag)
return actions.RAW_FUNCTIONS.no_op()
def build_commandcenter(self, obs):
commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
if len(commandcenters) == 0 and obs.observation.player.minerals >= 400 and len(scvs) > 0:
            # in case the main command center has been destroyed
ccs_xy = (19, 23) if self.base_top_left else (39, 45)
distances = self.get_distances(obs, scvs, ccs_xy)
scv = scvs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
"now", scv.tag, ccs_xy)
if (len(commandcenters) < 2 and obs.observation.player.minerals >= 400 and
len(scvs) > 0):
ccs_xy = (41, 21) if self.base_top_left else (17, 48)
if len(commandcenters) == 1 and ((commandcenters[0].x, commandcenters[0].y) == (41, 21) or
(commandcenters[0].x, commandcenters[0].y) == (17, 48)):
                # in case the main command center has been destroyed
ccs_xy = (19, 23) if self.base_top_left else (39, 45)
distances = self.get_distances(obs, scvs, ccs_xy)
scv = scvs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
"now", scv.tag, ccs_xy)
return actions.RAW_FUNCTIONS.no_op()
################################################################################################
####################################### refinery ###############################################
def build_refinery(self, obs):
refinerys = self.get_my_units_by_type(obs, units.Terran.Refinery)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
if (obs.observation.player.minerals >= 100 and
len(scvs) > 0):
gas = self.get_my_units_by_type(obs, units.Neutral.VespeneGeyser)[0]
if self.base_top_left:
gases = self.top_left_gas_xy
else:
gases = self.bottom_right_gas_xy
rc = np.random.choice([0, 1, 2, 3])
gas_xy = gases[rc]
if (gas.x, gas.y) == gas_xy:
distances = self.get_distances(obs, scvs, gas_xy)
scv = scvs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Build_Refinery_pt(
"now", scv.tag, gas.tag)
return actions.RAW_FUNCTIONS.no_op()
def build_supply_depot(self, obs):
supply_depots = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
free_supply = (obs.observation.player.food_cap -
obs.observation.player.food_used)
if (obs.observation.player.minerals >= 100 and
len(scvs) > 0 and free_supply < 8):
ccs = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
if ccs:
for cc in ccs:
cc_x, cc_y = cc.x, cc.y
rand1, rand2 = random.randint(0, 10), random.randint(-10, 0)
supply_depot_xy = (cc_x + rand1, cc_y + rand2) if self.base_top_left else (cc_x - rand1, cc_y - rand2)
if 0 < supply_depot_xy[0] < 64 and 0 < supply_depot_xy[1] < 64:
pass
else:
return actions.RAW_FUNCTIONS.no_op()
distances = self.get_distances(obs, scvs, supply_depot_xy)
scv = scvs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(
"now", scv.tag, supply_depot_xy)
return actions.RAW_FUNCTIONS.no_op()
def build_barracks(self, obs):
completed_supply_depots = self.get_my_completed_units_by_type(
obs, units.Terran.SupplyDepot)
barrackses = self.get_my_units_by_type(obs, units.Terran.Barracks)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
if (len(completed_supply_depots) > 0 and
obs.observation.player.minerals >= 150 and len(scvs) > 0 and
len(barrackses) < 3):
brks = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)
completed_command_center = self.get_my_completed_units_by_type(
obs, units.Terran.CommandCenter)
if len(barrackses) >= 1 and len(completed_command_center) == 1:
# double commands
commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
if (len(commandcenters) < 2 and obs.observation.player.minerals >= 400 and
len(scvs) > 0):
ccs_xy = (41, 21) if self.base_top_left else (17, 48)
distances = self.get_distances(obs, scvs, ccs_xy)
scv = scvs[np.argmin(distances)]
return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
"now", scv.tag, ccs_xy)
if brks:
for brk in brks:
brk_x, brk_y = brk.x, brk.y
rand1, rand2 = random.randint(1, 3), random.randint(1, 3)
barracks_xy = (brk_x + rand1, brk_y + rand2) if self.base_top_left else (brk_x - rand1, brk_y - rand2)
if 0 < barracks_xy[0] < 64 and 0 < barracks_xy[1] < 64:
pass
else:
return actions.RAW_FUNCTIONS.no_op()
distances = self.get_distances(obs, scvs, barracks_xy)
                    scv = scvs[np.argmin(distances)]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 10:01:23 2020
@author: suraj
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from panel_allairfoil import panel
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from keras import backend as kb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.regularizers import l2
from tensorflow.keras import regularizers
#from helpers import get_lift_drag, preprocess_image, read_csv_file, save_model, load_model
import matplotlib as mpl
font = {'family' : 'normal',
'size' : 14}
mpl.rc('font', **font)
#%%
def coeff_determination(y_true, y_pred):
SS_res = kb.sum(kb.square(y_true-y_pred ))
SS_tot = kb.sum(kb.square( y_true - kb.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + kb.epsilon()) )
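# coeff_determination is the coefficient of determination R^2 = 1 - SS_res/SS_tot
# (kb.epsilon() only guards against division by zero): a perfect prediction gives 1.0,
# while always predicting the mean of y_true gives roughly 0.0.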
def training_data(data,panel_data,aoa,re,lift,drag):
num_samples = data.shape[0]
npoints = data.shape[1]
num_cp = npoints - 1
nf = data.shape[2]
xtrain = np.zeros((num_samples,npoints*nf+2))
ytrain = np.zeros((num_samples,1))
for i in range(num_samples):
xtrain[i,:npoints] = data[i,:,0]
xtrain[i,npoints:2*npoints] = data[i,:,1]
xtrain[i,-2] = aoa[i]
xtrain[i,-1] = re[i]
ytrain[i,0] = lift[i]
# ytrain[i,1] = drag[i]
return xtrain, ytrain
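# Shape check (illustrative): with data of shape (N, 201, 2) and aoa/re/lift arrays of length N,
# training_data returns xtrain of shape (N, 404) = (N, 201*2 + 2) holding the x-coordinates,
# y-coordinates, AOA and Re, and ytrain of shape (N, 1) holding the lift coefficient.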
def training_data_cp(data,panel_data,aoa,re,lift,drag):
num_samples = data.shape[0]
npoints = data.shape[1]
num_cp = npoints - 1
nf = data.shape[2]
xtrain = np.zeros((num_samples,npoints*nf+num_cp+2))
ytrain = np.zeros((num_samples,1))
for i in range(num_samples):
xtrain[i,:npoints] = data[i,:,0]
xtrain[i,npoints:2*npoints] = data[i,:,1]
xtrain[i,2*npoints:2*npoints+num_cp] = panel_data[i,2:]
xtrain[i,-2] = aoa[i]
xtrain[i,-1] = re[i]
ytrain[i,0] = lift[i]
# ytrain[i,1] = drag[i]
return xtrain, ytrain
def training_data_cl_cd(data,panel_data,aoa,re,lift,drag):
num_samples = data.shape[0]
npoints = data.shape[1]
num_cp = npoints - 1
nf = data.shape[2]
xtrain = np.zeros((num_samples,npoints*nf+2+2))
ytrain = np.zeros((num_samples,1))
for i in range(num_samples):
xtrain[i,:npoints] = data[i,:,0]
xtrain[i,npoints:2*npoints] = data[i,:,1]
xtrain[i,-4] = panel_data[i,0]
xtrain[i,-3] = panel_data[i,1]
xtrain[i,-2] = aoa[i]
xtrain[i,-1] = re[i]
ytrain[i,0] = lift[i]
# ytrain[i,1] = drag[i]
return xtrain, ytrain
def training_data_cl(data,panel_data,aoa,re,lift,drag):
num_samples = data.shape[0]
npoints = data.shape[1]
num_cp = npoints - 1
nf = data.shape[2]
xtrain = np.zeros((num_samples,npoints*nf+2+1))
ytrain = np.zeros((num_samples,1))
for i in range(num_samples):
xtrain[i,:npoints] = data[i,:,0]
xtrain[i,npoints:2*npoints] = data[i,:,1]
xtrain[i,-3] = panel_data[i,0]
xtrain[i,-2] = aoa[i]
xtrain[i,-1] = re[i]
ytrain[i,0] = lift[i]
# ytrain[i,1] = drag[i]
return xtrain, ytrain
#%%
#input_shape = (64,64,1)
#cd_scale_factor = 10
#X1, X2, Y = read_csv_file(input_shape, data_version="v1", cd_scale_factor=10)
im = 2 # 1: (x,y), 2: (x,y,cl,cd), 3: (x,y,Cp) 4: (x,y,cl)
data_path = '../airfoil_shapes_re_v2/'
#data_path = '../airfoil_shapes/'
num_xy = 201
num_cp = num_xy - 1
db = data_path + 'train_data.csv'
df = pd.read_csv(db, encoding='utf-8')
col_name = []
col_name.append('Airfoil')
for i in range(num_xy):
col_name.append(f'x{i}')
for i in range(num_xy):
col_name.append(f'y{i}')
col_name.append('AOA1')
col_name.append('AOA2')
col_name.append('RE')
col_name.append('CL')
col_name.append('CD')
col_name.append('CM')
col_name.append('CPmin')
df.columns = col_name
df = df[df['CL'].notna()]
#bad_airfoils = ['naca2206','naca4002','naca4404','naca4406','naca4602','naca6002','naca6004','naca6202','naca24002','naca24006','naca21004','naca22002','naca25002']
#df = df[~(df["Airfoil"].isin(bad_airfoils))]
num_samples = df.shape[0]
panel_data = np.zeros((num_samples,num_cp+2))
data_xy = np.zeros((num_samples,num_xy,2))
airfoil_names = []
aoa = np.zeros(num_samples)
re = np.zeros(num_samples)
cl = np.zeros(num_samples)
cd = np.zeros(num_samples)
cm = np.zeros(num_samples)
#%%
airfoils_xcoords = df.iloc[:,1:202].values
airfoils_ycoords = df.iloc[:,202:403].values
params = df.iloc[:,403:].values
#%%
counter = 0
cd_scale_factor = 10.0
generate_new_train_data = False
if generate_new_train_data :
for index, row in df.iterrows():
airfoil_names.append(row.Airfoil)
aoa[counter] = row.AOA2
re[counter] = row.RE
cl[counter] = row.CL
cd[counter] = row.CD*cd_scale_factor
cm[counter] = row.CM
data_xy[counter,:,0] = airfoils_xcoords[counter,:]
data_xy[counter,:,1] = airfoils_ycoords[counter,:]
CL, CDP, Cp, pp = panel(data_xy[counter,:,:], alfader=row.AOA2)
panel_data[counter,0], panel_data[counter,1], panel_data[counter,2:] = CL,CDP, Cp
print(counter, row.Airfoil, row.AOA2, row.RE)
counter +=1
aa,bb = np.where(panel_data < -35)
aau = np.unique(aa)
aoa = np.delete(aoa, aau, axis=0)
re = np.delete(re, aau, axis=0)
    cl = np.delete(cl, aau, axis=0)
import os
import sys
sys.path.append(os.path.join('..', 'codes'))
def make_imlist(phase='train', saved=True):
import os
import torch
from mmm import DataHandler as DH
from mmm import DatasetFlickr
from torchvision import transforms
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
    # input data locations
image_normalization_mean = [0.485, 0.456, 0.406]
image_normalization_std = [0.229, 0.224, 0.225]
kwargs_DF = {
'train': {
'filenames': {
'Annotation': path_top + 'inputs/train_anno.json',
'Category_to_Index': path_top + 'inputs/category.json'
},
'transform': transforms.Compose(
[
transforms.RandomResizedCrop(
224, scale=(1.0, 1.0), ratio=(1.0, 1.0)
),
transforms.ToTensor(),
transforms.Normalize(
mean=image_normalization_mean,
std=image_normalization_std
)
]
),
'image_path': path_top + 'inputs/images/train/'
},
'validate': {
'filenames': {
'Annotation': path_top + 'inputs/validate_anno.json',
'Category_to_Index': path_top + 'inputs/category.json'
},
'transform': transforms.Compose(
[
transforms.RandomResizedCrop(
224, scale=(1.0, 1.0), ratio=(1.0, 1.0)
),
transforms.ToTensor(),
transforms.Normalize(
mean=image_normalization_mean,
std=image_normalization_std
)
]
),
'image_path': path_top + 'inputs/images/validate/'
}
}
dataset = DatasetFlickr(**kwargs_DF[phase])
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
imlist = [
(image, label, filename) for image, label, filename in tqdm(loader)
]
if saved:
DH.savePickle(imlist, 'imlist', directory=path_top + 'outputs/check/')
return imlist
def get_predicts(phase='train', threshold=0.4, check_epoch=1,
ranges=(0, 12), learning_rate=0.1):
'''
    Output, for images fed to the model: the displayed image, the ground-truth labels,
    the rank/likelihood of each ground-truth label, and the likelihoods of all labels.
    Arguments:
        phase {'train' or 'validate'} -- whether the input images are training or validation images
        threshold {0.1 - 0.9} -- mask threshold; the mask must be created beforehand
        check_epoch {1 - 20} -- which epoch checkpoint to evaluate at
        ranges {(int, int)} -- which slice (from index to index) of the image set to take
'''
    # ---setup------------------------------------------------------------------
import json
import numpy as np
import matplotlib.pyplot as plt
import os
# import torch
import torch.optim as optim
from mmm import CustomizedMultiLabelSoftMarginLoss as MyLossFunction
from mmm import DataHandler as DH
from mmm import DatasetFlickr
from mmm import VisGCN
from PIL import Image
from torchvision import transforms
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
ranges = ranges if ranges[1] > ranges[0] else (ranges[1], ranges[0])
ranges = ranges if ranges[1] != ranges[0] else (ranges[0], ranges[0] + 1)
    # input data locations
image_normalization_mean = [0.485, 0.456, 0.406]
image_normalization_std = [0.229, 0.224, 0.225]
kwargs_DF = {
'train': {
'filenames': {
'Annotation': path_top + 'inputs/train_anno.json',
'Category_to_Index': path_top + 'inputs/category.json'
},
'transform': transforms.Compose(
[
transforms.RandomResizedCrop(
224, scale=(1.0, 1.0), ratio=(1.0, 1.0)
),
transforms.ToTensor(),
transforms.Normalize(
mean=image_normalization_mean,
std=image_normalization_std
)
]
),
'image_path': path_top + 'inputs/images/train/'
},
'validate': {
'filenames': {
'Annotation': path_top + 'inputs/validate_anno.json',
'Category_to_Index': path_top + 'inputs/category.json'
},
'transform': transforms.Compose(
[
transforms.RandomResizedCrop(
224, scale=(1.0, 1.0), ratio=(1.0, 1.0)
),
transforms.ToTensor(),
transforms.Normalize(
mean=image_normalization_mean,
std=image_normalization_std
)
]
),
'image_path': path_top + 'inputs/images/validate/'
}
}
dataset = DatasetFlickr(**kwargs_DF[phase])
num_class = dataset.num_category()
# loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
    # load the mask
mask = DH.loadPickle(
'{0:0=2}'.format(int(threshold * 10)),
path_top + 'inputs/comb_mask/'
)
    # load the backpropagation weights
bp_weight = DH.loadNpy(
'{0:0=2}'.format(int(threshold * 10)),
path_top + 'inputs/backprop_weight/'
)
    # settings and input paths for the data used in training
gcn_settings = {
'class_num': num_class,
'filepaths': {
'category': path_top + 'inputs/category.json',
'upper_category': path_top + 'inputs/upper_category.json',
'relationship': path_top + 'inputs/relationship.pickle',
'learned_weight': path_top + 'inputs/learned/200cnn.pth'
},
'feature_dimension': 2048
}
    # model settings
model = VisGCN(
class_num=num_class,
loss_function=MyLossFunction(),
optimizer=optim.SGD,
learningrate=learning_rate,
momentum=0.9,
weight_decay=1e-4,
fix_mask=mask,
network_setting=gcn_settings,
multigpu=False,
backprop_weight=bp_weight
)
    # directory where the model is saved
mpath = path_top + 'outputs/learned/th{0:0=2}/'.format(int(threshold * 10))
    # if training was already partially done, load the checkpoint here
if check_epoch > 0:
model.loadmodel('{0:0=3}weight'.format(check_epoch), mpath)
def get_row_col(length):
'''
        Compute the number of rows and columns for displaying the images in a grid.
'''
sq = int(np.sqrt(length))
if sq ** 2 == length:
return sq, sq
if (sq + 1) * sq >= length:
return sq, sq + 1
else:
return sq + 1, sq + 1
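    # Quick sanity examples for the helper above:
    #   get_row_col(9) -> (3, 3), get_row_col(10) -> (3, 4), get_row_col(13) -> (4, 4)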
    # ---compute the average number of labels-------------------------------------
# ave, cnt = 0, 0
# mx, mi = 0, np.inf
# scores = []
# for i, (image, label, filename) in enumerate(loader):
# score = sum(model.predict(image, labeling=True)[0])
# ave += score
# cnt += 1
# mx = score if score > mx else mx
# mi = score if score < mi else mi
# scores.append(score)
# plt.hist(scores)
# plt.show()
# print('average: {0}'.format(ave / cnt))
# print('max, min: {0}, {1}'.format(mx, mi))
    # ---display images, ground-truth labels and predicted labels------------------
# imlist = [(image, label, filename) for image, label, filename in loader]
# DH.savePickle(imlist, 'imlist', directory=path_top + 'outputs/check/')
# imlist = DH.loadPickle('imlist', directory='../datas/geo_rep/inputs')
imlist = DH.loadPickle('imlist', directory=path_top + 'outputs/check/')
imlist = imlist[ranges[0]:ranges[1]]
category = json.load(
open(kwargs_DF[phase]['filenames']['Category_to_Index'], 'r')
)
category = [key for key, _ in category.items()]
results = []
row, col = get_row_col(len(imlist))
for idx, (image, label, filename) in enumerate(imlist):
        # display the image
print(kwargs_DF[phase]['image_path'], filename[0])
image_s = Image.open(
os.path.join(kwargs_DF[phase]['image_path'], filename[0])
).convert('RGB')
plt.subplot(row, col, idx + 1)
plt.imshow(image_s)
plt.axis('off')
        # ground-truth labels
label = [lname for flg, lname in zip(label[0], category) if flg == 1]
        # predicted labels
pred = model.predict(image)[0]
pred = [
(likelihood, lname) for likelihood, lname in zip(pred, category)
if likelihood != 0
]
pred = sorted(pred, reverse=True)
        # top-n predicted labels
toppred = [(item[1], item[0]) for item in pred]
        # rank of each ground-truth label within the predictions
prank = {tag: (idx, llh) for idx, (llh, tag) in enumerate(pred)}
prank = [prank[lbl] for lbl in label]
result = {
'filename': filename[0],
'image': image_s,
'tag': label,
'tags_rank': prank,
'predict': toppred
}
results.append(result)
return results
def get_training_images(threshold=0.1, phase='train'):
'''
    Inspect the training data for a given class.
    Note: depending on the threshold the output size can become very large.
    Arguments:
        phase {'train' or 'validate'} -- whether the input images are training or validation images
        threshold {0.1 - 0.9} -- mask threshold; the mask must be created beforehand
'''
    # ---setup------------------------------------------------------------------
import json
import numpy as np
import os
import pickle
import shutil
from mmm import DataHandler as DH
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
    # load the data
imlist = DH.loadPickle('imlist', directory=path_top + 'outputs/check/')
category = json.load(open(path_top + 'inputs/category.json', 'r'))
category = [key for key, _ in category.items()]
    # load the mask
with open(path_top + 'inputs/comb_mask/{0:0=2}.pickle'.format(
int(threshold * 10)), 'rb') as f:
mask = pickle.load(f)
def _fixed_mask(labels, fmask):
'''
        Generate the mask that marks the entries for which the error is not backpropagated.
'''
labels = labels.data.cpu().numpy()
labels_y, labels_x = np.where(labels == 1)
labels_y = np.append(labels_y, labels_y[-1] + 1)
labels_x = np.append(labels_x, 0)
fixmask = np.zeros((labels.shape[0] + 1, labels.shape[1]), int)
row_p, columns_p = labels_y[0], [labels_x[0]]
fixmask[row_p] = fmask[labels_x[0]]
for row, column in zip(labels_y[1:], labels_x[1:]):
if row == row_p:
columns_p.append(column)
else:
if len(columns_p) > 1:
for x in columns_p:
fixmask[row_p][x] = 0
row_p, columns_p = row, [column]
fixmask[row] = fixmask[row] | fmask[column]
fixmask = fixmask[:-1]
return fixmask
    # ---sort the input images by their tags---------------------------------------
outputs_path = path_top \
+ 'outputs/check/images/th{0:0=2}/'.format(int(threshold * 10))
for _, label, filename in tqdm(imlist):
fix_mask = _fixed_mask(label, mask)
for idx, flg in enumerate(fix_mask[0]):
if flg == 1:
continue
path_fin = category[idx] + '/zero' if label[0][idx] == 0 \
else category[idx] + '/one'
os.makedirs(outputs_path + path_fin, exist_ok=True)
shutil.copy(
path_top + 'inputs/images/' + phase + '/' + filename[0],
outputs_path + path_fin
)
def class_precision(threshold=0.4, epoch=20):
'''
    Compare the predicted labels with the ground truth for each class,
    excluding the images marked as unknown by the mask.
'''
# -------------------------------------------------------------------------
    # setup
import json
import numpy as np
import os
import pickle
from mmm import CustomizedMultiLabelSoftMarginLoss as MyLossFunction
from mmm import DataHandler as DH
from mmm import VisGCN
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
    # load the mask
with open(path_top + 'inputs/comb_mask/{0:0=2}.pickle'.format(
int(threshold * 10)), 'rb') as f:
mask = pickle.load(f)
    # load the backpropagation weights
with open(path_top + 'inputs/backprop_weight/{0:0=2}.npy'.format(
int(threshold * 10)), 'rb') as f:
bp_weight = np.load(f)
    # model settings
gcn_settings = {
'class_num': 3100,
'filepaths': {
'category': path_top + 'inputs/category.json',
'upper_category': path_top + 'inputs/upper_category.json',
'relationship': path_top + 'inputs/relationship.pickle',
'learned_weight': path_top + 'inputs/learned/200cnn.pth'
},
'feature_dimension': 2048
}
model = VisGCN(
class_num=3100,
loss_function=MyLossFunction(),
learningrate=0.1,
weight_decay=1e-4,
fix_mask=mask,
network_setting=gcn_settings,
multigpu=False,
backprop_weight=bp_weight
)
if epoch > 0:
model.loadmodel(
'{0:0=3}weight'.format(epoch),
path_top + 'outputs/learned/th{0:0=2}'.format(int(threshold * 10))
)
    # load the data
imlist = DH.loadPickle('imlist', directory=path_top + 'outputs/check/')
category = json.load(open(path_top + 'inputs/category.json', 'r'))
category = [key for key, _ in category.items()]
def _update_backprop_weight(labels, fmask):
'''
        Specify the weights used when backpropagating the error; entries where the error is not propagated are 0.
'''
labels = labels.data.cpu().numpy()
labels_y, labels_x = np.where(labels == 1)
labels_y = np.append(labels_y, labels_y[-1] + 1)
labels_x = np.append(labels_x, 0)
weight = np.zeros((labels.shape[0] + 1, labels.shape[1]), int)
row_p, columns_p = labels_y[0], [labels_x[0]]
weight[row_p] = fmask[labels_x[0]]
for row, column in zip(labels_y[1:], labels_x[1:]):
if row == row_p:
columns_p.append(column)
else:
if len(columns_p) > 1:
for y in columns_p:
weight[row_p][y] = 0
row_p, columns_p = row, [column]
weight[row] = weight[row] | fmask[column]
weight = weight[:-1]
weight = np.ones(labels.shape, int) - weight
return weight
    # ---sort the input images by their tags---------------------------------------
outputs_path = path_top + 'outputs/check/confusion_matrix'
counts = np.zeros((len(category), 2, 2))
for image, label, _ in tqdm(imlist):
fix_mask = _update_backprop_weight(label, mask)
predicts = model.predict(image, labeling=True)
for idx, flg in enumerate(fix_mask[0]):
if flg == 0:
continue
            if label[0][idx] == 0:
                # ground truth is 0
                if predicts[0][idx] == 0:
                    # prediction is 0
                    counts[idx][0][0] += 1
                else:
                    # prediction is 1
                    counts[idx][1][0] += 1
            else:
                # ground truth is 1
                if predicts[0][idx] == 0:
                    # prediction is 0
                    counts[idx][0][1] += 1
                else:
                    # prediction is 1
                    counts[idx][1][1] += 1
DH.saveNpy(
np.array(counts),
'{0:0=2}_{1:0=2}'.format(int(threshold * 10), epoch),
outputs_path
)
def score_unknown(threshold=0.4, epoch=20):
'''
    Check how the model predicts on the images treated as unknown during training.
'''
# -------------------------------------------------------------------------
    # setup
import json
import numpy as np
import os
import pickle
from mmm import CustomizedMultiLabelSoftMarginLoss as MyLossFunction
from mmm import DataHandler as DH
from mmm import VisGCN
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
    # load the mask
with open(path_top + 'inputs/comb_mask/{0:0=2}.pickle'.format(
int(threshold * 10)), 'rb') as f:
mask = pickle.load(f)
    # load the backpropagation weights
with open(path_top + 'inputs/backprop_weight/{0:0=2}.npy'.format(
int(threshold * 10)), 'rb') as f:
bp_weight = np.load(f)
    # model settings
gcn_settings = {
'class_num': 3100,
'filepaths': {
'category': path_top + 'inputs/category.json',
'upper_category': path_top + 'inputs/upper_category.json',
'relationship': path_top + 'inputs/relationship.pickle',
'learned_weight': path_top + 'inputs/learned/200cnn.pth'
},
'feature_dimension': 2048
}
model = VisGCN(
class_num=3100,
loss_function=MyLossFunction(),
learningrate=0.1,
weight_decay=1e-4,
fix_mask=mask,
network_setting=gcn_settings,
multigpu=False,
backprop_weight=bp_weight
)
if epoch > 0:
model.loadmodel(
'{0:0=3}weight'.format(epoch),
path_top + 'outputs/learned/th{0:0=2}'.format(int(threshold * 10))
)
    # load the data
imlist = DH.loadPickle('imlist', directory=path_top + 'outputs/check/')
category = json.load(open(path_top + 'inputs/category.json', 'r'))
category = [key for key, _ in category.items()]
def _update_backprop_weight(labels, fmask):
'''
        Specify the weights used when backpropagating the error; entries where the error is not propagated are 1.
'''
labels = labels.data.cpu().numpy()
labels_y, labels_x = np.where(labels == 1)
labels_y = np.append(labels_y, labels_y[-1] + 1)
labels_x = np.append(labels_x, 0)
weight = np.zeros((labels.shape[0] + 1, labels.shape[1]), int)
row_p, columns_p = labels_y[0], [labels_x[0]]
weight[row_p] = fmask[labels_x[0]]
for row, column in zip(labels_y[1:], labels_x[1:]):
if row == row_p:
columns_p.append(column)
else:
if len(columns_p) > 1:
for y in columns_p:
weight[row_p][y] = 0
row_p, columns_p = row, [column]
weight[row] = weight[row] | fmask[column]
weight = weight[:-1]
return weight
    # ---sort the input images by their tags---------------------------------------
outputs_path = path_top + 'outputs/check/unknown_predict'
counts = np.zeros((len(category), 2))
for image, label, _ in tqdm(imlist):
fix_mask = _update_backprop_weight(label, mask)
predicts = model.predict(image, labeling=True)
for idx, flg in enumerate(fix_mask[0]):
if flg == 0:
continue
            if predicts[0][idx] == 0:
                # prediction is 0
                counts[idx][0] += 1
            else:
                # prediction is 1
                counts[idx][1] += 1
DH.saveNpy(
np.array(counts),
'{0:0=2}_{1:0=2}'.format(int(threshold * 10), epoch),
outputs_path
)
def check_lowrecall(threshold=0.4):
'''
    Use the recall values to inspect the images and their relation to the upper classes.
'''
import json
import numpy as np
import torch
from mmm import DataHandler as DH
path_top = '../datas/gcn/'
confusion_matrix = path_top + '_outputs/check/confusion_matrix/'
confusion_matrix = DH.loadNpy(
'{0:0=2}'.format(int(10 * threshold)), confusion_matrix
)
category = json.load(open(path_top + 'inputs/category.json', 'r'))
local_df = DH.loadPickle(
'local_df_area16_wocoth', '../datas/prepare/inputs'
)
w00 = torch.load(open(
path_top + 'outputs3/learned/th{0:0=2}/000weight.pth'.format(
int(10 * threshold)), 'rb')
)
w20 = torch.load(open(
path_top + 'outputs3/learned/th{0:0=2}/020weight.pth'.format(
int(10 * threshold)), 'rb')
)
results = []
for cm, (cat, idx) in zip(confusion_matrix, category.items()):
if cm[0][1] + cm[1][1] == 0:
continue
rc = cm[1][1] / (cm[0][1] + cm[1][1])
if rc > 0.5:
continue
pr = 0 if sum(cm[1]) == 0 else cm[1][1] / sum(cm[1])
results.append([
cat,
pr,
rc,
local_df.loc[cat]['representative'],
# w00['layer1.W'][idx].cpu(),
# w20['layer1.W'][idx].cpu()
])
return np.array(results)
def confusion_all_matrix(threshold=0.4, epoch=20, saved=False):
'''
    Build a confusion matrix over positives, unknowns and negatives.
'''
# -------------------------------------------------------------------------
    # setup
import json
import numpy as np
import os
from mmm import CustomizedMultiLabelSoftMarginLoss as MyLossFunction
from mmm import DataHandler as DH
from mmm import VisGCN
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
path_top = '../datas/gcn/'
    # load the mask
mask = DH.loadPickle(
'{0:0=2}'.format(int(threshold * 10)),
path_top + 'inputs/comb_mask/'
)
    # load the backpropagation weights
bp_weight = DH.loadNpy(
'{0:0=2}'.format(int(threshold * 10)),
path_top + 'inputs/backprop_weight/'
)
    # model settings
gcn_settings = {
        'class_num': 3100,
'filepaths': {
'category': path_top + 'inputs/category.json',
'upper_category': path_top + 'inputs/upper_category.json',
'relationship': path_top + 'inputs/relationship.pickle',
'learned_weight': path_top + 'inputs/learned/200cnn.pth'
},
'feature_dimension': 2048
}
model = VisGCN(
class_num=3100,
loss_function=MyLossFunction(),
learningrate=0.1,
weight_decay=1e-4,
fix_mask=mask,
network_setting=gcn_settings,
multigpu=False,
backprop_weight=bp_weight
)
if epoch > 0:
model.loadmodel(
'{0:0=3}weight'.format(epoch),
# path_top + 'outputs/learned/th{0:0=2}'.format(int(threshold * 10))
path_top + 'outputs/learned_backup'
)
    # load the data
imlist = DH.loadPickle('imlist_val', directory=path_top + 'outputs/check/')
category = json.load(open(path_top + 'inputs/category.json', 'r'))
category = [key for key, _ in category.items()]
def _update_backprop_weight(labels, fmask):
'''
        Specify the weights used when backpropagating the error; entries where the error is not propagated are 0.
'''
labels = labels.data.cpu().numpy()
labels_y, labels_x = np.where(labels == 1)
labels_y = np.append(labels_y, labels_y[-1] + 1)
labels_x = np.append(labels_x, 0)
weight = np.zeros((labels.shape[0] + 1, labels.shape[1]), int)
row_p, columns_p = labels_y[0], [labels_x[0]]
weight[row_p] = fmask[labels_x[0]]
for row, column in zip(labels_y[1:], labels_x[1:]):
if row == row_p:
columns_p.append(column)
else:
if len(columns_p) > 1:
for y in columns_p:
weight[row_p][y] = 0
row_p, columns_p = row, [column]
weight[row] = weight[row] | fmask[column]
weight = weight[:-1]
weight = np.ones(labels.shape, int) - weight
return weight
    # ---sort the input images by their tags---------------------------------------
outputs_path = path_top + 'outputs/check/all_matrix'
# 0: precision, 1: recall, 2: positive_1, 3: positive_all,
# 4: unknown_1, 5: unknown_all, 6: negative_1, 7: negative_all
counts = np.zeros((len(category), 8))
for image, label, _ in tqdm(imlist):
fix_mask = _update_backprop_weight(label, mask)
predicts = model.predict(image, labeling=True)
for idx, flg in enumerate(fix_mask[0]):
            # for class category[idx]
            if flg == 0:
                # ground truth is unknown
                if predicts[0][idx] == 1:
                    # prediction is 1
                    counts[idx][4] += 1
                continue
            if label[0][idx] == 0:
                # ground truth is 0
                if predicts[0][idx] == 1:
                    # prediction is 1
                    counts[idx][6] += 1
            else:
                # ground truth is 1
                if predicts[0][idx] == 1:
                    # prediction is 1
                    counts[idx][2] += 1
allnum = len(imlist)
for idx, (zero, one) in enumerate(bp_weight):
counts[idx][3] = one
counts[idx][5] = allnum - one - zero
counts[idx][7] = zero
if counts[idx][2] + counts[idx][6] != 0:
counts[idx][0] = counts[idx][2] / (counts[idx][2] + counts[idx][6])
if counts[idx][3] != 0:
counts[idx][1] = counts[idx][2] / counts[idx][3]
if saved:
DH.saveNpy(
np.array(counts),
'all_matrix{0:0=2}_{1:0=2}'.format(int(threshold * 10), epoch),
outputs_path
)
return np.array(counts)
def compare_pr(threshold=0.4, saved=True):
'''
    Build a list summarizing, for each class, the accuracy change before and after
    training together with the class's upper classes.
'''
# -------------------------------------------------------------------------
    # setup
import json
import numpy as np
import os
from mmm import DataHandler as DH
from tqdm import tqdm
path_top = '../datas/gcn/'
outputs_path = path_top + 'outputs/check/all_matrix'
epoch00 = 'all_matrix{0:0=2}_{1:0=2}.npy'.format(int(threshold * 10), 0)
epoch20 = 'all_matrix{0:0=2}_{1:0=2}.npy'.format(int(threshold * 10), 20)
if not os.path.isfile(outputs_path + '/' + epoch00):
confusion_all_matrix(threshold, 0, True)
if not os.path.isfile(outputs_path + '/' + epoch20):
confusion_all_matrix(threshold, 20, True)
epoch00 = DH.loadNpy(epoch00, outputs_path)
epoch20 = DH.loadNpy(epoch20, outputs_path)
category = json.load(open(path_top + 'inputs/category.json', 'r'))
category = [key for key, _ in category.items()]
upper_category = json.load(
open(path_top + 'inputs/upper_category.json', 'r')
)
upper_category = [key for key, _ in upper_category.items()]
local_df = DH.loadPickle(
'local_df_area16_wocoth', '../datas/prepare/inputs'
)
# -------------------------------------------------------------------------
# 0: label, 1: rep
# 2 ~ 9: confusion_all_matrix of epoch 0
# 10 ~ 17: confusion_all matrix of epoch 20
compare_list = []
for idx, cat in tqdm(enumerate(category)):
if cat in upper_category:
continue
row = [cat, local_df.loc[cat]['representative']]
row.extend(epoch00[idx])
row.extend(epoch20[idx])
compare_list.append(row)
if saved:
outputs_path = path_top + 'outputs/check/acc_change'
DH.saveNpy(
np.array(compare_list, dtype=object),
'result_00to20_{0:0=2}'.format(int(threshold * 10)),
outputs_path
)
return np.array(compare_list)
def check_locate():
'''
    Check whether the location info of the training images is actually present in the dataset.
'''
    # ---setup------------------------------------------------------------------
from mmm import DataHandler as DH
from tqdm import tqdm
path_top = '../datas/geo_rep/'
photo_location = DH.loadPickle('photo_location_train', path_top + 'inputs')
local_df = DH.loadPickle(
'../datas/prepare/inputs/local_df_area16_wocoth2.pickle'
)
locate_list = []
for geo in local_df.itertuples():
locate_list.extend(list(geo.geo))
# print(len(locate_list))
# print(locate_list[0:5])
imlist = DH.loadPickle('imlist', directory=path_top + 'inputs/')
    # ---check------------------------------------------------------------------
cnt = 0
for _, _, filename in tqdm(imlist):
image_loc = photo_location[filename[0]]
if image_loc not in locate_list:
cnt += 1
print(cnt, len(imlist), cnt / len(imlist))
def ite_hist_change(phase='train', width=0.16, saved=True):
import matplotlib.pyplot as plt
import numpy as np
from mmm import DataHandler as DH
plt.rcParams['font.family'] = 'IPAexGothic'
# -------------------------------------------------------------------------
dpath = '../datas/gcn/outputs/check/ite'
data = DH.loadNpy('epoch00to20_thr04_{0}.npy'.format(phase), dpath)
diff = [[] for _ in range(5)]
for item in data:
diff[len(item[1]) - 1].append(item[11] - item[3])
bar_heights = np.zeros((5, 8))
thresholds = np.arange(-1.0, 1.1, 0.25)
for idx, item in enumerate(diff):
bin_heights = np.histogram(item, bins=8, range=(-1.0, 1.0))[0]
bar_heights[idx] = bin_heights / sum(bin_heights)
# -------------------------------------------------------------------------
# labels = ['{0:0.2f}'.format(item) for item in thresholds]
x = np.arange(len(thresholds) - 1)
width = width if 0 < width <= 0.2 else 0.16
fig, ax = plt.subplots()
    ax.bar(x + 0.1 + 2.5 * width, np.ones(8)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import six
from fake_reader import fake_imdb_reader
paddle.enable_static()
def bow_net(data,
label,
dict_dim,
emb_dim=128,
hid_dim=128,
hid_dim2=96,
class_dim=2):
"""
BOW net
This model is from https://github.com/PaddlePaddle/models:
fluid/PaddleNLP/text_classification/nets.py
"""
emb = fluid.layers.embedding(
input=data, is_sparse=True, size=[dict_dim, emb_dim])
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
return avg_cost
class TestGradientClip(unittest.TestCase):
def setUp(self):
self.word_dict_len = 5147
self.BATCH_SIZE = 2
reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
self.clip_gradient = lambda x: None
self.init()
def init(self):
pass
def get_places(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
return places
def check_clip_result(self, out, out_clip):
pass
def check_gradient_clip(self, place, dtype='float32'):
prog = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(
main_program=prog, startup_program=startup_program):
image = fluid.data(name="a", shape=[-1, 784], dtype='float32')
label = fluid.data(name="b", shape=[-1, 1], dtype='int64')
if dtype != 'float32':
image_cast = paddle.cast(image, dtype)
hidden = fluid.layers.fc(input=image_cast, size=32, act='relu')
else:
hidden = fluid.layers.fc(input=image, size=32, act='relu')
predict = fluid.layers.fc(input=hidden, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
prog_clip = prog.clone()
avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
p_g = fluid.backward.append_backward(loss=avg_cost)
p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)
p_g = sorted(p_g, key=lambda x: x[0].name)
p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name)
with fluid.program_guard(
main_program=prog_clip, startup_program=startup_program):
p_g_clip = self.clip_gradient(p_g_clip)
grad_list = [elem[1] for elem in p_g]
grad_clip_list = [elem[1] for elem in p_g_clip]
train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=3)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(startup_program)
data = next(train_reader())
out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
out_clip = exe.run(prog_clip,
feed=feeder.feed(data),
fetch_list=grad_clip_list)
self.check_clip_result(out, out_clip)
def check_sparse_gradient_clip(self, place):
prog = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(
main_program=prog, startup_program=startup_program):
data = fluid.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1)
label = fluid.data(name="label", shape=[-1, 1], dtype="int64")
cost = bow_net(data, label, self.word_dict_len)
self.backward_and_optimize(cost)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
exe.run(startup_program)
data = next(self.train_data())
val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
self.assertEqual((1, ), val.shape)
self.assertFalse(np.isnan(val))
def backward_and_optimize(self, cost):
pass
class TestGradientClipByGlobalNorm(TestGradientClip):
def init(self):
self.clip_norm = 0.2
def check_clip_result(self, out, out_clip):
global_norm = 0
for v in out:
global_norm += np.sum(np.square(v))
global_norm = np.sqrt(global_norm)
scale = self.clip_norm / np.maximum(self.clip_norm, global_norm)
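        # Numeric example (illustrative): with clip_norm=0.2 and a computed global_norm of 0.4,
        # scale = 0.2 / max(0.2, 0.4) = 0.5, so every gradient is halved; if global_norm <= clip_norm
        # the scale is 1.0 and the gradients stay unchanged.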
res = []
for i in range(len(out)):
out[i] = scale * out[i]
for u, v in zip(out, out_clip):
self.assertTrue(
np.allclose(
a=u, b=v, rtol=1e-5, atol=1e-8),
"gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}".
format(u, v, u - v))
    # test whether the output is right when using 'set_gradient_clip'
def test_old_gradient_clip(self):
def func(params_grads):
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
fluid.clip.set_gradient_clip(clip)
return fluid.clip.append_gradient_clip_ops(params_grads)
self.clip_gradient = func
self.check_gradient_clip(fluid.CPUPlace())
    # test whether the output is right when using grad_clip
def test_new_gradient_clip(self):
def func(params_grads):
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
return clip(params_grads)
self.clip_gradient = func
self.check_gradient_clip(fluid.CPUPlace())
    # test whether the output is right when using grad_clip under float64
def test_new_gradient_clip_fp64(self):
def func(params_grads):
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
return clip(params_grads)
self.clip_gradient = func
self.check_gradient_clip(fluid.CPUPlace(), "float64")
    # invoke 'set_gradient_clip' in the wrong order
def test_wrong_API_order(self):
def backward_func(cost):
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)
fluid.clip.set_gradient_clip(clip)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01,
grad_clip=clip)
# if 'set_gradient_clip' and 'optimizer(grad_clip=...)' are used together, 'set_gradient_clip' will be ineffective
sgd_optimizer.minimize(cost)
# 'set_gradient_clip' must be called before 'minimize', otherwise it will be ineffective
fluid.clip.set_gradient_clip(clip)
self.backward_and_optimize = backward_func
for place in self.get_places():
self.check_sparse_gradient_clip(place)
# raise TypeError
def test_tpyeError(self):
# the type of optimizer(grad_clip=) must be an instance of GradientClipBase's derived class
with self.assertRaises(TypeError):
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1,
grad_clip="test")
# if grad is None or does not need clipping
def test_none_grad_fp32(self):
ops = self._test_none_grad_helper("float32")
self.assertListEqual(ops, [
'squared_l2_norm', 'squared_l2_norm', 'sum', 'sum', 'sqrt',
'fill_constant', 'elementwise_max', 'elementwise_div',
'elementwise_mul', 'elementwise_mul'
])
def test_none_grad_fp16(self):
ops = self._test_none_grad_helper("float16")
self.assertListEqual(ops, [
'square', 'reduce_sum', 'square', 'reduce_sum', 'sum', 'cast',
'sum', 'sqrt', 'fill_constant', 'elementwise_max',
'elementwise_div', 'cast', 'elementwise_mul', 'cast',
'elementwise_mul'
])
def _test_none_grad_helper(self, dtype):
prog = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(
main_program=prog, startup_program=startup_program):
clip = fluid.clip.GradientClipByGlobalNorm(self.clip_norm)
x = fluid.default_main_program().global_block().create_parameter(
name="x", shape=[2, 3], dtype=dtype)
y = fluid.default_main_program().global_block().create_parameter(
name="y", shape=[2, 3], dtype=dtype)
# (x, None) should not be returned
params_grads = [(x, None), (x, y), (y, x)]
params_grads = clip(params_grads)
self.assertTrue(
len(params_grads) == 2,
"ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!"
)
ops = [op.type for op in x.block.ops]
return ops
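# A minimal, self-contained numpy sketch (not part of the test suite) of the rule that
# check_clip_result above verifies: every gradient is rescaled by
# clip_norm / max(clip_norm, global_norm), so the joint norm never exceeds clip_norm.
import numpy as np

def _clip_by_global_norm(grads, clip_norm):
    """Reference global-norm clipping for a list of plain numpy arrays."""
    global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
    scale = clip_norm / max(clip_norm, global_norm)
    return [g * scale for g in grads]
# e.g. _clip_by_global_norm([np.ones((2, 2)), np.full(3, 0.5)], clip_norm=0.2)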
class TestGradientClipByNorm(TestGradientClip):
def init(self):
self.clip_norm = 0.2
def check_clip_result(self, out, out_clip):
for u, v in zip(out, out_clip):
norm = np.sqrt(np.sum(np.power(u, 2)))
scale = self.clip_norm / np.maximum(self.clip_norm, norm)
u = u * scale
self.assertTrue(
np.allclose(
a=u, b=v, rtol=1e-5, atol=1e-8),
"gradient clip by norm has wrong results!")
# test whether the output is right when using grad_clip
def test_gradient_clip(self):
def func(params_grads):
clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
return clip(params_grads)
self.clip_gradient = func
self.check_gradient_clip(fluid.CPUPlace())
# if grad is None or does not need clipping
def test_none_grad(self):
clip = fluid.clip.GradientClipByNorm(self.clip_norm)
x = fluid.default_main_program().global_block().create_parameter(
name="x", shape=[2, 3], dtype="float32", need_clip=False)
y = fluid.default_main_program().global_block().create_parameter(
name="y", shape=[2, 3], dtype="float32", need_clip=False)
# (x, None) should not be returned
params_grads = [(x, None), (x, y)]
params_grads = clip(params_grads)
self.assertTrue(
len(clip(params_grads)) == 1,
"ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!"
)
self.assertTrue(
params_grads[0][1].name == 'y',
"ClipGradByNorm: grad should not be clipped when filtered out!")
class TestGradientClipByValue(TestGradientClip):
def init(self):
self.max = 0.2
self.min = 0.1
def check_clip_result(self, out, out_clip):
for i, v in enumerate(out):
out[i] = np.clip(v, self.min, self.max)
for u, v in zip(out, out_clip):
u = np.clip(u, self.min, self.max)
self.assertTrue(
np.allclose(
a=u, b=v, rtol=1e-6, atol=1e-8),
"gradient clip by value has wrong results!")
# test whether the output is right when using grad_clip
def test_gradient_clip(self):
def func(params_grads):
clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
return clip(params_grads)
self.clip_gradient = func
self.check_gradient_clip(fluid.CPUPlace())
# if grad is None or does not need clipping
def test_none_grad(self):
clip = fluid.clip.GradientClipByValue(self.max, self.min)
x = fluid.default_main_program().global_block().create_parameter(
name="x", shape=[2, 3], dtype="float32", need_clip=False)
y = fluid.default_main_program().global_block().create_parameter(
name="y", shape=[2, 3], dtype="float32", need_clip=False)
# (x, None) should not be returned
params_grads = [(x, None), (x, y)]
params_grads = clip(params_grads)
self.assertTrue(
len(clip(params_grads)) == 1,
"ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!"
)
self.assertTrue(
params_grads[0][1].name == 'y',
"ClipGradByValue: grad should not be clipped when filtered out!")
class TestDygraphGradientClip(unittest.TestCase):
def test_gradient_clip(self):
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(5, 5)
inputs = fluid.layers.uniform_random(
[16, 5], min=-10, max=10).astype('float32')
out = linear(fluid.dygraph.to_variable(inputs))
loss = fluid.layers.reduce_mean(out)
loss.backward()
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=0.0,
parameter_list=linear.parameters(),
grad_clip=fluid.clip.GradientClipByGlobalNorm(0.1))
self.check_clip_result(loss, sgd_optimizer)
def check_clip_result(self, loss, optimizer):
pass
class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):
def setUp(self):
self.clip_norm = 0.8
self.clip1 = fluid.clip.GradientClipByGlobalNorm(
clip_norm=self.clip_norm)
self.clip2 = fluid.clip.GradientClipByGlobalNorm(
clip_norm=self.clip_norm)
def check_clip_result(self, loss, optimizer):
# if grad is None
x = fluid.dygraph.to_variable(
np.array([2, 3]).astype("float32"), name="x")
y = fluid.dygraph.to_variable(
np.array([3, 4]).astype("float32"), name="y")
assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2
# get params and grads from network
opt, params_grads = optimizer.minimize(loss)
_, grads = zip(*params_grads)
params_grads = self.clip2(params_grads)
_, grads_clip = zip(*params_grads)
global_norm = 0
for u in grads:
u = u.numpy()
global_norm += np.sum(np.power(u, 2))
global_norm = np.sqrt(global_norm)
global_norm_clip = 0
for v in grads_clip:
v = v.numpy()
global_norm_clip += np.sum(np.power(v, 2))
global_norm_clip = np.sqrt(global_norm_clip)
a = np.minimum(global_norm, self.clip_norm)
b = global_norm_clip
self.assertTrue(
np.isclose(
a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but recieved:%f"
% (a, b))
class TestDygraphGradientClipByNorm(TestDygraphGradientClip):
def setUp(self):
self.clip_norm = 0.8
self.clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
def check_clip_result(self, loss, optimizer):
# if grad is None
x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"))
assert len(self.clip([(x, None)])) == 0
# get params and grads from network
self.clip([(fluid.dygraph.to_variable(np.array([2, 3])), None)])
opt, params_grads = optimizer.minimize(loss)
_, grads = zip(*params_grads)
params_grads = self.clip(params_grads)
_, grads_clip = zip(*params_grads)
for u, v in zip(grads, grads_clip):
u = u.numpy()
v = v.numpy()
a = np.sqrt(np.sum(np.power(u, 2)))
a = np.minimum(a, self.clip_norm)
b = np.sqrt(np.sum(np.power(v, 2)))
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision.models as models
from PIL import Image
import json
from matplotlib.ticker import FormatStrFormatter
import pandas as pd
import argparse
# Argparse config section
parser = argparse.ArgumentParser(description='Testing a Neural Network with the test sample')
parser.add_argument('--checkpoint_path', type=str,
help='path to recover and reload checkpoint',default='checkpoint.pth')
parser.add_argument('--image_path', type=str,
help='/path/to/image',default='flowers/test/71/image_04512.jpg')
parser.add_argument('--top_k', type=int,
help='top k: top categories by prob predictions',default=5)
parser.add_argument('--cat_to_name', type=str,
help='category name mapping',default='cat_to_name.json')
parser.add_argument('--device', type=str,
help='Choose -cuda- gpu or internal -cpu-',default='cuda')
parser.add_argument('--network', type=str,
help='Torchvision pretrained model. May choose densenet121 too', default='vgg19')
parser.add_argument('data_dir', type=str,
help='Path to root data directory', default='/flowers/')
args = parser.parse_args()
checkpoint_path = args.checkpoint_path
image_path = args.image_path
top_k = args.top_k
device = args.device
cat_to_name = args.cat_to_name
network = args.network
data_dir = args.data_dir
# Label mapping
with open(cat_to_name, 'r') as f:
cat_to_name = json.load(f)
# Loading the checkpoint
def load_checkpoint(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
learning_rate = checkpoint['learning_rate']
class_to_idx = checkpoint['class_to_idx']
model = build_model(hidden_layers, class_to_idx)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a Numpy array
'''
img = Image.open(image)
W, H = img.size # the W:width and H:height
size = 224
# TODO: Process a PIL image for use in a PyTorch model
if H > W:
H,W = int(max(H * size / W, 1)),int(size)
else:
W,H = int(max(W * size / H, 1)),int(size)
x0,y0 = ((W - size) / 2) ,((H - size) / 2)
x1,y1 = (x0 + size),(y0 + size)
resized_image = img.resize((W, H))
# Crop
cropped_image = resized_image.crop((x0, y0, x1, y1))  # crop from the resized image
# Normalize
np_image = np.array(cropped_image) / 255.
mean = np.array([0.485, 0.456, 0.406])
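# Hedged sketch (not from the original script, which is truncated above): such a
# preprocessing routine is typically finished by standardizing with the ImageNet
# statistics and moving channels first for PyTorch. The std values, the transpose and
# the helper name below are assumptions, not the author's code.
import numpy as np

def _finish_normalization(np_image,
                          mean=np.array([0.485, 0.456, 0.406]),
                          std=np.array([0.229, 0.224, 0.225])):
    """Standardize an HxWx3 float image in [0, 1] and reorder it to CxHxW."""
    np_image = (np_image - mean) / std
    return np_image.transpose((2, 0, 1))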
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Histogram based calibrators"""
from collections import Counter
import numpy as np
from scipy.stats import entropy
from absl import logging
import torch
from pytorch_quantization.calib.calibrator import _Calibrator
from pytorch_quantization.tensor_quant import fake_tensor_quant
class HistogramCalibrator(_Calibrator):
"""Unified histogram calibrator
Histogram will be only collected once. compute_amax() performs entropy, percentile, or mse
calibration based on arguments
Args:
num_bits: An integer. Number of bits of quantization.
axis: A tuple. see QuantDescriptor.
unsigned: A boolean. using unsigned quantization.
num_bins: An integer. Number of histogram bins. Default 2048.
grow_method: A string. DEPRECATED. default None.
skip_zeros: A boolean. If True, skips zeros when collecting data for histogram. Default False.
"""
def __init__(self, num_bits, axis, unsigned, num_bins=2048, grow_method=None, skip_zeros=False):
super(HistogramCalibrator, self).__init__(num_bits, axis, unsigned)
self._num_bins = num_bins
self._skip_zeros = skip_zeros
self._calib_bin_edges = None
self._calib_hist = None
if axis is not None:
raise NotImplementedError("Calibrator histogram collection only supports per tensor scaling")
if grow_method is not None:
logging.warning("grow_method is deprecated. Got %s, ingored!", grow_method)
def collect(self, x):
"""Collect histogram"""
if torch.min(x) < 0.:
logging.log_first_n(
logging.INFO,
("Calibrator encountered negative values. It shouldn't happen after ReLU. "
"Make sure this is the right tensor to calibrate."),
1)
x = x.abs()
x_np = x.cpu().detach().numpy()
if self._skip_zeros:
x_np = x_np[np.where(x_np != 0)]
if self._calib_bin_edges is None and self._calib_hist is None:
# first time it uses num_bins to compute histogram.
self._calib_hist, self._calib_bin_edges = np.histogram(x_np, bins=self._num_bins)
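# Hedged sketch (an assumption, not the pytorch_quantization implementation) of one way
# later collect() calls can fold a new batch into an existing histogram while reusing
# the bin edges computed on the first call; samples beyond the last edge are simply
# dropped here, which a production implementation would instead handle by growing edges.
import numpy as np

def _accumulate_histogram(calib_hist, calib_bin_edges, x_np):
    """Add new samples to an existing histogram, updating counts but not edges."""
    new_hist, _ = np.histogram(x_np, bins=calib_bin_edges)
    return calib_hist + new_hist, calib_bin_edges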
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 09:30:06 2019
@author: Daniel
"""
from neuron import h, gui
import numpy as np
import matplotlib.pyplot as plt
# locate nrnmech.dll and load it into h
dll_dir = ("C:\\Users\\Daniel\\Dropbox\\MEC Project\\BarisProject\\tmgexp2syn"
"\\nrnmech.dll")
h.nrn_load_dll(dll_dir)
# Set some parameters
tau_1 = 2
tau_2 = 20
gmax = 1
pas_e = -65
syn_e = 0
tau_facil = 500
tau_rec = 0
stim_interval = 200
stim_number = 10
stim_noise = 0
U = 0.1
weight=1
# Create the sections where the synapses will be attached
exp2syn_sec = h.Section()
tmgsyn_sec = h.Section()
tmgexp2syn_sec = h.Section()
secs = [exp2syn_sec, tmgsyn_sec, tmgexp2syn_sec]
for sec in secs:
sec.insert('pas')
sec(0.5).pas.e = pas_e
# Create the synapses
exp2syn_syn = h.Exp2Syn(exp2syn_sec(0.5))
exp2syn_syn.tau1 = tau_1
exp2syn_syn.tau2 = tau_2
exp2syn_syn.e = syn_e
exp2syn_syn.g = gmax
tmgsyn_syn = h.tmgsyn(tmgsyn_sec(0.5))
tmgsyn_syn.tau_1 = tau_2 # Note that tau_2 is the decay tau at script level!
tmgsyn_syn.tau_facil = tau_facil
tmgsyn_syn.tau_rec = tau_rec
tmgsyn_syn.e = syn_e
#tmgsyn_syn.g = gmax
tmgsyn_syn.U = U
tmgexp2syn_syn = h.tmgexp2syn(tmgexp2syn_sec(0.5))
tmgexp2syn_syn.tau_1 = tau_1
tmgexp2syn_syn.tau_2 = tau_2
tmgexp2syn_syn.tau_facil = tau_facil
tmgexp2syn_syn.tau_rec = tau_rec
tmgexp2syn_syn.e = syn_e
#tmgexp2syn_syn.g = gmax
tmgexp2syn_syn.U = U
syns = [exp2syn_syn, tmgsyn_syn, tmgexp2syn_syn]
# Create the netstim object that feeds events into the synapses
netstim = h.NetStim()
netstim.start = 50
netstim.interval = stim_interval
netstim.number = stim_number
netstim.noise = stim_noise
# Connect the netstim with the synapses
exp2syn_netcon = h.NetCon(netstim, exp2syn_syn)
exp2syn_netcon.weight[0] = weight
tmgsyn_netcon = h.NetCon(netstim, tmgsyn_syn)
tmgsyn_netcon.weight[0] = weight
tmgexp2syn_netcon = h.NetCon(netstim, tmgexp2syn_syn)
tmgexp2syn_netcon.weight[0] = weight
# Setup the conductance recordings
exp2syn_rec = h.Vector()
exp2syn_rec.record(exp2syn_syn._ref_g)
tmgsyn_rec = h.Vector()
tmgsyn_rec.record(tmgsyn_syn._ref_g)
tmgexp2syn_rec = h.Vector()
tmgexp2syn_rec.record(tmgexp2syn_syn._ref_g)
# Run the simulation
h.cvode.active(0)
dt = 0.1
h.steps_per_ms = 1.0/dt
h.finitialize(-65)
h.t = -2000
h.secondorder = 0
h.dt = 10
while h.t < -100:
h.fadvance()
h.secondorder = 2
h.t = 0
h.dt = 0.1
"""Setup run control for -100 to 1500"""
h.frecord_init() # Necessary after changing t to restart the vectors
while h.t < 500:
h.fadvance()
# Plotting
exp2syn_rec = np.array(exp2syn_rec)
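# Hedged sketch (not part of the original script) of how the plotting section might
# continue: convert each recorded h.Vector to numpy and plot conductance against a time
# axis rebuilt from the fixed dt of 0.1 ms used above. The helper name is an assumption.
import numpy as np
import matplotlib.pyplot as plt

def _plot_conductances(recordings, labels, dt=0.1):
    """Plot each recorded conductance trace versus a reconstructed time axis."""
    fig, ax = plt.subplots()
    for rec, label in zip(recordings, labels):
        g = np.array(rec)
        ax.plot(np.arange(len(g)) * dt, g, label=label)
    ax.set_xlabel('time (ms)')
    ax.set_ylabel('conductance')
    ax.legend()
    return fig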
import unittest
import numpy as np
import torch
from h0rton.h0_inference import DiagonalGaussianBNNPosterior, LowRankGaussianBNNPosterior, DoubleLowRankGaussianBNNPosterior, FullRankGaussianBNNPosterior, DoubleGaussianBNNPosterior
from h0rton.h0_inference.gaussian_bnn_posterior_cpu import sigmoid
class TestGaussianBNNPosterior(unittest.TestCase):
"""A suite of tests verifying that the input PDFs and the sample distributions
match.
"""
@classmethod
def setUpClass(cls):
cls.sample_seed = 1113
def setUp(self):
np.random.seed(self.sample_seed)
def test_diagonal_gaussian_bnn_posterior(self):
"""Test the sampling of `DiagonalGaussianBNNPosterior`
"""
Y_dim = 2
batch_size = 3
rank = 2
device = torch.device('cpu')
mu = np.random.randn(batch_size, Y_dim)
logvar = np.abs(np.random.randn(batch_size, Y_dim))
pred = np.concatenate([mu, logvar], axis=1)
# Get h0rton samples
#Y_mean = np.random.randn(batch_size, Y_dim)
#Y_std = np.abs(np.random.randn(batch_size, Y_dim))
Y_mean = np.zeros(Y_dim)
Y_std = np.ones(Y_dim)
diagonal_bnn_post = DiagonalGaussianBNNPosterior(Y_dim, device, Y_mean, Y_std)
diagonal_bnn_post.set_sliced_pred(torch.Tensor(pred))
h0rton_samples = diagonal_bnn_post.sample(10**7, self.sample_seed)
# Get h0rton summary stats
h0rton_mean = np.mean(h0rton_samples, axis=1)
h0rton_covmat = np.zeros((batch_size, Y_dim, Y_dim))
exp_covmat = np.zeros((batch_size, Y_dim, Y_dim))
for b in range(batch_size):
cov_b = np.cov(h0rton_samples[b, :, :].swapaxes(0, 1), ddof=0)
h0rton_covmat[b, :, :] = cov_b
exp_covmat[b, :, :] += np.diagflat(np.exp(logvar[b, :]))
# Get expected summary stats
exp_mean = mu
np.testing.assert_array_almost_equal(h0rton_mean, exp_mean, decimal=2)
np.testing.assert_array_almost_equal(h0rton_covmat, exp_covmat, decimal=2)
def test_low_rank_gaussian_bnn_posterior(self):
"""Test the sampling of `LowRankGaussianBNNPosterior`
"""
Y_dim = 2
batch_size = 3
rank = 2
device = torch.device('cpu')
mu = np.random.randn(batch_size, Y_dim)
logvar = np.abs(np.random.randn(batch_size, Y_dim))
F = np.random.randn(batch_size, Y_dim*rank)
F_unraveled = F.reshape(batch_size, Y_dim, rank)
FFT = np.matmul(F_unraveled, np.swapaxes(F_unraveled, 1, 2))
pred = np.concatenate([mu, logvar, F], axis=1)
# Get h0rton samples
#Y_mean = np.random.randn(batch_size, Y_dim)
#Y_std = np.abs(np.random.randn(batch_size, Y_dim))
Y_mean = np.zeros(Y_dim)
Y_std = np.ones(Y_dim)
low_rank_bnn_post = LowRankGaussianBNNPosterior(Y_dim, device, Y_mean, Y_std)
low_rank_bnn_post.set_sliced_pred(torch.Tensor(pred),)
h0rton_samples = low_rank_bnn_post.sample(10**7, self.sample_seed)
#import matplotlib.pyplot as plt
#plt.hist(h0rton_samples[0, :, 0], bins=30)
#plt.axvline(mu[0, 0], color='r')
#plt.show()
# Get h0rton summary stats
h0rton_mean = np.mean(h0rton_samples, axis=1)
h0rton_covmat = np.empty((batch_size, Y_dim, Y_dim))
exp_covmat = FFT
for b in range(batch_size):
cov_b = np.cov(h0rton_samples[b, :, :].swapaxes(0, 1), ddof=0)
h0rton_covmat[b, :, :] = cov_b
exp_covmat[b, :, :] += np.diagflat(np.exp(logvar[b, :]))
# Get expected summary stats
exp_mean = mu
np.testing.assert_array_almost_equal(h0rton_mean, exp_mean, decimal=2)
np.testing.assert_array_almost_equal(h0rton_covmat, exp_covmat, decimal=2)
def test_double_low_rank_gaussian_bnn_posterior(self):
"""Test the sampling of `DoubleLowRankGaussianBNNPosterior`
Note
----
Only compares the true and sample means
"""
Y_dim = 2
batch_size = 3
rank = 2
device = torch.device('cpu')
# First gaussian
mu = np.random.randn(batch_size, Y_dim)
logvar = np.abs(np.random.randn(batch_size, Y_dim))
F = np.random.randn(batch_size, Y_dim*rank)
F_unraveled = F.reshape(batch_size, Y_dim, rank)
FFT = np.matmul(F_unraveled, np.swapaxes(F_unraveled, 1, 2))
# Second gaussian
mu2 = np.random.randn(batch_size, Y_dim)
logvar2 = np.abs(np.random.randn(batch_size, Y_dim))
F2 = np.random.randn(batch_size, Y_dim*rank)
F2_unraveled = F2.reshape(batch_size, Y_dim, rank)
FFT2 = np.matmul(F2_unraveled, np.swapaxes(F2_unraveled, 1, 2))
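# Minimal numpy sketch (separate from the tests above) of the covariance the low-rank
# posterior tests compare against: a low-rank term F F^T plus a diagonal exp(logvar).
import numpy as np

def _low_rank_plus_diag_cov(F, logvar):
    """Covariance F @ F.T + diag(exp(logvar)) for a single batch element."""
    return F @ F.T + np.diag(np.exp(logvar))
# e.g. _low_rank_plus_diag_cov(np.random.randn(2, 2), np.abs(np.random.randn(2)))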
import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Stiefel
import pymanopt.tools.testing as testing
from pymanopt.tools.multi import *
import autograd.numpy as npa
class TestSingleStiefelManifold(unittest.TestCase):
def setUp(self):
self.m = m = 20
self.n = n = 2
self.k = k = 1
self.man = Stiefel(m, n, k=k)
self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u) +
npa.dot(u.T, x)) / 2
def test_dim(self):
assert self.man.dim == 0.5 * self.n * (2 * self.m - self.n - 1)
# def test_typicaldist(self):
# def test_dist(self):
def test_inner(self):
X = la.qr(rnd.randn(self.m, self.n))[0]
A, B = rnd.randn(2, self.m, self.n)
import datajoint as dj
import numpy as np
from . import lab, experiment, ccf, ephys, get_schema_name
from pipeline.plot import unit_characteristic_plot
[lab, experiment, ccf, ephys] # schema imports only
schema = dj.schema(get_schema_name('histology'))
@schema
class CCFToMRITransformation(dj.Imported):
definition = """ # one per project - a mapping between CCF coords and MRI coords (e.g. average MRI from 10 brains)
project_name: varchar(32) # e.g. MAP
"""
class Landmark(dj.Part):
definition = """
-> master
landmark_id: int
---
landmark_name='': varchar(32)
mri_x: float # (mm)
mri_y: float # (mm)
mri_z: float # (mm)
ccf_x: float # (um)
ccf_y: float # (um)
ccf_z: float # (um)
"""
@schema
class SubjectToCCFTransformation(dj.Imported):
definition = """ # one per subject
-> lab.Subject
"""
class Landmark(dj.Part):
definition = """
-> master
landmark_name: char(8) # pt-N from landmark file.
---
subj_x: float # (a.u.)
subj_y: float # (a.u.)
subj_z: float # (a.u.)
ccf_x: float # (um)
ccf_y: float # (um)
ccf_z: float # (um)
"""
@schema
class ElectrodeCCFPosition(dj.Manual):
definition = """
-> ephys.ProbeInsertion
"""
class ElectrodePosition(dj.Part):
definition = """
-> master
-> lab.ElectrodeConfig.Electrode
-> ccf.CCF
---
mri_x=null: float # (mm)
mri_y=null: float # (mm)
mri_z=null: float # (mm)
"""
class ElectrodePositionError(dj.Part):
definition = """
-> master
-> lab.ElectrodeConfig.Electrode
-> ccf.CCFLabel
ccf_x: int # (um)
ccf_y: int # (um)
ccf_z: int # (um)
---
mri_x=null: float # (mm)
mri_y=null: float # (mm)
mri_z=null: float # (mm)
"""
@schema
class LabeledProbeTrack(dj.Manual):
definition = """
-> ephys.ProbeInsertion
---
labeling_date=NULL: date
dye_color=NULL: varchar(32)
"""
class Point(dj.Part):
definition = """
-> master
order: int
shank: int
---
ccf_x: float # (um)
ccf_y: float # (um)
ccf_z: float # (um)
"""
@schema
class InterpolatedShankTrack(dj.Computed):
definition = """
-> ElectrodeCCFPosition
shank: int
"""
class Point(dj.Part):
definition = """ # CCF coordinates of all points on this interpolated shank track
-> master
-> ccf.CCF
"""
class BrainSurfacePoint(dj.Part):
definition = """ # CCF coordinates of the brain surface intersection point with this shank
-> master
---
-> ccf.CCF
"""
class DeepestElectrodePoint(dj.Part):
definition = """ # CCF coordinates of the most ventral recording electrode site (deepest in the brain)
-> master
---
-> ccf.CCF
"""
key_source= ElectrodeCCFPosition & LabeledProbeTrack.Point
def make(self, key):
probe_insertion = ephys.ProbeInsertion & key
shanks = probe_insertion.aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shanks='GROUP_CONCAT(DISTINCT shank SEPARATOR ", ")').fetch1('shanks')
shanks = np.array(shanks.split(', ')).astype(int)
shank_points, brain_surface_points, last_electrode_points = [], [], []
for shank in shanks:
# shank points
_, shank_ccfs = retrieve_pseudocoronal_slice(probe_insertion, shank)
points = [{**key, 'shank': shank, 'ccf_label_id': 0,
'ccf_x': ml, 'ccf_y': dv, 'ccf_z': ap} for ml, dv, ap in shank_ccfs]
shank_points.extend(points)
# brain surface site
brain_surface_points.append(points[0])
# last electrode site
last_electrode_site = (
lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion * ElectrodeCCFPosition.ElectrodePosition
& key & {'shank': shank}).fetch(
'ccf_x', 'ccf_y', 'ccf_z', order_by='ccf_y DESC', limit=1)
last_electrode_site = np.array([*last_electrode_site]).squeeze()
last_electrode_points.append({**key, 'shank': shank, 'ccf_label_id': 0,
'ccf_x': last_electrode_site[0],
'ccf_y': last_electrode_site[1],
'ccf_z': last_electrode_site[2]})
self.insert({**key, 'shank': shank} for shank in shanks)
self.Point.insert(shank_points)
self.BrainSurfacePoint.insert(brain_surface_points)
self.DeepestElectrodePoint.insert(last_electrode_points)
@schema
class EphysCharacteristic(dj.Imported):
definition = """
-> ephys.ProbeInsertion
-> lab.ElectrodeConfig.Electrode
---
lfp_theta_power: float
lfp_beta_power: float
lfp_gama_power: float
waveform_amplitude: float
waveform_width: float
mua: float
photstim_effect: float
"""
@schema
class ArchivedElectrodeHistology(dj.Manual):
definition = """
-> ephys.ProbeInsertion
archival_time: datetime # time of archiving
---
archival_note='': varchar(2000) # user notes about this particular Electrode CCF being archived
archival_hash: varchar(32) # hash of Electrode CCF position, prevent duplicated archiving
unique index (archival_hash)
"""
class ElectrodePosition(dj.Part):
definition = """
-> master
-> lab.ElectrodeConfig.Electrode
-> ccf.CCF
---
mri_x=null : float # (mm)
mri_y=null : float # (mm)
mri_z=null : float # (mm)
"""
class ElectrodePositionError(dj.Part):
definition = """
-> master
-> lab.ElectrodeConfig.Electrode
-> ccf.CCFLabel
ccf_x : int # (um)
ccf_y : int # (um)
ccf_z : int # (um)
---
mri_x=null : float # (mm)
mri_y=null : float # (mm)
mri_z=null : float # (mm)
"""
class LabeledProbeTrack(dj.Part):
definition = """
-> master
---
labeling_date=NULL: date
dye_color=NULL: varchar(32)
"""
class ProbeTrackPoint(dj.Part):
definition = """
-> master.LabeledProbeTrack
order: int
shank: int
---
ccf_x: float # (um)
ccf_y: float # (um)
ccf_z: float # (um)
"""
# ====================== HELPER METHODS ======================
def retrieve_pseudocoronal_slice(probe_insertion, shank_no=1):
"""
For each shank, retrieve the pseudocoronal slice of the brain that the shank traverses
This function returns a tuple of 2 things:
1. an array of (CCF_DV, CCF_ML, CCF_AP, color_codes) for all points in the pseudocoronal slice
2. an array of (CCF_DV, CCF_ML, CCF_AP) for all points on the interpolated track of the shank
"""
probe_insertion = probe_insertion.proj()
# ---- Electrode sites ----
annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion
* ElectrodeCCFPosition.ElectrodePosition
& probe_insertion & {'shank': shank_no})
electrode_coords = np.array(list(zip(*annotated_electrodes.fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y')))) # (AP, DV, ML)
probe_track_coords = np.array(list(zip(*(LabeledProbeTrack.Point
& probe_insertion & {'shank': shank_no}).fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y'))))
coords = np.vstack([electrode_coords, probe_track_coords])
# ---- linear fit of probe in DV-AP axis ----
X = np.asmatrix(np.hstack((np.ones((coords.shape[0], 1)), coords[:, 1][:, np.newaxis]))) # DV
y = np.asmatrix(coords[:, 0]).T # AP
XtX = X.T * X
Xty = X.T * y
ap_fit = np.linalg.solve(XtX, Xty)
y = np.asmatrix(coords[:, 2])
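# Hedged sketch (an assumption, not the original pipeline code, which is truncated
# above) of the least-squares step used for the AP fit and presumably mirrored for the
# ML axis: fit response = b0 + b1 * dv by solving the normal equations.
import numpy as np

def _fit_against_dv(dv, response):
    """Return the 2x1 array [intercept; slope] solving (X^T X) beta = X^T y with X = [1, dv]."""
    X = np.hstack((np.ones((dv.shape[0], 1)), dv[:, np.newaxis]))
    return np.linalg.solve(X.T @ X, X.T @ response[:, np.newaxis])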
from sklearn.model_selection import KFold, StratifiedKFold
import pandas as pd
import numpy as np
from bestScore import *
from xgboost import XGBClassifier
from sklearn import linear_model
from sklearn import ensemble
# KFold sequential (non-shuffled) split
def myStacking2(baseModelList, kfold, train_data, test_data, SecondModel):
x_train = train_data.iloc[:, :-1]
y_train = train_data.iloc[:, -1]
kf = KFold(n_splits=kfold, shuffle=False, random_state=37)
layer2_train = pd.DataFrame(np.zeros([train_data.shape[0], kfold]))
layer2_test = pd.DataFrame(np.zeros([test_data.shape[0], kfold]))
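# Hedged sketch (an assumption, not the truncated function body above) of how a KFold
# stacking loop usually proceeds from here: fit each base model on the training folds,
# write its out-of-fold predictions into the level-2 training frame, and average its
# test-set predictions across folds. predict() could be swapped for predict_proba().
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import KFold

def _stacking_features(base_model, X, y, X_test, n_splits=5):
    """Out-of-fold predictions for one base model plus fold-averaged test predictions."""
    kf = KFold(n_splits=n_splits, shuffle=False)
    oof = np.zeros(len(X))
    test_pred = np.zeros(len(X_test))
    for train_idx, valid_idx in kf.split(X):
        model = clone(base_model)
        model.fit(X[train_idx], y[train_idx])
        oof[valid_idx] = model.predict(X[valid_idx])
        test_pred += model.predict(X_test) / n_splits
    return oof, test_pred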
from src.util.prioritized_replay_memory import PrioritizedReplayMemory
from src.problem.tsptw.environment.tsptw import TSPTW
from src.problem.tsptw.learning.brain_dqn import BrainDQN
from src.problem.tsptw.environment.environment import Environment
import dgl
import numpy as np
import sys
import time
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
# definition of constants
MEMORY_CAPACITY = 50000
GAMMA = 1
STEP_EPSILON = 5000.0
UPDATE_TARGET_FREQUENCY = 500
VALIDATION_SET_SIZE = 100
RANDOM_TRIAL = 100
MAX_BETA = 10
MIN_VAL = -1000000
MAX_VAL = 1000000
EPS = 1e-7
class TrainerDQN:
"""
Definition of the Trainer DQN for the TSPTW
"""
def __init__(self, args):
"""
Initialization of the trainer
:param args: argparse object taking hyperparameters and instance configuration
"""
self.args = args
np.random.seed(self.args.seed)
self.instance_size = self.args.n_city
# Because we begin at a given city, we have one city less to visit
self.n_action = self.instance_size - 1
self.num_node_feats = 6
self.num_edge_feats = 5
self.reward_scaling = 0.001
self.validation_set = TSPTW.generate_dataset(size=VALIDATION_SET_SIZE, n_city=self.args.n_city,
grid_size=self.args.grid_size, max_tw_gap=self.args.max_tw_gap,
max_tw_size=self.args.max_tw_size, is_integer_instance=False,
seed=np.random.randint(10000))
import numpy as np
import scipy
import scipy.stats as stats
import copy
import levmar
from util import data_validation
from util.distance import euclidean_squared
import sys
def eprint(*args, **kwargs): #prints errors/warnings to stderr
print(*args, file=sys.stderr, **kwargs)
class FLog(object):
"""
Base class for path loss functions. Each inherited class needs to implement
(1) mean_val,
(2) implement params_init, can be a fixed, educated random or random init.
data:
Dictionary with entries X, Y and Var
X: Location inputs X, numpy array [p,2]
Y: Readings at each position X, numpy array [p,n] or [p,]
Var: Readings variance, numpy array [p,n] or [p,]
kwargs
debug: print debug statements - default 0
params_0: [n,np] numpy array, with np being the number of parameters of the
particular model. It forces initial parameters - default None
"""
def __init__(self,data,**kwargs):
self.data = data_validation.data_structure(data)
self.debug = kwargs.get('debug',0)
self.name = 'Log Function with levmar optimization'
#self.params = kwargs.get('params_0',None)
self.params = np.ones((self.data['Y'].shape[1],4))
def nll(self,dataTest=None):
"""
Negative log likelihood
dataTest (Optional):
If declared, will be used instead of self.data for computing the nll
"""
if dataTest is None:
data = self.data
else:
data = data_validation.data_structure(dataTest)
Ys = self.mean_val(data['X']) #bounded estimation
log_prob = stats.norm.logpdf(Ys,loc=data['Y'],scale=data['Var']**0.5)
return -np.sum(log_prob)
def cv(self,dataTest=None):
"""
Cross validation value used for comparing different function optimization outputs
can be computed with a separate dataset test or with training dataset
<math>
nll(testing dataset)/nll(zero function)
dataTest (Optional):
If declared, will be used instead of self.data for computing the nll
"""
if dataTest is None:
data = self.data
else:
data = data_validation.data_structure(dataTest)
nll_f = self.nll(dataTest=data)
nll_z = -np.sum(stats.norm.logpdf(np.zeros_like(data['Y']),loc=data['Y'],
scale=data['Var']**0.5))
return nll_f/nll_z
def optimize(self, AP='all', ntimes = 1, verbose=False):
"""
Main optimization function
Call for levmar optimization
TODO: add openMP to levmar
"""
# Optimization through levmar
#if verbose:
eprint ('[Class C] Initializing optimization')
nap = self.data['Y'].shape[1]
x_list = self.data['X'][:,0].tolist()
y_list = self.data['X'][:,1].tolist()
z_list = self.data['Y'].T.flatten().tolist()
eprint ('[Class C] p_estimate ... ')
p_estimate = levmar.optimize(x_list,y_list,z_list)
eprint ('[Class C] p_estimate ... Done')
self.params = np.reshape(np.asarray(p_estimate),self.params.shape)
def new_data(self):
"""
Outputs a new dictionary where entry 'Y' has been replaced by the difference between
the original data entry and the bounded predicted value from mean_val()
<math>
Y_new = Y - mean_val(X)
"""
data_new = copy.deepcopy(self.data)
Ys = self.mean_val(self.data['X'])
data_new['Y'] = data_new['Y']-Ys
return data_new
def __str__(self):
"""
Overloading __str__ to make print statement meaningful
<format>
Name :
data
X :
Y :
Var :
Log-likelihood :
Number of Parameters :
Parameters
... all parameters and their values ...
"""
to_print = '{} : {}\n'.format('Name'.ljust(32),self.name)
to_print = to_print + 'data\n'
to_print = to_print + ' {} : [{},{}]\n'.format('X'.ljust(30),
str(self.data['X'].shape[0]).rjust(4),
str(self.data['X'].shape[1]).rjust(4))
to_print = to_print + ' {} : [{},{}]\n'.format('Y'.ljust(30),
str(self.data['Y'].shape[0]).rjust(4),
str(self.data['Y'].shape[1]).rjust(4))
to_print = to_print + ' {} : [{},{}]\n'.format('Var'.ljust(30),
str(self.data['Var'].shape[0]).rjust(4),
str(self.data['Var'].shape[1]).rjust(4))
to_print = to_print + '{} : {}\n'.format('Log-likelihood'.ljust(32),
str(self.nll()))
to_print = to_print + '{} : {}\n'.format('Number of Parameters'.ljust(32),
str(np.prod(self.params.shape)))
### Parameters
to_print = to_print + 'Parameters\n'
nap, nps = self.params.shape
for ap,param in zip(range(nap),self.params):
to_print = to_print + '{}:'.format(str(ap).rjust(4)[:4])
for i in range(nps):
to_print = to_print + ' {:16.6f}'.format(param[i])
to_print = to_print + '\n'
return to_print
#################### To implement by derived classes ####################
def mean_val(self,X,params=None,bounded=True):
"""
Compute the estimated value of the function at points X - see .classes.PathLossFun
<math>
Pr = P0 - k*log10(|Xap-X|)
"""
if params is None:
params = self.params
XAP = params[:,2:4]
k = params[:,1]
P0 = params[:,0]
r2 = euclidean_squared(X,XAP)
out = P0 - k*np.log(r2)
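# Minimal illustrative example (names and sizes are made up) of the data dictionary the
# FLog class above expects: p locations X, readings Y for n access points, and
# per-reading variances Var.
import numpy as np

def _example_flog_data(p=50, n=3):
    """Build a toy data dict with the shapes documented in the FLog docstring."""
    return {
        'X': np.random.uniform(0, 10, size=(p, 2)),  # positions, shape [p, 2]
        'Y': np.random.randn(p, n),                  # readings,  shape [p, n]
        'Var': np.ones((p, n)),                      # variances, shape [p, n]
    }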
"""
Make a match param file
Note -- will have to edit the param file by hand to insert dmod and Av.
"""
from __future__ import print_function
import argparse
import os
import sys
import time
import numpy as np
import matplotlib.pylab as plt
from .config import PARAMEXT
from .fileio import read_calcsfh_param, calcsfh_input_parameter, match_filters
from .utils import replaceext, parse_pipeline
from .match_phot import make_phot
from .graphics import match_diagnostic
def move_on(okay, msg='0 to move on: '):
"""read and return raw input"""
okay = int(raw_input(msg))
time.sleep(1)
return okay
def within_limits(params, fakefile, offset=1.):
"""
Cull param cmd limits to that of the fake file
params : dict
calcsfh_input_parameter dictionary (only need CMD limits)
fakefile : string
match AST file
offset : float
mag below
"""
vimin = params['vimin']
vimax = params['vimax']
vmin = params['vmin']
imin = params['imin']
vmax = params['vmax']
imax = params['imax']
mag1in, mag2in, _, _ = np.loadtxt(fakefile, unpack=True)
colin = mag1in - mag2in
msg = 'Overwrote'
if vimin < colin.min():
vimin = colin.min()
msg += ' vimin'
if vimax > colin.max():
vimax = colin.max()
msg += ' vimax'
if vmin < mag1in.min():
vmin = mag1in.min()
msg += ' vmin'
if vmax > mag1in.max() - 1.:
vmax = mag1in.max() - 1.
msg += ' vmax'
if imin < mag2in.min():
imin = mag2in.min()
msg += ' imin'
if imax > mag2in.max() - 1:
imax = mag2in.max() - 1.
msg += ' imax'
msg += ' with values from matchfake'
print(msg)
params['vimin'] = vimin
params['vimax'] = vimax
params['vmin'] = vmin
params['imin'] = imin
params['vmax'] = vmax
params['imax'] = imax
return params
def find_match_limits(mag1, mag2, comp1=90., comp2=90., color_only=False,
xlim=None, ylim=None):
"""
click color limits on a cmd and mag1 mag2 limits on a plot of mag1 vs mag2
"""
col = mag1 - mag2
_, ax = plt.subplots()
ax.plot(col, mag2, 'o', color='k', ms=3, alpha=0.3, mec='none')
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim()[::-1])
if comp1 < 90.:
ax.hlines(comp2, *ax.get_xlim())
okay = 1
while okay == 1:
print('click color extrema')
pts = plt.ginput(2, timeout=-1)
colmin, colmax = [pts[i][0] for i in range(2)]
if colmin > colmax:
colmin, colmax = colmax, colmin
ax.vlines(colmin, *ax.get_ylim())
ax.vlines(colmax, *ax.get_ylim())
plt.draw()
okay = move_on(0)
plt.close()
inds, = np.nonzero((col < colmax) & (col > colmin))
data = (colmin, colmax)
if not color_only:
_, ax = plt.subplots()
ax.plot(mag1, mag2, '.', color='k')
okay = 1
while okay == 1:
print('click mag extrema')
pts = plt.ginput(2, timeout=-1)
mag1max, mag2max = pts[0]
mag1min, mag2min = pts[1]
if mag1min > mag1max:
mag1min, mag1max = mag1max, mag1min
if mag2min > mag2max:
mag2min, mag2max = mag2max, mag2min
ax.plot(mag1max, mag2max, 'o', color='r')
ax.plot(mag1min, mag2min, 'o', color='r')
plt.draw()
okay = move_on(okay)
plt.close()
inds, = np.nonzero((mag1 < mag1min) & (mag1 > mag1max) &
(mag2 < mag2min) & (mag2 > mag2max) &
(col < colmax) & (col > colmin))
_, ax = plt.subplots()
ax.plot(col, mag2, '.', color='k')
ax.plot(col[inds], mag2[inds], '.', color='r')
ax.set_ylim(ax.get_ylim()[::-1])
if comp2 < 90.:
ax.hlines(comp2, *ax.get_xlim(), lw=2)
ax.vlines(colmin, *ax.get_ylim(), lw=2)
ax.vlines(colmax, *ax.get_ylim(), lw=2)
if not color_only:
ax.hlines(mag2max, *ax.get_xlim(), lw=2)
data = (colmin, colmax, mag1min, mag1max, mag2min, mag2max)
plt.draw()
print(data)
return data
def find_gates(mag1, mag2, param):
"""Click 4 points to make an exclude gate -- does not work in calcsfh!"""
print('not supported')
sys.exit()
col = mag1 - mag2
lines = open(param, 'r').readlines()
colmin, colmax = map(float, lines[4].split()[3:-1])
mag1min, mag1max = map(float, lines[5].split()[:-1])
# mag2min, mag2max = map(float, lines[5].split()[:-1])
# click around
_, ax = plt.subplots()
ax.plot(col, mag2, ',', color='k', alpha=0.2)
ax.set_ylim(mag1max, mag1min)
ax.set_xlim(colmin, colmax)
okay = 1
while okay != 0:
print('click ')
pts = np.asarray(plt.ginput(n=4, timeout=-1))
exclude_gate = '1 {} 0 \n'.format(' '.join(['%.4f' % p
for p in pts.flatten()]))
pts = np.append(pts, pts[0])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from NumPyNet.layers import Connected_layer
from NumPyNet.activations import Logistic
from NumPyNet.activations import Tanh
from NumPyNet.utils import check_is_fitted
from NumPyNet.exception import LayerError
import numpy as np
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
class LSTM_layer(object):
def __init__(self, outputs, steps, input_shape=None, weights=None, bias=None, **kwargs):
'''
LSTM layer
Parameters
----------
outputs : integer, number of outputs of the layers
input_shape : tuple, default None. Shape of the input in the format (batch, w, h, c),
None is used when the layer is part of a Network model.
weights : array of shape (w * h * c, outputs), default is None. Weights of the dense layer.
If None, weights init is random.
bias : array of shape (outputs, ), default None. Bias of the connected layer.
If None, bias init is random
'''
if isinstance(outputs, int) and outputs > 0:
self.outputs = outputs
else :
raise ValueError('Parameter "outputs" must be an integer and > 0')
if isinstance(steps, int) and steps > 0:
self.steps = steps
else :
raise ValueError('Parameter "steps" must be an integer and > 0')
if weights is None:
weights = (None, None, None)
else:
if np.shape(weights)[0] != 2:
raise ValueError('Wrong number of init "weights". There are 2 connected layers into the LSTM cell.')
if bias is None:
bias = (None, None, None)
else:
if np.shape(bias)[0] != 2:
raise ValueError('Wrong number of init "biases". There are 2 connected layers into the LSTM cell.')
# if input shape is passed, init of weights, else done in __call__
if input_shape is not None:
self.input_shape = input_shape
b, w, h, c = input_shape
self.batch = b // self.steps
# self.batches = np.array_split(range(self.input_shape[0]), indices_or_sections=self.steps)
indices = np.arange(0, b, dtype='int64')
self.batches = np.lib.stride_tricks.as_strided(indices, shape=(b - self.steps + 1, self.steps), strides=(8, 8)).T.copy()
# print(self.batches)
_, w, h, c = self.input_shape
initial_shape = (self.batch, w, h, c)
self.uf = Connected_layer(outputs, activation='Linear', input_shape=initial_shape, weights=weights[0], bias=bias[0])
self.ui = Connected_layer(outputs, activation='Linear', input_shape=initial_shape, weights=weights[0], bias=bias[0])
self.ug = Connected_layer(outputs, activation='Linear', input_shape=initial_shape, weights=weights[0], bias=bias[0])
self.uo = Connected_layer(outputs, activation='Linear', input_shape=initial_shape, weights=weights[0], bias=bias[0])
self.wf = Connected_layer(outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs), weights=weights[1], bias=bias[1])
self.wi = Connected_layer(outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs), weights=weights[1], bias=bias[1])
self.wg = Connected_layer(outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs), weights=weights[1], bias=bias[1])
self.wo = Connected_layer(outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs), weights=weights[1], bias=bias[1])
self.uf.input_shape = (self.input_shape[0], w, h, c)
self.ui.input_shape = (self.input_shape[0], w, h, c)
self.ug.input_shape = (self.input_shape[0], w, h, c)
self.uo.input_shape = (self.input_shape[0], w, h, c)
self.wf.input_shape = (self.input_shape[0], w, h, self.outputs)
self.wi.input_shape = (self.input_shape[0], w, h, self.outputs)
self.wg.input_shape = (self.input_shape[0], w, h, self.outputs)
self.wo.input_shape = (self.input_shape[0], w, h, self.outputs)
self.cell = np.empty(shape=self.uf.out_shape, dtype=float)
self.output = np.empty(shape=self.uf.out_shape, dtype=float)
else:
self.input_shape = None
self.cell = None
self.output = None
# TODO: remember to overwrite this layer when the input_shape is known!
self.batch = None
self.batches = None
initial_shape = None
self.delta = None
self.optimizer = None
def __str__(self):
return '\n\t'.join(('LSTM Layer: {:d} inputs, {:d} outputs'.format(self.inputs, self.outputs),
'{}'.format(self.uf),
'{}'.format(self.ui),
'{}'.format(self.ug),
'{}'.format(self.uo),
'{}'.format(self.wf),
'{}'.format(self.wi),
'{}'.format(self.wg),
'{}'.format(self.wo)
))
def __call__(self, previous_layer):
if previous_layer.out_shape is None:
class_name = self.__class__.__name__
prev_name = previous_layer.__class__.__name__
raise LayerError('Incorrect shapes found. Layer {} cannot be connected to the previous {} layer.'.format(class_name, prev_name))
b, w, h, c = previous_layer.out_shape
if b < self.steps:
class_name = self.__class__.__name__
prev_name = previous_layer.__class__.__name__
raise LayerError('Incorrect steps found. Layer {} cannot be connected to the previous {} layer.'.format(class_name, prev_name))
self.batch = b // self.steps
self.input_shape = (self.batch, w, h, c)
indices = np.arange(0, b, dtype='int64')
self.batches = np.lib.stride_tricks.as_strided(indices, shape=(b - self.steps + 1, self.steps), strides=(8, 8)).T.copy()
initial_shape = (self.batch, w, h, c)
self.uf = Connected_layer(self.outputs, activation='Linear', input_shape=initial_shape)
self.ui = Connected_layer(self.outputs, activation='Linear', input_shape=initial_shape)
self.ug = Connected_layer(self.outputs, activation='Linear', input_shape=initial_shape)
self.uo = Connected_layer(self.outputs, activation='Linear', input_shape=initial_shape)
self.wf = Connected_layer(self.outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs))
self.wi = Connected_layer(self.outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs))
self.wg = Connected_layer(self.outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs))
self.wo = Connected_layer(self.outputs, activation='Linear', input_shape=(self.batch, 1, 1, self.outputs))
self.uf.input_shape = (b, w, h, c)
self.ui.input_shape = (b, w, h, c)
self.ug.input_shape = (b, w, h, c)
self.uo.input_shape = (b, w, h, c)
self.wf.input_shape = (b, w, h, self.outputs)
self.wi.input_shape = (b, w, h, self.outputs)
self.wg.input_shape = (b, w, h, self.outputs)
self.wo.input_shape = (b, w, h, self.outputs)
self.state = np.zeros(shape=(self.batch, w, h, self.outputs), dtype=float)
self.output = np.empty(shape=self.uf.out_shape, dtype=float)
self.cell = np.empty(shape=self.uf.out_shape, dtype=float)
self.delta = None
self.optimizer = None
return self
@property
def inputs(self):
return np.prod(self.input_shape[1:])
@property
def out_shape(self):
return self.wo.out_shape
def load_weights(self, chunck_weights, pos=0):
'''
Load weights from full array of model weights
Parameters
----------
chunck_weights : numpy array of model weights
pos : current position of the array
Returns
----------
pos
'''
pos = self.uf.load_weights(chunck_weights, pos=pos)
pos = self.ui.load_weights(chunck_weights, pos=pos)
pos = self.ug.load_weights(chunck_weights, pos=pos)
pos = self.uo.load_weights(chunck_weights, pos=pos)
pos = self.wf.load_weights(chunck_weights, pos=pos)
pos = self.wi.load_weights(chunck_weights, pos=pos)
pos = self.wg.load_weights(chunck_weights, pos=pos)
pos = self.wo.load_weights(chunck_weights, pos=pos)
return pos
def save_weights(self):
'''
Return the biases and weights in a single ravel fmt to save in binary file
'''
return np.concatenate([self.uf.bias.ravel(), self.uf.weights.ravel(),
self.ui.bias.ravel(), self.ui.weights.ravel(),
self.ug.bias.ravel(), self.ug.weights.ravel(),
self.uo.bias.ravel(), self.uo.weights.ravel(),
self.wf.bias.ravel(), self.wf.weights.ravel(),
self.wi.bias.ravel(), self.wi.weights.ravel(),
self.wg.bias.ravel(), self.wg.weights.ravel(),
self.wo.bias.ravel(), self.wo.weights.ravel()], axis=0).tolist()
def forward(self, inpt, copy=False):
'''
Forward function of the LSTM layer. It computes the matrix product
between inpt and weights, add bias and activate the result with the
chosen activation function.
Parameters
----------
inpt : numpy array with shape (batch, w, h, c). Input batch of images of the layer
shortcut : boolean, default False. Enable/Disable internal shortcut connection.
Returns
----------
LSTM_layer object
'''
self.uf.output = np.empty(shape=self.uf.out_shape, dtype=float)
self.ui.output = np.empty(shape=self.ui.out_shape, dtype=float)
self.ug.output = np.empty(shape=self.ug.out_shape, dtype=float)
self.uo.output = np.empty(shape=self.uo.out_shape, dtype=float)
self.wf.output = np.empty(shape=self.wf.out_shape, dtype=float)
self.wi.output = np.empty(shape=self.wi.out_shape, dtype=float)
self.wg.output = np.empty(shape=self.wg.out_shape, dtype=float)
self.wo.output = np.empty(shape=self.wo.out_shape, dtype=float)
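# Small standalone demo of the index matrix built with as_strided in the LSTM layer
# above: for b inputs and `steps` time steps it yields one column per sliding window of
# consecutive batch indices.
import numpy as np

def _demo_sliding_windows(b=6, steps=3):
    indices = np.arange(0, b, dtype='int64')
    itemsize = indices.itemsize  # 8 bytes for int64, hard-coded as strides=(8, 8) above
    return np.lib.stride_tricks.as_strided(
        indices, shape=(b - steps + 1, steps), strides=(itemsize, itemsize)).T.copy()
# _demo_sliding_windows() == [[0, 1, 2, 3],
#                             [1, 2, 3, 4],
#                             [2, 3, 4, 5]]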
# FourierTOuNN: Length Scale Control in Topology Optimization, using Fourier Enhanced Neural Networks
# Authors : <NAME>, <NAME>
# Affiliation : University of Wisconsin - Madison
# Corresponding Author : <EMAIL> , <EMAIL>
# Submitted to Computer Aided Design, 2021
# For academic purposes only
#Versions
#Numpy 1.18.1
#Pytorch 1.5.0
#scipy 1.4.1
#cvxopt 1.2.0
#%% imports
import numpy as np
import torch
import torch.optim as optim
from os import path
from FE import FE
from extrusion import applyExtrusion
import matplotlib.pyplot as plt
from network import TopNet
from torch.autograd import grad
from gridMesher import GridMesh
def to_np(x):
return x.detach().cpu().numpy()
#%% main TO functionalities
class TopologyOptimizer:
#-----------------------------#
def __init__(self, mesh, matProp, bc, nnSettings, fourierMap, \
desiredVolumeFraction, densityProjection, keepElems, extrude, overrideGPU = True):
self.exampleName = bc['exampleName'];
self.device = self.setDevice(overrideGPU);
self.FE = FE(mesh, matProp, bc)
xy = self.FE.mesh.generatePoints()
self.xy = torch.tensor(xy, requires_grad = True).\
float().view(-1,2).to(self.device);
self.keepElems = keepElems
self.desiredVolumeFraction = desiredVolumeFraction;
self.density = self.desiredVolumeFraction*np.ones((self.FE.mesh.numElems));
self.symXAxis = bc['symXAxis'];
self.symYAxis = bc['symYAxis'];
self.fourierMap = fourierMap;
self.extrude = extrude;
self.densityProjection = densityProjection;
if(self.fourierMap['isOn']):
coordnMap = np.zeros((2, self.fourierMap['numTerms']));
for i in range(coordnMap.shape[0]):
for j in range(coordnMap.shape[1]):
coordnMap[i,j] = np.random.choice([-1.,1.])*np.random.uniform(1./(2*fourierMap['maxRadius']), 1./(2*fourierMap['minRadius'])); #
self.coordnMap = torch.tensor(coordnMap).float().to(self.device)#
inputDim = 2*self.coordnMap.shape[1];
else:
self.coordnMap = torch.eye(2);
inputDim = 2;
self.topNet = TopNet(nnSettings, inputDim).to(self.device);
self.objective = 0.;
#-----------------------------#
def setDevice(self, overrideGPU):
if(torch.cuda.is_available() and (overrideGPU == False) ):
device = torch.device("cuda:0");
print("GPU enabled")
else:
device = torch.device("cpu")
print("Running on CPU")
return device;
#-----------------------------#
def applySymmetry(self, x):
if(self.symYAxis['isOn']):
xv =( self.symYAxis['midPt'] + torch.abs( x[:,0] - self.symYAxis['midPt']));
else:
xv = x[:,0];
if(self.symXAxis['isOn']):
yv = (self.symXAxis['midPt'] + torch.abs( x[:,1] - self.symXAxis['midPt'])) ;
else:
yv = x[:,1];
x = torch.transpose(torch.stack((xv,yv)),0,1);
return x;
#-----------------------------#
def applyFourierMapping(self, x):
if(self.fourierMap['isOn']):
c = torch.cos(2*np.pi*torch.matmul(x,self.coordnMap));
s = torch.sin(2*np.pi*torch.matmul(x,self.coordnMap));
xv = torch.cat((c,s), axis = 1);
return xv;
return x;
#-----------------------------#
def projectDensity(self, x):
if(self.densityProjection['isOn']):
b = self.densityProjection['sharpness']
nmr = np.tanh(0.5*b)
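# Hedged sketch (an assumption about how this truncated projection usually continues):
# the common tanh density projection
#   rho_proj = (tanh(0.5*b) + tanh(b*(rho - 0.5))) / (2*tanh(0.5*b))
# which pushes intermediate densities toward 0/1 as the sharpness b grows. Written here
# as a plain numpy reference rather than the torch code used by the class.
import numpy as np

def _tanh_projection(rho, b):
    """Reference numpy version of the usual tanh-based density projection."""
    nmr = np.tanh(0.5 * b) + np.tanh(b * (rho - 0.5))
    return 0.5 * nmr / np.tanh(0.5 * b)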
from PIL import Image, ImageFont, ImageDraw
import colorsys
import numpy as np
import os
import json
def show_label_dir(view_path = '65217-64937_top_view_result_folder/' , image_dir='/home/sd/Downloads/star_baidu/test/pic/' ):
match_result_file = os.listdir(view_path)
match_result_file.sort(key=lambda x: int(x[:-5]))
# print(match_result_file)
detect_class_list = ['limit speed 20','limit speed 30','limit speed 40','limit speed 50','limit speed 60',\
'limit speed 70','limit speed 80','limit speed 90','limit speed 100','limit speed 110',\
'limit speed 120','ban turn left','ban turn right','ban straight','ban leftAright',\
'ban leftAstraight','ban straightAright','ban turn back','electronic eye']
class_names = ['102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '201', '202', '203', '204', '205',
'206', '207', '301']
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors_ = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
i = 0;
for colar in colors_:
color_1 = tuple(map(lambda x: int(x * 255), colar))
colors_[i] = color_1
i += 1
for i_file, img_json in enumerate(match_result_file):
match_result = json.load(open(os.path.join(view_path,img_json)))
match_result_groups = match_result['group']
match_result_signs = match_result['signs']
keep_pic = False
for sign_i, sign in enumerate(match_result_signs):
if keep_pic == False:
img_path = os.path.join(image_dir,str(sign['pic_id']) + '.jpg')
image = Image.open(img_path)
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 800 # 300
left, top, right, bottom, class_name_id = (sign['x'],sign['y'],sign['x'] + sign['w'],sign['y'] + sign['h'],sign['type'])
for i,cla_nam in enumerate(class_names):
if class_names[i] == class_name_id:
class_name_id = i
break
label = detect_class_list[class_name_id] + '---' + sign['sign_id']
print(label, (left, top), (right, bottom))
label_size = draw.textsize(label, font)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
#!/usr/bin/python3
# Convert features and labels to numpy arrays
import ast
import numpy
from lxml import etree
from sklearn.preprocessing import LabelEncoder
# Local imports
from data_tools import data_util
debug = True
''' Convert the dataframe feature column to a numpy array for processing
use_numpy: True if we should convert to a numpy array
doc_level: True if features should aggregate features at the document level (NOT IMPLEMENTED)
'''
def to_feats(df, use_numpy=True, doc_level=False, feat_names=None):
print('to_feats: use_numpy:', use_numpy, 'doc_level:', doc_level, 'feats:', feat_names)
feats = []
feat_columns = []
# Get the names of the feature columns for the df
if feat_names is None:
feat_columns.append('feats')
else: # Handle multiple feature types
for fname in feat_names:
feat_columns.append('feats_' + fname)
# Load a list of all the features
for i, row in df.iterrows():
if len(feat_columns) > 1:
mini_feat_list = []
for featname in feat_columns:
flist = row[featname]
if type(flist) is str:
flist = ast.literal_eval(flist)
if len(feat_columns) > 1:
mini_feat_list.append(flist)
else:
feats.append(flist)
#if debug:
# print('to_feats: ', row['docid'], 'feats:', flist)
if debug and i == 0:
print('feats:', flist)
#if len(flist) > 0:
# print('feats[0]:', type(flist[0]), flist[0])
if len(feat_columns) > 1:
feats.append(mini_feat_list)
if use_numpy:
return numpy.asarray(feats).astype('float')
else:
return feats
def to_labels(df, labelname, labelencoder=None, encode=True):
if debug:
print('to_labels: ', labelname, ', encode: ', str(encode))
#data = ast.literal_eval(df[labelname])
labels = []
# Extract the labels from the dataframe
for i, row in df.iterrows():
flist = row[labelname]
if debug: print('flist:', type(flist), str(flist))
#if type(flist) == str:
# flist = ast.literal_eval(flist)
if debug and i == 0:
print('labels[0]:', flist)
labels.append(flist)
# Normalize the rank values
if labelname == 'event_ranks':
enc_labels = []
for rank_list in labels:
norm_ranks = []
if rank_list is not None and len(rank_list) > 0 and not rank_list == '[]':
if type(rank_list) == str:
rank_list = ast.literal_eval(rank_list)
min_rank = float(numpy.nanmin(numpy.array(rank_list, dtype=numpy.float), axis=None))
# Scale min rank to 0
if min_rank is not numpy.nan and min_rank > 0:
rank_list_scaled = []
for rank in rank_list:
if rank is None or rank is numpy.nan:
rank_list_scaled.append(-1)
else:
rank_list_scaled.append(rank - min_rank)
rank_list = rank_list_scaled
if encode:
max_rank = float(numpy.nanmax(numpy.array(rank_list, dtype=numpy.float)))
import scipy
import librosa
import numpy as np
from scipy.io import wavfile
from librosa.util import normalize
from hparams import hparams as hps
MAX_WAV_VALUE = 32768.0
_mel_basis = None
def load_wav(path):
sr, wav = wavfile.read(path)
assert sr == hps.sample_rate
return normalize(wav/MAX_WAV_VALUE)*0.95
def save_wav(wav, path):
wav *= MAX_WAV_VALUE
wavfile.write(path, hps.sample_rate, wav.astype(np.int16))
def spectrogram(y):
D = _stft(y)
S = _amp_to_db(np.abs(D))
return S
def inv_spectrogram(S):
S = _db_to_amp(S)
return _griffin_lim(S ** hps.power)
def melspectrogram(y):
D = _stft(y)
S = _amp_to_db(_linear_to_mel(np.abs(D)))
return S
def inv_melspectrogram(mel):
mel = _db_to_amp(mel)
S = _mel_to_linear(mel)
return _griffin_lim(S**hps.power)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
S_complex = np.abs(S)
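# Hedged sketch (an assumption, not the original file's code) of how the Griffin-Lim
# loop is usually completed from this point: alternate an inverse STFT with
# re-estimating phases from a forward STFT while keeping the known magnitudes fixed.
# The istft callable and the iteration count are placeholders.
import numpy as np

def _griffin_lim_iterations(S_mag, angles, stft_fn, istft_fn, n_iters=60):
    """Iteratively refine phases for a fixed magnitude spectrogram."""
    y = istft_fn(S_mag * angles)
    for _ in range(n_iters):
        angles = np.exp(1j * np.angle(stft_fn(y)))
        y = istft_fn(S_mag * angles)
    return y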
from __future__ import division
import numpy as np
def ADC_static_ramp(signal,maxcode=None,mincode=None):
codes=np.array(signal)
codes.reshape([-1,])
if maxcode==None:
maxcode=np.max(codes)
if mincode==None:
mincode=np.min(codes)
from mosek.fusion import *
import sys
import numpy as np
import networkx
from scipy.linalg import sqrtm
from DimacsReader import *
def save(name, value,soltime, xsol):
f = open("../Application2_data/"+name+"/reformulation_obj_value.txt","w+")
f.write("Obj: "+str(value)+"\n")
f.write("SolTime: "+str(soltime)+"\n")
f.write("\nUpper level solution: "+str(xsol)+"\n")
f.close()
def main(name_dimacs,name):
#Reading graph file
f = DimacsReader("../DIMACS/"+name_dimacs)
M = f.M
n = f.n
Q1= np.load("../Application2_data/"+name+"/bigQ1.npy")
Q2= np.load("../Application2_data/"+name+"/bigQ2_fix.npy")
q1= np.load("../Application2_data/"+name+"/q1.npy")
q2= np.load("../Application2_data/"+name+"/q2_fix.npy")
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
import os
import cv2
import torch
import random
import numpy as np
import torchvision.transforms as transforms
from skimage.util.shape import view_as_windows
def get_image(filename):
image = cv2.imread(filename)
return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
def do_augmentation(scale_factor=0.3, color_factor=0.2):
scale = random.uniform(1.2, 1.2+scale_factor)
# scale = np.clip(np.random.randn(), 0.0, 1.0) * scale_factor + 1.2
rot = 0 # np.clip(np.random.randn(), -2.0, 2.0) * aug_config.rot_factor if random.random() <= aug_config.rot_aug_rate else 0
do_flip = False # aug_config.do_flip_aug and random.random() <= aug_config.flip_aug_rate
c_up = 1.0 + color_factor
c_low = 1.0 - color_factor
color_scale = [random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)]
return scale, rot, do_flip, color_scale
def trans_point2d(pt_2d, trans):
src_pt = np.array([pt_2d[0], pt_2d[1], 1.]).T
dst_pt = np.dot(trans, src_pt)
return dst_pt[0:2]
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.zeros(2)
src_center[0] = c_x
src_center[1] = c_y # np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
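# Hedged example (added for illustration): build a crop transform with
# gen_trans_from_patch_cv and push a single point through it with trans_point2d.
# The box geometry below is arbitrary.
def _demo_patch_transform():
    # map a 100x120 px source box centered at (50, 60) onto a 224x224 patch
    trans = gen_trans_from_patch_cv(c_x=50, c_y=60, src_width=100, src_height=120,
                                    dst_width=224, dst_height=224, scale=1.0, rot=0)
    center_in_patch = trans_point2d(np.array([50., 60.]), trans)
    # the source center lands on the patch center, i.e. (112, 112)
    return trans, center_in_patch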
def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height, do_flip, scale, rot):
img = cvimg.copy()
img_height, img_width, img_channels = img.shape
if do_flip:
img = img[:, ::-1, :]
c_x = img_width - c_x - 1
trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, inv=False)
img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
return img_patch, trans
def crop_image(image, kp_2d, center_x, center_y, width, height, patch_width, patch_height, do_augment):
# get augmentation params
if do_augment:
scale, rot, do_flip, color_scale = do_augmentation()
else:
scale, rot, do_flip, color_scale = 1.3, 0, False, [1.0, 1.0, 1.0]
# generate image patch
image, trans = generate_patch_image_cv(
image,
center_x,
center_y,
width,
height,
patch_width,
patch_height,
do_flip,
scale,
rot
)
for n_jt in range(kp_2d.shape[0]):
kp_2d[n_jt] = trans_point2d(kp_2d[n_jt], trans)
return image, kp_2d, trans
def transfrom_keypoints(kp_2d, center_x, center_y, width, height, patch_width, patch_height, do_augment):
if do_augment:
scale, rot, do_flip, color_scale = do_augmentation()
else:
scale, rot, do_flip, color_scale = 1.2, 0, False, [1.0, 1.0, 1.0]
# generate transformation
trans = gen_trans_from_patch_cv(
center_x,
center_y,
width,
height,
patch_width,
patch_height,
scale,
rot,
inv=False,
)
for n_jt in range(kp_2d.shape[0]):
kp_2d[n_jt] = trans_point2d(kp_2d[n_jt], trans)
return kp_2d, trans
def get_image_crops(image_file, bboxes):
image = cv2.cvtColor(cv2.imread(image_file), cv2.COLOR_BGR2RGB)
crop_images = []
for bb in bboxes:
c_y, c_x = (bb[0]+bb[2]) // 2, (bb[1]+bb[3]) // 2
h, w = bb[2]-bb[0], bb[3]-bb[1]
w = h = np.where(w / h > 1, w, h)
crop_image, _ = generate_patch_image_cv(
cvimg=image.copy(),
c_x=c_x,
c_y=c_y,
bb_width=w,
bb_height=h,
patch_width=224,
patch_height=224,
do_flip=False,
scale=1.3,
rot=0,
)
crop_image = convert_cvimg_to_tensor(crop_image)
crop_images.append(crop_image)
batch_image = torch.cat([x.unsqueeze(0) for x in crop_images])
return batch_image
from lib.data_utils.occ_utils import occlude_with_objects, paste_over
def get_single_image_crop(image, occluders, bbox, scale=1.3, occ=False):
if isinstance(image, str):
if os.path.isfile(image):
image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
else:
print(image)
raise BaseException(image, 'is not a valid file!')
elif isinstance(image, torch.Tensor):
image = image.numpy()
elif not isinstance(image, np.ndarray):
        raise TypeError('Unknown type for object: {}'.format(type(image)))
crop_image, _ = generate_patch_image_cv(
cvimg=image.copy(),
c_x=bbox[0],
c_y=bbox[1],
bb_width=bbox[2],
bb_height=bbox[3],
patch_width=224,
patch_height=224,
do_flip=False,
scale=scale,
rot=0,
)
if occ:
crop_image = occlude_with_objects(crop_image, occluders)
crop_image = convert_cvimg_to_tensor(crop_image)
return crop_image
def get_single_image_crop_demo(image, bbox, kp_2d, scale=1.2, crop_size=224):
if isinstance(image, str):
if os.path.isfile(image):
image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
else:
print(image)
raise BaseException(image, 'is not a valid file!')
elif isinstance(image, torch.Tensor):
image = image.numpy()
elif not isinstance(image, np.ndarray):
        raise TypeError('Unknown type for object: {}'.format(type(image)))
crop_image, trans = generate_patch_image_cv(
cvimg=image.copy(),
c_x=bbox[0],
c_y=bbox[1],
bb_width=bbox[2],
bb_height=bbox[3],
patch_width=crop_size,
patch_height=crop_size,
do_flip=False,
scale=scale,
rot=0,
)
if kp_2d is not None:
for n_jt in range(kp_2d.shape[0]):
kp_2d[n_jt, :2] = trans_point2d(kp_2d[n_jt], trans)
raw_image = crop_image.copy()
crop_image = convert_cvimg_to_tensor(crop_image)
return crop_image, raw_image, kp_2d
def read_image(filename):
image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (224,224))
return convert_cvimg_to_tensor(image)
def convert_cvimg_to_tensor(image):
transform = get_default_transform()
image = transform(image)
return image
def torch2numpy(image):
image = image.detach().cpu()
inv_normalize = transforms.Normalize(
        mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
        std=[1 / 0.229, 1 / 0.224, 1 / 0.225]
)
image = inv_normalize(image)
image = image.clamp(0., 1.)
image = image.numpy() * 255.
image = np.transpose(image, (1, 2, 0))
return image.astype(np.uint8)
def torch_vid2numpy(video):
video = video.detach().cpu().numpy()
# video = np.transpose(video, (0, 2, 1, 3, 4)) # NCTHW->NTCHW
# Denormalize
    mean = np.array([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225])
    std = np.array([1 / 0.229, 1 / 0.224, 1 / 0.225])
mean = mean[np.newaxis, np.newaxis, ..., np.newaxis, np.newaxis]
std = std[np.newaxis, np.newaxis, ..., np.newaxis, np.newaxis]
video = (video - mean) / std # [:, :, i, :, :].sub_(mean[i]).div_(std[i]).clamp_(0., 1.).mul_(255.)
video = video.clip(0.,1.) * 255
video = video.astype(np.uint8)
return video
def get_bbox_from_kp2d(kp_2d):
# get bbox
if len(kp_2d.shape) > 2:
ul = np.array([kp_2d[:, :, 0].min(axis=1), kp_2d[:, :, 1].min(axis=1)]) # upper left
lr = np.array([kp_2d[:, :, 0].max(axis=1), kp_2d[:, :, 1].max(axis=1)]) # lower right
else:
ul = np.array([kp_2d[:, 0].min(), kp_2d[:, 1].min()]) # upper left
lr = np.array([kp_2d[:, 0].max(), kp_2d[:, 1].max()]) # lower right
# ul[1] -= (lr[1] - ul[1]) * 0.10 # prevent cutting the head
w = lr[0] - ul[0]
h = lr[1] - ul[1]
c_x, c_y = ul[0] + w / 2, ul[1] + h / 2
# to keep the aspect ratio
w = h = np.where(w / h > 1, w, h)
w = h = h * 1.1
bbox =
|
np.array([c_x, c_y, w, h])
|
numpy.array
|
import unittest
import numpy as np
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import analysis
class Preprocess(unittest.TestCase):
def test_preprocess__center_scale__allowed_calls(self):
pass
# ------------------------------------------------------------------------------
def test_preprocess__center_scale__not_allowed_calls(self):
X = np.random.rand(100,20)
with self.assertRaises(ValueError):
(X_cs, X_center, X_scale) = preprocess.center_scale(X, 'none', nocenter=1)
with self.assertRaises(ValueError):
(X_cs, X_center, X_scale) = preprocess.center_scale([1,2,3], 'none')
with self.assertRaises(ValueError):
(X_cs, X_center, X_scale) = preprocess.center_scale(X, 1)
X = np.random.rand(100,)
with self.assertRaises(ValueError):
(X_cs, X_center, X_scale) = preprocess.center_scale(X, 'none')
# ------------------------------------------------------------------------------
def test_preprocess__center_scale__all_possible_C_and_D(self):
test_data_set = np.random.rand(100,20)
# Instantiations that should work:
try:
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'none', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'auto', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'std', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'pareto', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'range', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'level', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'max', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'poisson', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_2', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_3', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_4', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'none', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'auto', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'std', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'pareto', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'range', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'level', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'max', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'poisson', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_2', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_3', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, 'vast_4', nocenter=True)
except Exception:
self.assertTrue(False)
# ------------------------------------------------------------------------------
def test_preprocess__center_scale__on_1D_variable(self):
test_1D_variable = np.random.rand(100,1)
# Instantiations that should work:
try:
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'none', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'auto', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'std', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'pareto', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'range', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, '0to1', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, '-1to1', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'level', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'max', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'poisson', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_2', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_3', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_4', nocenter=False)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'none', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'auto', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'std', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'pareto', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'range', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, '0to1', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, '-1to1', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'level', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'max', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'poisson', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_2', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_3', nocenter=True)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_1D_variable, 'vast_4', nocenter=True)
except Exception:
self.assertTrue(False)
# ------------------------------------------------------------------------------
def test_preprocess__center_scale__ZeroToOne(self):
tolerance = 10**-10
try:
test_data_set = np.random.rand(100,10)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,10):
self.assertTrue((np.min(X_cs[:,i]) > (- tolerance)) and (np.min(X_cs[:,i]) < tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(1000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.min(X_cs[:,i]) > (- tolerance)) and (np.min(X_cs[:,i]) < tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(2000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.min(X_cs[:,i]) > (- tolerance)) and (np.min(X_cs[:,i]) < tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(100,10)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,10):
self.assertTrue((np.max(X_cs[:,i]) > (1 - tolerance)) and (np.max(X_cs[:,i]) < (1 + tolerance)))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(1000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.max(X_cs[:,i]) > (1 - tolerance)) and (np.max(X_cs[:,i]) < (1 + tolerance)))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(2000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '0to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.max(X_cs[:,i]) > (1 - tolerance)) and (np.max(X_cs[:,i]) < (1 + tolerance)))
except Exception:
self.assertTrue(False)
# ------------------------------------------------------------------------------
def test_preprocess__center_scale__MinusOneToOne(self):
tolerance = 10**-10
try:
test_data_set = np.random.rand(100,10)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=False)
for i in range(0,10):
self.assertTrue((np.min(X_cs[:,i]) > (-1 - tolerance)) and (np.min(X_cs[:,i]) < -1 + tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(1000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.min(X_cs[:,i]) > (-1 - tolerance)) and (np.min(X_cs[:,i]) < -1 + tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set = np.random.rand(2000,1)
(X_cs, X_center, X_scale) = preprocess.center_scale(test_data_set, '-1to1', nocenter=False)
for i in range(0,1):
self.assertTrue((np.min(X_cs[:,i]) > (-1 - tolerance)) and (np.min(X_cs[:,i]) < -1 + tolerance))
except Exception:
self.assertTrue(False)
try:
test_data_set =
|
np.random.rand(100,10)
|
numpy.random.rand
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.l = [numpy.arange(1000), numpy.arange(1000)]
self.l10x10 = numpy.ones((10, 10))
def time_array_1(self):
numpy.array(1)
def time_array_empty(self):
numpy.array([])
def time_array_l1(self):
numpy.array([1])
def time_array_l100(self):
numpy.array(self.l100)
def time_array_l(self):
numpy.array(self.l)
def time_vstack_l(self):
numpy.vstack(self.l)
def time_hstack_l(self):
numpy.hstack(self.l)
def time_dstack_l(self):
numpy.dstack(self.l)
def time_arange_100(self):
numpy.arange(100)
def time_zeros_100(self):
numpy.zeros(100)
def time_ones_100(self):
numpy.ones(100)
def time_empty_100(self):
numpy.empty(100)
def time_eye_100(self):
|
numpy.eye(100)
|
numpy.eye
|
"""Implementations of QMIX for Checkers.
Same as alg_qmix.py, except that Checkers global state has more components.
"""
import numpy as np
import tensorflow as tf
import sys
import networks
class Alg(object):
def __init__(self, experiment, dimensions, stage=1, n_agents=1,
tau=0.01, lr_Q=0.001, gamma=0.99, nn={}):
"""
Same as alg_qmix. Checkers state has more components
Inputs:
experiment - string
dimensions - dictionary containing tensor dimensions
(h,w,c) for tensor
l for 1D vector
stage - curriculum stage (always 2 for IAC and COMA)
tau - target variable update rate
lr_Q - learning rates for optimizer
gamma - discount factor
"""
self.experiment = experiment
if self.experiment == "checkers":
# Global state
self.rows_state = dimensions['rows_state']
self.columns_state = dimensions['columns_state']
self.channels_state = dimensions['channels_state']
self.l_state = n_agents * dimensions['l_state_one']
self.l_state_one_agent = dimensions['l_state_one']
self.l_state_other_agents = (n_agents-1) * dimensions['l_state_one']
# Agent observations
self.l_obs_others = dimensions['l_obs_others']
self.l_obs_self = dimensions['l_obs_self']
# Dimensions for image input
self.rows_obs = dimensions['rows_obs']
self.columns_obs = dimensions['columns_obs']
self.channels_obs = dimensions['channels_obs']
self.l_action = dimensions['l_action']
self.l_goal = dimensions['l_goal']
self.n_agents = n_agents
self.tau = tau
self.lr_Q = lr_Q
self.gamma = gamma
self.nn = nn
self.agent_labels = np.eye(self.n_agents)
self.actions = np.eye(self.l_action)
# Initialize computational graph
self.create_networks(stage)
self.list_initialize_target_ops, self.list_update_target_ops = self.get_assign_target_ops(tf.trainable_variables())
self.create_train_op()
def create_networks(self, stage):
# Placeholders
self.state_env = tf.placeholder(tf.float32, [None, self.rows_state, self.columns_state, self.channels_state], 'state_env')
self.v_state = tf.placeholder(tf.float32, [None, self.l_state], 'v_state')
self.v_goal_all = tf.placeholder(tf.float32, [None, self.n_agents*self.l_goal], 'v_goal_all')
self.v_state_one_agent = tf.placeholder(tf.float32, [None, self.l_state_one_agent], 'v_state_one_agent')
self.v_state_other_agents = tf.placeholder(tf.float32, [None, self.l_state_other_agents], 'v_state_other_agents')
self.v_goal = tf.placeholder(tf.float32, [None, self.l_goal], 'v_goal')
self.v_goal_others = tf.placeholder(tf.float32, [None, (self.n_agents-1)*self.l_goal], 'v_goal_others')
self.v_labels = tf.placeholder(tf.float32, [None, self.n_agents])
self.action_others = tf.placeholder(tf.float32, [None, self.n_agents-1, self.l_action], 'action_others')
if self.experiment == "checkers":
self.obs_self_t = tf.placeholder(tf.float32, [None, self.rows_obs, self.columns_obs, self.channels_obs], 'obs_self_t')
self.obs_self_v = tf.placeholder(tf.float32, [None, self.l_obs_self], 'obs_self_v')
self.obs_others = tf.placeholder(tf.float32, [None, self.l_obs_others], 'obs_others')
self.actions_prev = tf.placeholder(tf.float32, [None, self.l_action], 'action_prev')
# Individual agent networks
# output dimension is [time * n_agents, q-values]
with tf.variable_scope("Agent_main"):
if self.experiment == 'checkers':
self.agent_qs = networks.Qmix_single_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action)
with tf.variable_scope("Agent_target"):
if self.experiment == 'checkers':
self.agent_qs_target = networks.Qmix_single_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action)
self.argmax_Q = tf.argmax(self.agent_qs, axis=1)
self.argmax_Q_target = tf.argmax(self.agent_qs_target, axis=1)
# To extract Q-value from agent_qs and agent_qs_target; [batch*n_agents, l_action]
self.actions_1hot = tf.placeholder(tf.float32, [None, self.l_action], 'actions_1hot')
self.q_selected = tf.reduce_sum(tf.multiply(self.agent_qs, self.actions_1hot), axis=1)
self.mixer_q_input = tf.reshape( self.q_selected, [-1, self.n_agents] ) # [batch, n_agents]
self.q_target_selected = tf.reduce_sum(tf.multiply(self.agent_qs_target, self.actions_1hot), axis=1)
self.mixer_target_q_input = tf.reshape( self.q_target_selected, [-1, self.n_agents] )
# Mixing network
with tf.variable_scope("Mixer_main"):
self.mixer = networks.Qmix_mixer_checkers(self.mixer_q_input, self.state_env, self.v_state, self.v_goal_all, self.l_state, self.l_goal, self.n_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'])
with tf.variable_scope("Mixer_target"):
            self.mixer_target = networks.Qmix_mixer_checkers(self.mixer_target_q_input, self.state_env, self.v_state, self.v_goal_all, self.l_state, self.l_goal, self.n_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'])
def get_assign_target_ops(self, list_vars):
# ops for equating main and target
list_initial_ops = []
# ops for slow update of target toward main
list_update_ops = []
list_Agent_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_main')
map_name_Agent_main = {v.name.split('main')[1] : v for v in list_Agent_main}
list_Agent_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_target')
map_name_Agent_target = {v.name.split('target')[1] : v for v in list_Agent_target}
if len(list_Agent_main) != len(list_Agent_target):
raise ValueError("get_initialize_target_ops : lengths of Agent_main and Agent_target do not match")
for name, var in map_name_Agent_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_Agent_target[name].assign(var) )
for name, var in map_name_Agent_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Agent_target[name].assign( self.tau*var + (1-self.tau)*map_name_Agent_target[name] ) )
list_Mixer_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_main')
map_name_Mixer_main = {v.name.split('main')[1] : v for v in list_Mixer_main}
list_Mixer_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_target')
map_name_Mixer_target = {v.name.split('target')[1] : v for v in list_Mixer_target}
if len(list_Mixer_main) != len(list_Mixer_target):
raise ValueError("get_initialize_target_ops : lengths of Mixer_main and Mixer_target do not match")
# ops for equating main and target
for name, var in map_name_Mixer_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_Mixer_target[name].assign(var) )
# ops for slow update of target toward main
for name, var in map_name_Mixer_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Mixer_target[name].assign( self.tau*var + (1-self.tau)*map_name_Mixer_target[name] ) )
return list_initial_ops, list_update_ops
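    # Hedged illustration (added): the update ops built above implement the Polyak
    # soft update theta_target <- tau*theta_main + (1-tau)*theta_target. This small
    # numpy helper mirrors that rule outside of TensorFlow purely for documentation;
    # it is not used by the algorithm.
    def _demo_soft_update(self, steps=5):
        theta_main = np.ones(3)
        theta_target = np.zeros(3)
        for _ in range(steps):
            theta_target = self.tau * theta_main + (1 - self.tau) * theta_target
        return theta_target  # slowly tracks theta_main as steps grow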
def run_actor(self, actions_prev, obs_others, obs_self_t, obs_self_v, goals, epsilon, sess):
"""
Get actions for all agents as a batch
actions_prev - list of integers
obs_others - list of vector or tensor describing other agents
obs_self - list of observation grid centered on self
goals - [n_agents, n_lanes]
"""
# convert to batch
obs_others = np.array(obs_others)
obs_self_t = np.array(obs_self_t)
obs_self_v = np.array(obs_self_v)
actions_prev_1hot = np.zeros([self.n_agents, self.l_action])
actions_prev_1hot[
|
np.arange(self.n_agents)
|
numpy.arange
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from numpy import dot
from numpy.linalg import inv, LinAlgError
from scipy.stats import norm
from seaborn import FacetGrid  # used by plot_conditional_effects below
def z_score(conf):
"""
:param conf: Desired level of confidence
:return: The Z-score corresponding to the level of confidence desired.
"""
return norm.ppf((100 - (100 - conf) / 2) / 100)
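# Example (added for clarity): z_score(95) ≈ 1.959964, the familiar two-sided
# critical value for a 95% confidence level.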
def bias_corrected_ci(estimate, samples, conf=95):
"""
Return the bias-corrected bootstrap confidence interval for an estimate
:param estimate: Numerical estimate in the original sample
:param samples: Nx1 array of bootstrapped estimates
:param conf: Level of the desired confidence interval
:return: Bias-corrected bootstrapped LLCI and ULCI for the estimate.
"""
# noinspection PyUnresolvedReferences
ptilde = ((samples < estimate) * 1).mean()
Z = norm.ppf(ptilde)
Zci = z_score(conf)
Zlow, Zhigh = -Zci + 2 * Z, Zci + 2 * Z
    plow, phigh = norm.cdf(Zlow), norm.cdf(Zhigh)
llci = np.percentile(samples, plow * 100, interpolation="lower")
ulci = np.percentile(samples, phigh * 100, interpolation="higher")
return llci, ulci
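# Hedged example (added for illustration): bias-corrected CI for a sample mean,
# using synthetic data and naive bootstrap replicates. All numbers are arbitrary.
def _demo_bias_corrected_ci(seed=42, n_boot=2000):
    rng = np.random.RandomState(seed)
    data = rng.normal(loc=1.0, scale=2.0, size=200)
    estimate = data.mean()
    samples = np.array([rng.choice(data, size=data.size, replace=True).mean()
                        for _ in range(n_boot)])
    return bias_corrected_ci(estimate, samples, conf=95)  # (llci, ulci) bracketing ~1.0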
def percentile_ci(samples, conf):
"""
Based on an array of values, returns the lower and upper percentile bound for a desired level of confidence
:param samples: NxK array of samples
:param conf: Desired level of confidence
:return: 2xK array corresponding to the lower and upper percentile bounds for K estimates.
"""
lower = (100 - conf) / 2
upper = 100 - lower
    return np.percentile(samples, [lower, upper], axis=0)
def fast_OLS(endog, exog):
"""
A simple function for (X'X)^(-1)X'Y
:return: The Kx1 array of estimated coefficients.
"""
endog = endog.astype(float)
exog = exog.astype(float)
try:
return dot(dot(inv(dot(exog.T, exog)), exog.T), endog).squeeze()
except LinAlgError:
raise LinAlgError
def logit_cdf(X):
"""
The CDF of the logistic function.
:param X: Value at which to estimate the CDF
:return: The logistic function CDF, evaluated at X
"""
idx = X > 0
out = np.empty(X.size, dtype=float)
out[idx] = 1 / (1 + np.exp(-X[idx]))
exp_X = np.exp(X[~idx])
out[~idx] = exp_X / (1 + exp_X)
return out
def logit_score(endog, exog, params, n_obs):
"""
The score of the logistic function.
:param endog: Nx1 vector of endogenous predictions
:param exog: NxK vector of exogenous predictors
:param params: Kx1 vector of parameters for the predictors
:param n_obs: Number of observations
:return: The score, a Kx1 vector, evaluated at `params'
"""
return dot(endog - logit_cdf(dot(exog, params)), exog) / n_obs
def logit_hessian(exog, params, n_obs):
"""
The hessian of the logistic function.
:param exog: NxK vector of exogenous predictors
:param params: Kx1 vector of parameters for the predictors
:param n_obs: Number of observations
:return: The Hessian, a KxK matrix, evaluated at `params'
"""
L = logit_cdf(np.dot(exog, params))
return -dot(L * (1 - L) * exog.T, exog) / n_obs
def fast_optimize(endog, exog, n_obs=0, n_vars=0, max_iter=10000, tolerance=1e-10):
"""
A convenience function for the Newton-Raphson method to evaluate a logistic model.
:param endog: Nx1 vector of endogenous predictions
:param exog: NxK vector of exogenous predictors
:param n_obs: Number of observations N
:param n_vars: Number of exogenous predictors K
:param max_iter: Maximum number of iterations
:param tolerance: Margin of error for convergence
:return: The error-minimizing parameters for the model.
"""
iterations = 0
oldparams = np.inf
newparams = np.repeat(0, n_vars)
while iterations < max_iter and np.any(np.abs(newparams - oldparams) > tolerance):
oldparams = newparams
try:
H = logit_hessian(exog, oldparams, n_obs)
newparams = oldparams - dot(
inv(H), logit_score(endog, exog, oldparams, n_obs)
)
except LinAlgError:
raise LinAlgError
iterations += 1
return newparams
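# Hedged example (added for illustration): recover logistic-regression coefficients
# on synthetic data with the Newton-Raphson helper above. The true coefficients
# [0.5, -1.0] are arbitrary.
def _demo_fast_optimize(seed=0, n_obs=5000):
    rng = np.random.RandomState(seed)
    exog = np.column_stack([np.ones(n_obs), rng.normal(size=n_obs)])  # intercept + slope
    true_params = np.array([0.5, -1.0])
    endog = rng.binomial(1, logit_cdf(dot(exog, true_params)))
    return fast_optimize(endog, exog, n_obs=n_obs, n_vars=2)  # approx. [0.5, -1.0]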
def bootstrap_sampler(n_obs, seed=None):
"""
    A generator of bootstrapped indices: samples a list of indices with replacement.
:param n_obs: Number of observations
:param seed: The seed to use for the random number generator
:return: Bootstrapped indices of size n_obs
"""
seeder = np.random.RandomState(seed)
seeder.seed(seed)
while True:
yield seeder.randint(n_obs, size=n_obs)
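# Hedged example (added): draw two bootstrap index vectors for a data set of
# five observations; each draw samples indices 0..4 with replacement.
def _demo_bootstrap_sampler():
    sampler = bootstrap_sampler(5, seed=123)
    return next(sampler), next(sampler)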
def eigvals(exog):
"""
    Return the eigenvalues of X'X, where X is the matrix of exogenous predictors.
:param exog: NxK matrix of exogenous predictors.
:return: Kx1 vector of eigenvalues, sorted in decreasing order of magnitude.
"""
return np.sort(np.linalg.eigvalsh(dot(exog.T, exog)))[::-1]
def eval_expression(expr, values=None):
"""
Evaluate a symbolic expression and returns a numerical array.
:param expr: A symbolic expression to evaluate, in the form of a N_terms * N_Vars matrix
:param values: None, or a dictionary of variable:value pairs, to substitute in the symbolic expression.
:return: An evaled expression, in the form of an N_terms array.
"""
n_coeffs = expr.shape[0]
evaled_expr = np.zeros(n_coeffs)
for (i, term) in enumerate(expr):
if values:
evaled_term = np.array(
[
values.get(elem, 0) if isinstance(elem, str) else elem
for elem in term
]
)
else:
evaled_term = np.array(
[0 if isinstance(elem, str) else elem for elem in term]
) # All variables at 0
        evaled_expr[i] = np.prod(
evaled_term.astype(float)
) # Gradient is the product of values
return evaled_expr
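# Hedged example (added for illustration): evaluate the term matrix
# [[2.0, 'x'], ['x', 'w'], [1.0, 1.0]] at x=3, w=2, which yields [6., 6., 1.]
# (each row is the product of its substituted elements).
def _demo_eval_expression():
    expr = np.array([[2.0, 'x'], ['x', 'w'], [1.0, 1.0]], dtype=object)
    return eval_expression(expr, values={'x': 3.0, 'w': 2.0})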
def gen_moderators(raw_equations, raw_varlist):
terms_y = raw_equations["all_to_y"]
terms_m = raw_equations["x_to_m"]
# Moderators of X in the path to Y
mod_x_direct = set(filter(lambda v: f"x*{v}" in terms_y, raw_varlist))
# Moderators of X in the path to M
mod_x_indirect = set(filter(lambda v: f"x*{v}" in terms_m, raw_varlist))
# Moderators of M in the path to Y
mod_m = set(filter(lambda v: f"m*{v}" in terms_y, raw_varlist))
moderators = {
"x_direct": mod_x_direct,
"x_indirect": mod_x_indirect,
"m": mod_m,
"all": mod_x_indirect | mod_x_direct | mod_m,
"indirect": mod_x_indirect | mod_m,
}
return moderators
def plot_errorbars(
x, y, yerrlow, yerrhigh, plot_kws=None, err_kws=None, *args, **kwargs
):
yerr = [yerrlow, yerrhigh]
    err_kws_final = kwargs.copy()
    err_kws_final.update(err_kws or {})
    err_kws_final.update({"marker": "", "fmt": "none", "label": "", "zorder": 3})
    plot_kws_final = kwargs.copy()
    plot_kws_final.update(plot_kws or {})
plt.plot(x, y, *args, **plot_kws_final)
plt.errorbar(x, y, yerr, *args, **err_kws_final)
return None
def plot_errorbands(x, y, llci, ulci, plot_kws=None, err_kws=None, *args, **kwargs):
    err_kws_final = kwargs.copy()
    err_kws_final.update(err_kws or {})
    err_kws_final.update({"label": ""})
    plot_kws_final = kwargs.copy()
    plot_kws_final.update(plot_kws or {})
plt.plot(x, y, *args, **plot_kws_final)
plt.fill_between(x, llci, ulci, *args, **err_kws_final)
return None
def plot_conditional_effects(
df_effects, x, hue, row, col, errstyle, hue_format, facet_kws, plot_kws, err_kws
):
if isinstance(hue, list):
huename = "Hue"
if len(hue) == 2:
if hue_format is None:
hue_format = "{var1} at {hue1:.2f}, {var2} at {hue2:.2f}"
df_effects["Hue"] = df_effects[hue].apply(
lambda d: hue_format.format(
var1=hue[0], var2=hue[1], hue1=d[hue[0]], hue2=d[hue[1]]
),
axis=1,
)
else:
if hue_format is None:
hue_format = "{var1} at {hue1:.2f}"
df_effects["Hue"] = df_effects[hue[0]].apply(
lambda d: hue_format.format(var1=hue[0], hue1=d)
)
elif isinstance(hue, str):
huename = "Hue"
if hue_format is None:
hue_format = "{var1} at {hue1:.2f}"
df_effects["Hue"] = df_effects[hue].apply(
lambda d: hue_format.format(var1=hue, hue1=d)
)
else:
huename = None
if not facet_kws:
facet_kws = {}
if not plot_kws:
plot_kws = {}
g = FacetGrid(hue=huename, data=df_effects, col=col, row=row, **facet_kws)
if errstyle == "band":
if not err_kws:
err_kws = {"alpha": 0.2}
g.map(
plot_errorbands,
x,
"Effect",
"LLCI",
"ULCI",
plot_kws=plot_kws,
err_kws=err_kws,
)
elif errstyle == "ci":
if not err_kws:
err_kws = {"alpha": 1, "capthick": 1, "capsize": 3}
df_effects["yerr_low"] = df_effects["Effect"] - df_effects["LLCI"]
df_effects["yerr_high"] = df_effects["ULCI"] - df_effects["Effect"]
g.map(
plot_errorbars,
x,
"Effect",
"yerr_low",
"yerr_high",
plot_kws=plot_kws,
err_kws=err_kws,
)
elif errstyle == "none":
g.map(plt.plot, x, "Effect", **plot_kws)
if facet_kws.get("margin_titles"):
for ax in g.axes.flat:
plt.setp(ax.texts, text="")
if row and col:
g.set_titles(
row_template="{row_var} at {row_name:.2f}",
col_template="{col_var} at {col_name:.2f}",
)
return g
# noinspection PyTypeChecker
def find_significance_region(
spotlight_func, mod_symb, modval_min, modval_max, modval_other_symb, atol, rtol
):
mos = modval_other_symb.copy()
dict_modval_min = {**dict([[mod_symb, modval_min]]), **mos}
dict_modval_max = {**dict([[mod_symb, modval_max]]), **mos}
b_min, _, llci_min, ulci_min = spotlight_func(dict_modval_min)
b_max, _, llci_max, ulci_max = spotlight_func(dict_modval_max)
slope = "positive" if (b_min < b_max) else "negative"
if slope == "negative":
# Flip the values to facilitate the computations.
b_min, llci_min, ulci_min, b_max, llci_max, ulci_max = (
b_max,
llci_max,
ulci_max,
b_min,
llci_min,
ulci_min,
)
# Cases 1 and 2: The effect is always significantly negative/positive:
if ulci_max < 0:
return [[modval_min, modval_max], []]
if llci_min > 0:
return [[], [modval_min, modval_max]]
# Case 3: The effect is negative and sig. in one region, and becomes non-significant at some critical value:
if (ulci_min < 0) and (llci_max < 0 < ulci_max):
critical_value_neg = search_critical_values(
spotlight_func,
modval_min,
modval_max,
mod_symb,
mos,
slope,
region="negative",
atol=atol,
rtol=rtol,
)
return (
[[modval_min, critical_value_neg], []]
if slope == "positive"
else [[critical_value_neg, modval_max], []]
)
    # Case 4: The effect is positive and significant in one region, and becomes non-significant at some critical value:
if (llci_min < 0 < ulci_min) and (llci_max > 0):
critical_value_pos = search_critical_values(
spotlight_func,
modval_min,
modval_max,
mod_symb,
mos,
slope,
region="positive",
atol=atol,
rtol=rtol,
)
return (
[[], [critical_value_pos, modval_max]]
if slope == "positive"
else [[], [modval_min, critical_value_pos]]
)
# Case 5: The effect is negative and significant in one region, and crossover to positive and sig. in another:
if (ulci_min < 0) and (llci_max > 0):
modval_diff = modval_max - modval_min
dist_to_zero = 1 - (b_max / (b_max - b_min))
if slope == "positive":
modval_zero = modval_min + modval_diff * dist_to_zero
critical_value_neg = search_critical_values(
spotlight_func,
modval_min,
modval_zero,
mod_symb,
mos,
slope,
region="negative",
atol=atol,
rtol=rtol,
)
critical_value_pos = search_critical_values(
spotlight_func,
modval_zero,
modval_max,
mod_symb,
mos,
slope,
region="positive",
atol=atol,
rtol=rtol,
)
return [[modval_min, critical_value_neg], [critical_value_pos, modval_max]]
else:
modval_zero = modval_max - modval_diff * dist_to_zero
critical_value_neg = search_critical_values(
spotlight_func,
modval_min,
modval_zero,
mod_symb,
mos,
slope,
region="positive",
atol=atol,
rtol=rtol,
)
critical_value_pos = search_critical_values(
spotlight_func,
modval_zero,
modval_max,
mod_symb,
mos,
slope,
region="negative",
atol=atol,
rtol=rtol,
)
return [[critical_value_pos, modval_max], [modval_min, critical_value_neg]]
# Case 6: The effect is not significant on the bounds of the region, but can still be significant in some middle
# range:
if (llci_min < 0 < ulci_min) and (llci_max < 0 < ulci_max):
return search_mid_range(
spotlight_func, modval_min, modval_max, mod_symb, mos, atol=atol, rtol=rtol
)
def search_mid_range(
spotlight_func, min_val, max_val, mod_symb, mod_dict, atol=1e-8, rtol=1e-5
):
cvals = np.linspace(min_val, max_val, 1000) # Construct a grid of 1000 points.
arr_ci = np.empty((1000, 2))
# noinspection PyRedundantParentheses
arr_b = np.empty(shape=(1000))
for i, cval in enumerate(cvals):
mod_dict[mod_symb] = cval
arr_b[i], _, arr_ci[i][0], arr_ci[i][1] = spotlight_func(mod_dict)
    non_sig = list(
        map(lambda x: x[0] < 0 < x[1], arr_ci)
    )  # True where the CI includes 0; a significant region requires at least one False
if all(non_sig): # If not, no significant region.
return [[], []]
# Otherwise, we identify the effect at the point at which the CI is the most narrow.
effect_at_tightest_ci = arr_b[np.argmin(arr_ci[:, 1] - arr_ci[:, 0])]
if effect_at_tightest_ci > 0: # Significance region will be positive
# Slope here is the slope of the CI: the slope of the effect itself is not significant.
mid_val = cvals[
|
np.argmax(arr_ci[:, 0])
|
numpy.argmax
|
"""
BSD 3-Clause License
Copyright (c) 2020, Cyber Security Research Centre Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
#data is a list of two arrays of timestamps
#timestamps is probably a 1d pandas dataframe or np array or just a list
#will return a list of timestamps of cluster starts and a list of arrays containing times within clusters
#the cluster ends when a new point has not been seen for cluster_size time
#assumes timestamps are positive
def ClusteriseSingle(timestamps, cluster_size):
cluster_timestamps = [] #start of each cluster
inner_cluster_timestamps = [] #arrays of timestamps of events within each cluster
    prev_cluster_time = float('-inf') #so the very first event always starts a new cluster
curr_cluster_timestamps = [] #timestamps in the current cluster
for time in timestamps:
if (time > prev_cluster_time + cluster_size):
            #a new cluster; flush the previous cluster's events first (if any)
            cluster_timestamps.append(time)
            if (curr_cluster_timestamps):
                inner_cluster_timestamps.append(np.array(curr_cluster_timestamps, dtype=np.dtype('d')))
curr_cluster_timestamps = [time]
else:
curr_cluster_timestamps.append(time)
prev_cluster_time = time
#ensure the final clusters inner timestamps are added
if (curr_cluster_timestamps):
inner_cluster_timestamps.append(np.array(curr_cluster_timestamps, dtype=np.dtype('d')))
#ensure the return value is a np array for efficiency reasons
return np.array(cluster_timestamps, dtype=np.dtype('d')), inner_cluster_timestamps
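# Hedged example (added for illustration): with a 5-second gap threshold, events
# at t = 1, 2, 3 form one cluster and t = 20, 21 a second one; given the -inf
# initialisation above, the returned starts are [1., 20.].
def _demo_clusterise_single():
    starts, inner = ClusteriseSingle([1.0, 2.0, 3.0, 20.0, 21.0], cluster_size=5.0)
    return starts, inner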
#takes a list of arrays (data)
#returns a list of cluster start times and a list of timestamps for events within each cluster for each cluster start
def Clusterise(data, cluster_size):
num_series = len(data)
cluster_starts = [] #the start of a cluster of events
cluster_timestamps = [] #[cluster no, series no]
#create iterators for each series
iterators = []
next_event = []
for series in data:
it = iter(series)
iterators.append(it)
try:
next_event.append(next(it))
except StopIteration:
next_event.append(None)
cluster_prev = float('-inf')
    #don't use [[]]*x: it just duplicates the pointer to the same inner list
curr_cluster_timestamps = [[] for i in range(num_series)] #one list for each series
while (True):
curr_min, pos = SafeMin(next_event)
if (curr_min is None):
#no more points
#save the current cluster
c = []
for cl in curr_cluster_timestamps:
c.append(np.array(cl, dtype=np.dtype('d'))-cluster_starts[-1])
cluster_timestamps.append(c)
break
if (curr_min > cluster_prev + cluster_size):
#save current cluster, convert to np array
if (cluster_prev >= 0):
c = []
for cl in curr_cluster_timestamps:
c.append(np.array(cl, dtype=np.dtype('d'))-cluster_starts[-1])
cluster_timestamps.append(c)
#create new cluster
curr_cluster_timestamps = [[] for i in range(num_series)]
cluster_starts.append(curr_min)
#record the new event in the cluster
curr_cluster_timestamps[pos].append(curr_min)
else:
#record the next event
curr_cluster_timestamps[pos].append(curr_min)
#prepare for the next event
cluster_prev = curr_min
next_event[pos] = SafeNext(iterators[pos])
return np.array(cluster_starts, dtype=np.dtype('d')), cluster_timestamps
#min but with none values
#will return None if all inputs are none
#will also return the position of the min value
def SafeMin(l):
m = None
pos = None
p = 0
for i in l:
if (i is not None):
if (m is None or i < m):
m = i
pos = p
p += 1
return m, pos
def SafeNext(it):
try:
return next(it)
except StopIteration:
return None
def MultiHist(data, title="<Insert Title>", subtitles=[], bins=250, data_range=None):
l = len(data)
fig, axs = plt.subplots(l, figsize=(20,10), sharex=True, sharey=True)
fig.suptitle(title, fontsize=22)
fig.tight_layout(pad=5.0)
m = float('-inf')
for series in data:
s_max = max(series)
if (s_max > m):
m = s_max
if (data_range is None):
data_range = (0,m)
for i in range(l):
axs[i].set_ylabel('Freq (Log x)', fontsize=16)
axs[i].set_xlabel('Time (Sec)', fontsize=16)
axs[i].set_title(subtitles[i], fontsize=18)
_ = axs[i].hist(data[i], bins=bins, log=True, range=data_range)
#compute and return an array of interarrival times
def InterTimes(timestamps):
t = []
for i in range(len(timestamps)-1):
t.append(timestamps[i+1] - timestamps[i])
t.sort()
return np.array(t, dtype=np.dtype('d'))
def ComputeClusterLengths(inner_cluster_times):
cluster_lengths = []
for cluster in inner_cluster_times:
min = float("inf")
max = 0
for series in cluster:
if (series.size > 0): #some series could be empty, but not all
if (series[0] < min):
min = series[0]
if (series[-1] > max):
max = series[-1]
cluster_lengths.append(max-min)
a = np.array(cluster_lengths, dtype=np.dtype('d'))
#a.sort()
return a
#timestamps is an array of timestamp arrays (one for each process)
#distribution is an array of cluster lengths to draw from
#cluster_size is the wait period for the end of a cluster, will be used as spacing for extracting samples
def GenerateClusters(timestamps, distribution, cluster_size):
num_lengths = len(distribution)
data = []
for series in timestamps:
data.append(pd.Series(series))
#find min and max
low = []
high = []
for series in timestamps:
low.append(series.min())
high.append(series.max())
#trim the first and last cluster_size seconds of data
low = min(low) + cluster_size
high = max(high) - cluster_size
#generate the clusters
curr_time = low
new_clusters = []
#TODO need to normalise the points
while (True):
#generate a new cluster length
l = distribution[int(random.random()*num_lengths)]
if (curr_time + l > high):
            #can't sample if there isn't enough data left to cover the cluster length
break
cluster = []
is_empty = True
for series in data:
s = series[(series >= curr_time) & (series < curr_time + l)]
cluster.append(np.array(s, dtype=
|
np.dtype('d')
|
numpy.dtype
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
""" Base and mixin classes for nearest neighbors.
Adapted from scikit-learn codebase at
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/base.py.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Sparseness support by <NAME>
# Multi-output support by <NAME> <<EMAIL>>
# Hubness reduction and approximate nearest neighbor support by <NAME> <<EMAIL>>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from functools import partial
import warnings
import numpy as np
from scipy.sparse import issparse, csr_matrix
from sklearn.exceptions import DataConversionWarning
from sklearn.neighbors.base import NeighborsBase as SklearnNeighborsBase
from sklearn.neighbors.base import KNeighborsMixin as SklearnKNeighborsMixin
from sklearn.neighbors.base import RadiusNeighborsMixin as SklearnRadiusNeighborsMixin
from sklearn.neighbors.base import UnsupervisedMixin, SupervisedFloatMixin
from sklearn.neighbors.base import _tree_query_radius_parallel_helper
from sklearn.neighbors.ball_tree import BallTree
from sklearn.neighbors.kd_tree import KDTree
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS, pairwise_distances_chunked
from sklearn.utils import check_array, gen_even_slices
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted, check_X_y
from joblib import Parallel, delayed, effective_n_jobs
from tqdm.auto import tqdm
from .approximate_neighbors import ApproximateNearestNeighbor, UnavailableANN
from .hnsw import HNSW
from .lsh import FalconnLSH
from .lsh import PuffinnLSH
from .nng import NNG
from .random_projection_trees import RandomProjectionTree
from ..reduction import NoHubnessReduction, LocalScaling, MutualProximity, DisSimLocal
__all__ = ['KNeighborsMixin', 'NeighborsBase', 'RadiusNeighborsMixin',
'SupervisedFloatMixin', 'SupervisedIntegerMixin', 'UnsupervisedMixin',
'VALID_METRICS', 'VALID_METRICS_SPARSE',
]
VALID_METRICS = dict(lsh=PuffinnLSH.valid_metrics if not issubclass(PuffinnLSH, UnavailableANN) else [],
falconn_lsh=FalconnLSH.valid_metrics if not issubclass(FalconnLSH, UnavailableANN) else [],
nng=NNG.valid_metrics if not issubclass(NNG, UnavailableANN) else [],
hnsw=HNSW.valid_metrics,
rptree=RandomProjectionTree.valid_metrics,
ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(lsh=[],
falconn_lsh=[],
nng=[],
hnsw=[],
rptree=[],
ball_tree=[],
kd_tree=[],
brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys()
- {'haversine'}),
)
ALG_WITHOUT_RADIUS_QUERY = ('hnsw', 'lsh', 'rptree', 'nng', )
EXACT_ALG = ('brute', 'kd_tree', 'ball_tree', )
ANN_ALG = ('hnsw', 'lsh', 'falconn_lsh', 'rptree', 'nng', )
ANN_CLS = (HNSW, FalconnLSH, PuffinnLSH, NNG, RandomProjectionTree, )
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist : ndarray
The input distances
weights : {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
-------
weights_arr : array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(SklearnNeighborsBase):
"""Base class for nearest neighbors estimators."""
def __init__(self, n_neighbors=None, radius=None,
algorithm='auto', algorithm_params: dict = None,
hubness: str = None, hubness_params: dict = None,
leaf_size=30, metric='minkowski', p=2, metric_params=None,
n_jobs=None, verbose: int = 0, **kwargs):
super().__init__(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
n_jobs=n_jobs)
if algorithm_params is None:
n_candidates = 1 if hubness is None else 100
algorithm_params = {'n_candidates': n_candidates,
'metric': metric}
if n_jobs is not None and 'n_jobs' not in algorithm_params:
algorithm_params['n_jobs'] = self.n_jobs
if 'verbose' not in algorithm_params:
algorithm_params['verbose'] = verbose
hubness_params = hubness_params if hubness_params is not None else {}
if 'verbose' not in hubness_params:
hubness_params['verbose'] = verbose
self.algorithm_params = algorithm_params
self.hubness_params = hubness_params
self.hubness = hubness
self.verbose = verbose
self.kwargs = kwargs
def _check_hubness_algorithm(self):
if self.hubness not in ['mp', 'mutual_proximity',
'ls', 'local_scaling',
'dsl', 'dis_sim_local',
None]:
raise ValueError(f'Unrecognized hubness algorithm: {self.hubness}')
# Users are allowed to use various identifiers for the algorithms,
# but here we normalize them to the short abbreviations used downstream
if self.hubness in ['mp', 'mutual_proximity']:
self.hubness = 'mp'
elif self.hubness in ['ls', 'local_scaling']:
self.hubness = 'ls'
elif self.hubness in ['dsl', 'dis_sim_local']:
self.hubness = 'dsl'
elif self.hubness is None:
pass
else:
raise ValueError(f'Internal error: unknown hubness algorithm: {self.hubness}')
def _check_algorithm_metric(self):
if self.algorithm not in ['auto', *EXACT_ALG, *ANN_ALG]:
raise ValueError("unrecognized algorithm: '%s'" % self.algorithm)
if self.algorithm == 'auto':
if self.metric == 'precomputed':
alg_check = 'brute'
elif (callable(self.metric) or
self.metric in VALID_METRICS['ball_tree']):
alg_check = 'ball_tree'
else:
alg_check = 'brute'
else:
alg_check = self.algorithm
if callable(self.metric):
if self.algorithm in ['kd_tree', *ANN_ALG]:
# callable metric is only valid for brute force and ball_tree
raise ValueError(f"{self.algorithm} algorithm does not support callable metric '{self.metric}'")
elif self.metric not in VALID_METRICS[alg_check]:
raise ValueError(f"Metric '{self.metric}' not valid. Use "
f"sorted(skhubness.neighbors.VALID_METRICS['{alg_check}']) "
f"to get valid options. "
f"Metric can also be a callable function.")
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = self.metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p <= 0:
raise ValueError("p must be greater than zero for minkowski metric")
def _check_algorithm_hubness_compatibility(self):
if self.hubness == 'dsl':
if self.metric in ['euclidean', 'minkowski']:
self.metric = 'euclidean' # DSL input must still be squared Euclidean
self.hubness_params['squared'] = False
if self.p != 2:
warnings.warn(f'DisSimLocal only supports squared Euclidean distances: Ignoring p={self.p}.')
elif self.metric in ['sqeuclidean']:
self.hubness_params['squared'] = True
else:
warnings.warn(f'DisSimLocal only supports squared Euclidean distances: Ignoring metric={self.metric}.')
self.metric = 'euclidean'
self.hubness_params['squared'] = True
def _set_hubness_reduction(self, X):
if self._hubness_reduction_method is None:
self._hubness_reduction = NoHubnessReduction()
else:
n_candidates = self.algorithm_params['n_candidates']
if 'include_self' in self.kwargs and self.kwargs['include_self']:
neigh_train = self.kcandidates(X, n_neighbors=n_candidates, return_distance=True)
else:
neigh_train = self.kcandidates(n_neighbors=n_candidates, return_distance=True)
# Remove self distances
neigh_dist_train = neigh_train[0] # [:, 1:]
neigh_ind_train = neigh_train[1] # [:, 1:]
if self._hubness_reduction_method == 'ls':
self._hubness_reduction = LocalScaling(**self.hubness_params)
elif self._hubness_reduction_method == 'mp':
self._hubness_reduction = MutualProximity(**self.hubness_params)
elif self._hubness_reduction_method == 'dsl':
self._hubness_reduction = DisSimLocal(**self.hubness_params)
else:
raise ValueError(f'Hubness reduction algorithm = "{self._hubness_reduction_method}" not recognized.')
self._hubness_reduction.fit(neigh_dist_train, neigh_ind_train, X=X, assume_sorted=False)
def _fit(self, X):
self._check_algorithm_metric()
self._check_hubness_algorithm()
self._check_algorithm_hubness_compatibility()
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p <= 0:
raise ValueError(f"p must be greater than one for minkowski metric, "
f"or in ]0, 1[ for fractional norms.")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self._index = X._index
self._hubness_reduction = X._hubness_reduction
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
elif isinstance(X, ApproximateNearestNeighbor):
self._tree = None
if isinstance(X, PuffinnLSH):
self._fit_X = np.array([X.index_.get(i) for i in range(X.n_indexed_)]) * X.X_indexed_norm_
self._fit_method = 'lsh'
elif isinstance(X, FalconnLSH):
self._fit_X = X.X_train_
self._fit_method = 'falconn_lsh'
elif isinstance(X, NNG):
self._fit_X = None
self._fit_method = 'nng'
elif isinstance(X, HNSW):
self._fit_X = None
self._fit_method = 'hnsw'
elif isinstance(X, RandomProjectionTree):
self._fit_X = None
self._fit_method = 'rptree'
self._index = X
# TODO enable hubness reduction here.
# We do not store X_train in all cases atm.
# self._hubness_reduction_method = self.hubness
# self._set_hubness_reduction(self._fit_X)
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError(f"n_samples must be greater than 0 (but was {n_samples}.")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute'] \
and not callable(self.effective_metric_):
raise ValueError(f"Metric '{self.effective_metric_}' not valid for sparse input. "
f"Use sorted(sklearn.neighbors.VALID_METRICS_SPARSE['brute']) "
f"to get valid options. Metric can also be a callable function.")
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
if self.hubness is not None:
warnings.warn(f'cannot use hubness reduction with sparse data: disabling hubness reduction.')
self.hubness = None
self._hubness_reduction_method = None
self._hubness_reduction = NoHubnessReduction()
return self
self._fit_method = self.algorithm
self._fit_X = X
self._hubness_reduction_method = self.hubness
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
elif (callable(self.effective_metric_) or
self.effective_metric_ in VALID_METRICS['ball_tree']):
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
else:
self._fit_method = 'brute'
self._index = None
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
self._index = None
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
self._index = None
elif self._fit_method == 'brute':
self._tree = None
self._index = None
elif self._fit_method == 'lsh':
self._index = PuffinnLSH(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'falconn_lsh':
self._index = FalconnLSH(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'nng':
self._index = NNG(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'hnsw':
self._index = HNSW(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'rptree':
self._index = RandomProjectionTree(**self.algorithm_params)
self._index.fit(X)
self._tree = None # because it's a tree, but not an sklearn tree...
else:
raise ValueError(f"algorithm = '{self.algorithm}' not recognized")
# Fit hubness reduction method
self._set_hubness_reduction(X)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {self.n_neighbors:d}")
else:
if not np.issubdtype(type(self.n_neighbors), np.integer):
raise TypeError(
f"n_neighbors does not take {type(self.n_neighbors)} value, "
f"enter integer value"
)
return self
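# Usage sketch for the branches above -- _fit accepts raw data as well as prebuilt
# search structures (BallTree/KDTree/ANN indexes). Assumes the public NearestNeighbors
# from skhubness.neighbors wraps this base class; `X_train` is illustrative:
#
#   >>> from sklearn.neighbors import BallTree
#   >>> from skhubness.neighbors import NearestNeighbors
#   >>> nn = NearestNeighbors(algorithm='ball_tree')
#   >>> nn.fit(BallTree(X_train))   # reuses the prebuilt tree (isinstance branch above)
#   >>> nn.fit(X_train)             # builds its own index according to `algorithm`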
def kcandidates(self, X=None, n_neighbors=None, return_distance=True) -> np.ndarray or (np.ndarray, np.ndarray):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from skhubness.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
check_is_fitted(self, "_fit_method")
if n_neighbors is None:
try:
n_neighbors = self.algorithm_params['n_candidates']
except KeyError:
n_neighbors = 1 if self.hubness is None else 100
elif n_neighbors <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {n_neighbors}")
else:
if not np.issubdtype(type(n_neighbors), np.integer):
raise TypeError(
"n_neighbors does not take %s value, "
"enter integer value" %
type(n_neighbors))
# The number of candidates must not be less than the number of neighbors used downstream
if self.n_neighbors is not None:
if n_neighbors < self.n_neighbors:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
try:
train_size = self._fit_X.shape[0]
except AttributeError:
train_size = self._index.n_samples_fit_
if n_neighbors > train_size:
warnings.warn(f'n_candidates > n_samples. Setting n_candidates = n_samples.')
n_neighbors = train_size
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = effective_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# TODO handle sparse matrices here
reduce_func = partial(self._kneighbors_reduce_func,
n_neighbors=n_neighbors,
return_distance=return_distance)
# for efficiency, use squared euclidean distances
kwds = ({'squared': True} if self.effective_metric_ == 'euclidean'
else self.effective_metric_params_)
result = pairwise_distances_chunked(
X, self._fit_X, reduce_func=reduce_func,
metric=self.effective_metric_, n_jobs=n_jobs,
**kwds)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
# require joblib >= 0.12
delayed_query = delayed(self._tree.query)
parallel_kwargs = {"prefer": "threads"}
result = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
elif self._fit_method in ['lsh', 'falconn_lsh', 'rptree', 'nng', ]:
# assume joblib>=0.12
delayed_query = delayed(self._index.kneighbors)
parallel_kwargs = {"prefer": "threads"}
result = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(X[s], n_candidates=n_neighbors, return_distance=True)
for s in gen_even_slices(X.shape[0], n_jobs)
)
elif self._fit_method in ['hnsw']:
# XXX nmslib supports multiple threads natively, so no joblib used here
# Must pack results into list to match the output format of joblib
result = self._index.kneighbors(X, n_candidates=n_neighbors, return_distance=True)
result = [result, ]
else:
raise ValueError(f"internal: _fit_method not recognized: {self._fit_method}.")
if return_distance:
dist, neigh_ind = zip(*result)
result = [np.atleast_2d(arr) for arr in [np.vstack(dist), np.vstack(neigh_ind)]]
else:
result = np.atleast_2d(np.vstack(result))
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
neigh_ind = np.atleast_2d(neigh_ind)
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
dist = np.atleast_2d(dist)
return dist, neigh_ind
return neigh_ind
class KNeighborsMixin(SklearnKNeighborsMixin):
"""Mixin for k-neighbors searches.
NOTE: adapted from scikit-learn. """
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
""" TODO """
check_is_fitted(self, ["_fit_method", "_hubness_reduction"], all_or_any=any)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {n_neighbors}")
else:
if not np.issubdtype(type(n_neighbors), np.integer):
raise TypeError(f"n_neighbors does not take {type(n_neighbors)} value, enter integer value")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
try:
train_size = self._fit_X.shape[0]
except AttributeError:
train_size = self._index.n_samples_fit_
if n_neighbors > train_size:
raise ValueError(f"Expected n_neighbors <= n_samples, "
f"but n_samples = {train_size}, n_neighbors = {n_neighbors}")
# First obtain candidate neighbors
query_dist, query_ind = self.kcandidates(X, return_distance=True)
query_dist = np.atleast_2d(query_dist)
query_ind = np.atleast_2d(query_ind)
# Second, reduce hubness
hubness_reduced_query_dist, query_ind = self._hubness_reduction.transform(query_dist,
query_ind,
X=X, # required by e.g. DSL
assume_sorted=True,)
# Third, sort hubness reduced candidate neighbors to get the final k neighbors
if query_is_train:
n_neighbors -= 1
kth = np.arange(n_neighbors)
mask = np.argpartition(hubness_reduced_query_dist, kth=kth)[:, :n_neighbors]
hubness_reduced_query_dist = np.take_along_axis(hubness_reduced_query_dist, mask, axis=1)
query_ind = np.take_along_axis(query_ind, mask, axis=1)
if return_distance:
result = hubness_reduced_query_dist, query_ind
else:
result = query_ind
return result
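# Usage sketch (the hubness values follow the strings handled in _set_hubness_reduction;
# the data arrays are illustrative):
#
#   >>> from skhubness.neighbors import NearestNeighbors
#   >>> nn = NearestNeighbors(n_neighbors=5, hubness='ls').fit(X_train)
#   >>> dist, ind = nn.kneighbors(X_test)   # hubness-reduced neighbors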
class RadiusNeighborsMixin(SklearnRadiusNeighborsMixin):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from skhubness.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
check_is_fitted(self, ["_fit_method", "_fit_X"], all_or_any=any)
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
radius *= radius
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(self._radius_neighbors_reduce_func,
radius=radius,
return_distance=return_distance)
results = pairwise_distances_chunked(
X, self._fit_X, reduce_func=reduce_func,
metric=self.effective_metric_, n_jobs=self.n_jobs,
**kwds)
if return_distance:
dist_chunks, neigh_ind_chunks = zip(*results)
dist_list = sum(dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
dist = np.empty(len(dist_list), dtype='object')
dist[:] = dist_list
neigh_ind = np.empty(len(neigh_ind_list), dtype='object')
neigh_ind[:] = neigh_ind_list
results = dist, neigh_ind
else:
neigh_ind_list = sum(results, [])
results = np.empty(len(neigh_ind_list), dtype='object')
results[:] = neigh_ind_list
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(f"{self._fit_method} does not work with sparse matrices. "
f"Densify the data, or set algorithm='brute'.")
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
parallel_kwargs = {"prefer": "threads"}
results = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(self._tree, X[s], radius, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
# Different order of neigh_ind, dist than usual!
neigh_ind, dist = tuple(zip(*results))
results = np.hstack(dist), np.hstack(neigh_ind)
else:
results = np.hstack(results)
elif self._fit_method in ['falconn_lsh']:
# assume joblib>=0.12
delayed_query = delayed(self._index.radius_neighbors)
parallel_kwargs = {"prefer": "threads"}
n_jobs = effective_n_jobs(self.n_jobs)
results = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(X[s], radius=radius, return_distance=return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
elif self._fit_method in ALG_WITHOUT_RADIUS_QUERY:
raise ValueError(f'{self._fit_method} does not support radius queries.')
else:
raise ValueError(f"internal: _fit_method={self._fit_method} not recognized.")
if self._fit_method in ANN_ALG:
if return_distance:
dist, neigh_ind = zip(*results)
results = [np.hstack(dist), np.hstack(neigh_ind)]
else:
results = np.hstack(results)
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
See also
--------
kneighbors_graph
"""
check_is_fitted(self, ["_fit_method", "_fit_X"], all_or_any=any)
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
def _kneighbors_reduce_func(self, dist, start,
n_neighbors, return_distance):
"""Reduce a chunk of distances to the nearest neighbors
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : array of shape (n_samples_chunk, n_samples)
start : int
The index in X which the first row of dist corresponds to.
n_neighbors : int
return_distance : bool
Returns
-------
dist : array of shape (n_samples_chunk, n_neighbors), optional
Returned only if return_distance
neigh : array of shape (n_samples_chunk, n_neighbors)
Notes
-----
This is required until radius_candidates is implemented in addition to kcandidates.
"""
sample_range = np.arange(dist.shape[0])[:, None]
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
return result
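# Tiny worked example of the argpartition/argsort pattern above (illustrative values):
#
#   >>> dist = np.array([[0.9, 0.1, 0.5]])
#   >>> ind = np.argpartition(dist, 1, axis=1)[:, :2]            # two smallest, any order
#   >>> ind[np.arange(1)[:, None], np.argsort(dist[np.arange(1)[:, None], ind])]
#   array([[1, 2]])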
class SupervisedIntegerMixin:
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, HNSW, FalconnLSH, PuffinLSH, NNG, RandomProjectionTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
from .unsupervised import NearestNeighbors
if not isinstance(X, (KDTree, BallTree, *ANN_CLS, NearestNeighbors)):
X, y = check_X_y(X, y, "csr", multi_output=True)
try:
verbose = self.verbose
except AttributeError:
verbose = 0
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
if issparse(y):
self._y = y
self.classes_ = np.tile([0, 1], (y.shape[1], 1))
else:
self._y = np.empty(y.shape, dtype=np.int)
for k in tqdm(range(self._y.shape[1]),
desc='fit:targets',
disable=False if verbose else True):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
'''
<NAME>
TFR Tire Data Analysis
this code is written to analyze the TTC FSAE tire data
the code is written in a linear, easy-to-read format geared towards an engineering mindset
rather than efficient software
Contact: <EMAIL> for help running or understanding the program
'''
#_______________________________________________________________________________________________________________________
'''
SECTION 1
this section contains the necessary packages used to process the data. If you are using Pycharm as your IDE, go to
File --> Settings --> Project --> choose Project Interpreter then click on the (+) sign on the right hand side to add
other packages
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#_______________________________________________________________________________________________________________________
'''
SECTION 2
The original format of this code took run8 data. If you want to add other data, make sure it is added in the same file
structure. ~$PROJECT_PATH...program files\data\.....
'''
#run_input = input("Enter the run number you want to study: ") # example of input B1965run2
run_input = 'B1965run2'
data = pd.read_excel (r'C:\Users\Fizics\Desktop\TTC\data\RunData_cornering_ASCII_SI_10in_round8 excel/'+(run_input)+(".xlsx"),skiprows=2)
df = pd.DataFrame(data)
#print(data.head())
#print(data.tail())
'''
first we skipped the information in the first two rows
then the 'data' variable read the 3rd row's headers
now we delete data from 0:5000, where 0 is actually the 4th row in the excel doc. We do this to get rid of
pre-test noise. Comment out the line below to see what the splash graph looks like without it
'''
df = df.drop(df.index[0:5000])
#SI Units are being used
#These variables are mostly used in the splash graph. You can add whatever other variables you want to look at
speed=df["V"] # kph
pressure=df["P"] # kPa
camber=df["IA"] # deg
slipAngle = df["SA"] # deg
verticalLoad = df["FZ"] * -1 # N
Radius_loaded=df["RL"] # cm
lateralForce = df["FY"] # N
alignTorque = df["MZ"] # Nm
#_______________________________________________________________________________________________________________________
'''
SECTION 3
https://fsaettc.org/viewtopic.php?f=18&t=182&p=1099&hilit=tactics+for+TTC#p1099
This section creates a splash graph similar to what's done in the URL above
numpy.sign - returns -1, 0, or 1 corresponding to a negative, zero, or positive value (array)
numpy.diff - returns the difference of out[]= a[i+1] - a[i] (array)
numpy.nonzero - returns values of input array that are not == 0 (tuple)
'''
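# Tiny worked example of the zero-crossing idiom described above (values are illustrative):
#   sa = np.array([-2., -1., 1., 2., -1.])
#   np.sign(sa)          -> [-1., -1.,  1.,  1., -1.]
#   np.diff(np.sign(sa)) -> [ 0.,  2.,  0., -2.]
#   np.nonzero(...)      -> (array([1, 3]),)   # indices just before each sign change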
slipAngle = np.array(slipAngle)
verticalLoad = np.array(verticalLoad)
lateralForce = np.array(lateralForce)
sign = np.sign(slipAngle)
diff = np.diff(sign)
xCross = np.ndarray.nonzero(diff)
xvec = np.array(range(len(slipAngle)))
xCritical = xvec[xCross]
yCritical = slipAngle[xCross]
#total_sets = (len(xCritical)) # use this line to find the number of x crossings
#num_Fz_loadvalues = 6
#num_load_set = int(total_sets/6)
a=18
fig1, ax1 = plt.subplots(6, sharex=False)
ax1[0].set_title('Data of Interest vs Indices - Splash Graph')
ax1[0].plot(speed,c='r',linewidth=0.1)
ax1[0].set_ylabel('speed (kph)')
ax1[0].axvline(x=xCritical[a])
ax1[0].grid()
ax1[1].plot(pressure,c='k')
ax1[1].set_ylabel('pressure (kPa)')
ax1[1].axvline(x=xCritical[a])
ax1[1].grid()
ax1[2].plot(camber,c='k')
ax1[2].set_ylabel('IA (deg)')
ax1[2].axvline(x=xCritical[a])
ax1[2].grid()
ax1[3].plot(slipAngle,c='k')
ax1[3].scatter(xCritical,yCritical,marker='x', c='r') #overlay a scatter of (x's) to show the x axis crossings.
ax1[3].set_ylabel('SA (deg)')
ax1[3].axvline(x=xCritical[a])
ax1[3].grid()
ax1[4].plot(verticalLoad,c='k')
ax1[4].set_ylabel('FZ (N)')
ax1[4].axvline(x=xCritical[a])
ax1[4].grid()
ax1[5].plot(Radius_loaded,c='k')
ax1[5].set_ylabel('RL (cm)')
ax1[5].axvline(x=xCritical[a])
ax1[5].set_xlabel('Indices')
ax1[5].grid()
#_______________________________________________________________________________________________________________________
'''
SECTION 4
https://stackoverflow.com/questions/1735025/how-to-normalize-a-numpy-array-to-within-a-certain-range
normalize between -1 and 1
need to look up why the data is normalized when they seem to produce identical graphs
'''
slipAngle_norm = 2.*(slipAngle - np.min(slipAngle))/np.ptp(slipAngle) - 1.  # maps to [-1, 1] per the formula from the URL above
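# Quick check of the [-1, 1] mapping above with illustrative values:
#   x = np.array([0., 5., 10.])
#   2.*(x - np.min(x))/np.ptp(x) - 1.  ->  [-1., 0., 1.]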
import numpy as np
import pytest
from surropt.core.utils import is_row_member
_b = np.array([[1, 4.], [3, 6], [5, 9], [2, 8]])
_a1 = np.array([1., 3])
_a2 = np.array([5., 9])
_a3 = np.array([8., 2])
_a4 = np.array([3., 6])
_a5 = np.vstack((_a1, _a4))
_a6 = np.vstack((_a3, _a1))
_a7 = np.vstack((_a2, _a3))
_a8 = np.vstack((_a2, _a2))
from __future__ import division
import pytest
import numpy as np
from numpy.testing import assert_allclose
from rl.memory import SequentialMemory, RingBuffer
def test_ring_buffer():
def assert_elements(b, ref):
assert len(b) == len(ref)
for idx in range(b.maxlen):
if idx >= len(ref):
with pytest.raises(KeyError):
b[idx]
else:
assert b[idx] == ref[idx]
b = RingBuffer(5)
# Fill buffer.
assert_elements(b, [])
b.append(1)
assert_elements(b, [1])
b.append(2)
assert_elements(b, [1, 2])
b.append(3)
assert_elements(b, [1, 2, 3])
b.append(4)
assert_elements(b, [1, 2, 3, 4])
b.append(5)
assert_elements(b, [1, 2, 3, 4, 5])
# Add couple more items with buffer at limit.
b.append(6)
assert_elements(b, [2, 3, 4, 5, 6])
b.append(7)
assert_elements(b, [3, 4, 5, 6, 7])
b.append(8)
assert_elements(b, [4, 5, 6, 7, 8])
def test_get_recent_state_with_episode_boundaries():
memory = SequentialMemory(3, window_length=2, ignore_episode_boundaries=False)
obs_size = (3, 4)
obs0 = np.random.random(obs_size)
terminal0 = False
obs1 = np.random.random(obs_size)
terminal1 = False
obs2 = np.random.random(obs_size)
terminal2 = False
obs3 = np.random.random(obs_size)
terminal3 = True
obs4 = np.random.random(obs_size)
terminal4 = False
obs5 = np.random.random(obs_size)
terminal5 = True
obs6 = np.random.random(obs_size)
terminal6 = False
state = memory.get_recent_state(obs0)
assert state.shape == (2,) + obs_size
assert np.allclose(state[0], 0.)
assert np.all(state[1] == obs0)
# memory.append takes the current observation, the reward after taking an action and if
# the *new* observation is terminal, thus `obs0` and `terminal1` is correct.
memory.append(obs0, 0, 0., terminal1)
state = memory.get_recent_state(obs1)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs0)
assert np.all(state[1] == obs1)
memory.append(obs1, 0, 0., terminal2)
state = memory.get_recent_state(obs2)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs1)
assert np.all(state[1] == obs2)
memory.append(obs2, 0, 0., terminal3)
state = memory.get_recent_state(obs3)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs2)
assert np.all(state[1] == obs3)
memory.append(obs3, 0, 0., terminal4)
state = memory.get_recent_state(obs4)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == np.zeros(obs_size))
assert np.all(state[1] == obs4)
memory.append(obs4, 0, 0., terminal5)
state = memory.get_recent_state(obs5)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs4)
assert np.all(state[1] == obs5)
memory.append(obs5, 0, 0., terminal6)
state = memory.get_recent_state(obs6)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == np.zeros(obs_size))
assert np.all(state[1] == obs6)
def test_training_flag():
obs_size = (3, 4)
obs0 = np.random.random(obs_size)
terminal0 = False
obs1 = np.random.random(obs_size)
terminal1 = True
obs2 = np.random.random(obs_size)
terminal2 = False
for training in (True, False):
memory = SequentialMemory(3, window_length=2)
state = memory.get_recent_state(obs0)
assert state.shape == (2,) + obs_size
assert np.allclose(state[0], 0.)
assert np.all(state[1] == obs0)
assert memory.nb_entries == 0
memory.append(obs0, 0, 0., terminal1, training=training)
state = memory.get_recent_state(obs1)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs0)
assert np.all(state[1] == obs1)
if training:
assert memory.nb_entries == 1
else:
assert memory.nb_entries == 0
memory.append(obs1, 0, 0., terminal2, training=training)
state = memory.get_recent_state(obs2)
assert state.shape == (2,) + obs_size
assert np.allclose(state[0], 0.)
assert np.all(state[1] == obs2)
if training:
assert memory.nb_entries == 2
else:
assert memory.nb_entries == 0
def test_get_recent_state_without_episode_boundaries():
memory = SequentialMemory(3, window_length=2, ignore_episode_boundaries=True)
obs_size = (3, 4)
obs0 = np.random.random(obs_size)
terminal0 = False
obs1 = np.random.random(obs_size)
terminal1 = False
obs2 = np.random.random(obs_size)
terminal2 = False
obs3 = np.random.random(obs_size)
terminal3 = True
obs4 = np.random.random(obs_size)
terminal4 = False
obs5 = np.random.random(obs_size)
terminal5 = True
obs6 = np.random.random(obs_size)
terminal6 = False
state = memory.get_recent_state(obs0)
assert state.shape == (2,) + obs_size
assert np.allclose(state[0], 0.)
assert np.all(state[1] == obs0)
# memory.append takes the current observation, the reward after taking an action and if
# the *new* observation is terminal, thus `obs0` and `terminal1` is correct.
memory.append(obs0, 0, 0., terminal1)
state = memory.get_recent_state(obs1)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs0)
assert np.all(state[1] == obs1)
memory.append(obs1, 0, 0., terminal2)
state = memory.get_recent_state(obs2)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs1)
assert np.all(state[1] == obs2)
memory.append(obs2, 0, 0., terminal3)
state = memory.get_recent_state(obs3)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs2)
assert np.all(state[1] == obs3)
memory.append(obs3, 0, 0., terminal4)
state = memory.get_recent_state(obs4)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs3)
assert np.all(state[1] == obs4)
memory.append(obs4, 0, 0., terminal5)
state = memory.get_recent_state(obs5)
assert state.shape == (2,) + obs_size
assert np.all(state[0] == obs4)
assert np.all(state[1] == obs5)
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import networkx as nx
import matplotlib.colors as mc
import matplotlib.ticker as mticker
import colorsys
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 9}
N_NODE_FEAT = 7
N_EDGE_FEAT = 1
TIMESTEP = 0.5
class MultiAgentEnv(gym.Env):
def __init__(self, fractional_power_levels=[0.25, 0.0], eavesdropping=True, num_agents=40, initialization="Random",
aoi_reward=True, episode_length=500.0, comm_model="tw", min_sinr=1.0, last_comms=True):
super(MultiAgentEnv, self).__init__()
# Problem parameters
self.last_comms = last_comms
self.n_agents = num_agents
self.n_nodes = self.n_agents * self.n_agents
self.r_max = 500.0
self.n_features = N_NODE_FEAT # (TransTime, Parent Agent, PosX, PosY, VelX, VelY, LastCommTime)
self.n_edges = self.n_agents * self.n_agents
self.carrier_frequency_ghz = 2.4
self.min_SINR_dbm = min_sinr # 10-15 is considered unreliable, cited paper uses -4
self.gaussian_noise_dBm = -50
self.gaussian_noise_mW = 10 ** (self.gaussian_noise_dBm / 10)
self.path_loss_exponent = 2
self.aoi_reward = aoi_reward
self.distance_scale = self.r_max * 2
self.fraction_of_rmax = fractional_power_levels
self.power_levels = self.find_power_levels()
self.r_max *= np.sqrt(self.n_agents / 40)
# initialize state matrices
self.edge_features = np.zeros((self.n_nodes, 1))
self.episode_length = episode_length
self.penalty = 0.0
self.x = np.zeros((self.n_agents, self.n_features))
self.network_buffer = np.zeros((self.n_agents, self.n_agents, self.n_features))
self.old_buffer = np.zeros((self.n_agents, self.n_agents, self.n_features))
self.relative_buffer = np.zeros((self.n_agents, self.n_agents, self.n_features))
self.diag = np.eye(self.n_agents, dtype=np.bool).reshape(self.n_agents, self.n_agents, 1)
# each agent has their own action space of a n_agent vector of weights
self.action_space = spaces.MultiDiscrete([self.n_agents * len(self.power_levels)] * self.n_agents)
self.observation_space = spaces.Dict(
[
# (n*n) nodes by N_NODE_FEAT features; parent references are maintained via the edges
("nodes", spaces.Box(shape=(self.n_agents * self.n_agents, N_NODE_FEAT), low=-np.Inf, high=np.Inf,
dtype=np.float32)),
# upperbound, n fully connected trees (n-1) edges
# To-Do ensure these bounds don't affect anything
("edges", spaces.Box(shape=(self.n_edges, N_EDGE_FEAT), low=-np.Inf, high=np.Inf,
dtype=np.float32)),
# senders and receivers will each be one endpoint of an edge, and thus should be same size as edges
("senders", spaces.Box(shape=(self.n_edges, 1), low=0, high=self.n_agents,
dtype=np.float32)),
("receivers", spaces.Box(shape=(self.n_edges, 1), low=0, high=self.n_agents,
dtype=np.float32)),
("globals", spaces.Box(shape=(1, 1), low=0, high=self.episode_length, dtype=np.float32)),
]
)
# Plotting placeholders
self.fig = None
self.agent_markers = None
self.np_random = None
self.ax = None
self.agent0_marker = None
self._plot_text = None
self.arrows = None
self.current_arrow = None
self.diff = None
self.r2 = None
self.timestep = 0
self.avg_transmit_distance = 0
self.symmetric_comms = True
self.is_interference = True
self.mst_action = None
self.network_connected = False
self.recompute_solution = False
self.mobile_agents = False
self.flocking = False
self.biased_velocities = False
self.known_initial_positions = False
self.tx_power = None
self.eavesdroppers = None
self.eavesdroppers_response = None
self.attempted_transmissions = None
self.successful_transmissions = None
self.eavesdropping = eavesdropping
self.initial_formation = initialization
# 'push' : At each time step, agent selects which agent they want to 'push' their buffer to
# 'tw' : An agent requests/pushes their buffer to an agent, with hopes of getting their information back
self.comm_model = comm_model
if self.flocking:
self.render_radius = 2 * self.r_max
else:
self.render_radius = self.r_max
# Packing and unpacking information
self.keys = ['nodes', 'edges', 'senders', 'receivers', 'globals']
self.save_plots = False
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, attempted_transmissions):
"""
Apply agent actions to update environment.
:param attempted_transmissions: n-vector of index of who to communicate with
:return: Environment observations as a dict representing the graph.
"""
assert self.comm_model in ("push", "tw")
self.timestep = self.timestep + TIMESTEP
# my information is updated
self.network_buffer[:, :, 0] += np.eye(self.n_agents) * TIMESTEP
self.attempted_transmissions = attempted_transmissions // len(self.power_levels)
transmission_indexes = attempted_transmissions // len(self.power_levels)
self.tx_power = attempted_transmissions % len(self.power_levels)
self.attempted_transmissions = np.where(self.power_levels[self.tx_power.astype(np.int)] > 0.0,
self.attempted_transmissions, np.arange(self.n_agents))
if self.last_comms:
self.network_buffer[np.arange(self.n_agents), self.attempted_transmissions, 6] = self.timestep
if self.is_interference:
# calculates interference from attempted transmissions
transmission_indexes, response_indexes = self.interference(self.attempted_transmissions, self.tx_power)
self.successful_transmissions = transmission_indexes
self.update_buffers(transmission_indexes)
if self.comm_model is "tw":
# Two-Way Communications can be modeled as a sequence of a push and a response
self.timestep = self.timestep + TIMESTEP
# my information is updated
self.network_buffer[:, :, 0] += np.eye(self.n_agents) * TIMESTEP
self.update_buffers(response_indexes, push=False)
if not self.network_connected:
self.is_network_connected()
if self.timestep / TIMESTEP % 2 == 1:
reward = 0
else:
reward = - self.instant_cost() / self.episode_length
return self.get_relative_network_buffer_as_dict(), reward, self.timestep >= self.episode_length, {}
def get_relative_network_buffer_as_dict(self):
"""
Compute local node observations.
:return: A dict representing the current routing buffers.
"""
# timesteps and positions won't be relative within env, but need to be when passed out
self.relative_buffer[:] = self.network_buffer
self.relative_buffer[:, :, 0] -= self.timestep
self.relative_buffer[:, :, 0] /= self.episode_length
if self.last_comms:
self.relative_buffer[:, :, 6] -= self.timestep
self.relative_buffer[:, :, 6] /= self.episode_length
# fills rows of a nxn matrix, subtract that from relative_network_buffer
self.relative_buffer[:, :, 2:4] -= self.x[:, 0:2].reshape(self.n_agents, 1, 2)
self.relative_buffer[:, :, 2:4] /= self.distance_scale
if self.mobile_agents:
self.relative_buffer[:, :, 4:6] -= self.x[:, 2:4].reshape(self.n_agents, 1, 2)
self.relative_buffer[:, :, 4:6] /= self.distance_scale
# align to the observation space and then pass that input out
return self.map_to_observation_space(self.relative_buffer)
def map_to_observation_space(self, network_buffer):
"""
Compute local buffers as a Dict of representing a graph.
:return: A dict representing the current routing buffers.
"""
no_edge = np.not_equal(network_buffer[:, :, 1], -1)
senders = np.where(no_edge, self.n_agents * np.arange(self.n_agents)[:, np.newaxis] + network_buffer[:, :, 1],
-1)
receivers = np.where(no_edge, np.reshape(np.arange(self.n_nodes), (self.n_agents, self.n_agents)), -1)
step = np.reshape([self.timestep], (1, 1))
senders = np.reshape(senders.flatten(), (-1, 1))
receivers = np.reshape(receivers.flatten(), (-1, 1))
nodes = np.reshape(network_buffer, (self.n_nodes, -1))
nodes[:, 1] = 0 # zero out the neighbor node index
data_dict = {
"n_node": self.n_nodes,
"senders": senders,
"receivers": receivers,
"edges": self.edge_features,
"nodes": nodes,
"globals": step
}
return data_dict
def algebraic_connectivity(self, adjacency_matrix):
graph_laplacian = np.diag(np.sum(adjacency_matrix, axis=1)) - adjacency_matrix
v, _ = np.linalg.eigh(graph_laplacian)
return v[1]
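# Worked example (illustrative): for the path graph 0-1-2 with adjacency
# [[0,1,0],[1,0,1],[0,1,0]], the Laplacian eigenvalues are [0, 1, 3]; the returned
# v[1] = 1 > 0 indicates a connected graph.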
def reset(self):
if self.initial_formation is "Grid":
x, y = self.compute_grid_with_bias(1, 1, self.n_agents)
perm = np.random.permutation(self.n_agents)
self.x[:, 0] = x[perm]
self.x[:, 1] = y[perm]
elif self.initial_formation is "Clusters":
n_clusters = int(np.sqrt(self.n_agents))
cluster_offset = self.r_max / (n_clusters * 1.5)
cent_x, cent_y = self.compute_grid_with_bias(1.5, 1.5, n_clusters, additional_offset=cluster_offset)
max_agents_per_cluster = int(np.ceil(self.n_agents / n_clusters))
agent_cluster_assignment_x = np.reshape(np.tile(cent_x, max_agents_per_cluster).T,
(max_agents_per_cluster * n_clusters))[:self.n_agents]
agent_cluster_assignment_y = np.reshape(np.tile(cent_y, max_agents_per_cluster).T,
(max_agents_per_cluster * n_clusters))[:self.n_agents]
perm = np.random.permutation(self.n_agents)
self.x[:, 0] = agent_cluster_assignment_x[perm] + np.random.uniform(-cluster_offset, cluster_offset,
size=(self.n_agents,))
self.x[:, 1] = agent_cluster_assignment_y[perm] + np.random.uniform(-cluster_offset, cluster_offset,
size=(self.n_agents,))
else:
alg_connect = 0.0
while np.around(alg_connect, 10) == 0.0:
self.x[:, 0:2] = np.random.uniform(-self.r_max, self.r_max, size=(self.n_agents, 2))
dist = self.compute_distances()
np.fill_diagonal(dist, 0.0)
dist = (dist <= self.fraction_of_rmax[0] * self.distance_scale).astype(np.float)
alg_connect = self.algebraic_connectivity(dist)
self.mst_action = None
self.network_connected = False
self.timestep = 0
self.network_buffer = np.zeros((self.n_agents, self.n_agents, self.n_features))
if self.known_initial_positions:
self.network_buffer[:, :, 2] = self.x[:, 0]
self.network_buffer[:, :, 3] = self.x[:, 1]
else:
self.network_buffer[:, :, 2] = np.where(np.eye(self.n_agents, dtype=np.bool),
self.x[:, 0].reshape(self.n_agents, 1),
self.network_buffer[:, :, 2])
self.network_buffer[:, :, 3] = np.where(np.eye(self.n_agents, dtype=np.bool),
self.x[:, 1].reshape(self.n_agents, 1),
self.network_buffer[:, :, 3])
# motivates agents to get information in the first time step
self.network_buffer[:, :, 0] = np.where(np.eye(self.n_agents, dtype=np.bool), 0, self.penalty)
self.network_buffer[:, :, 1] = -1 # no parent references yet
self.old_buffer[:] = self.network_buffer
self.relative_buffer[:] = self.network_buffer
if self.fig != None:
plt.close(self.fig)
self.fig = None
self.tx_power = None
self.eavesdroppers = None
self.eavesdroppers_response = None
return self.get_relative_network_buffer_as_dict()
def render(self, mode='human', save_plots=False, controller="Random"):
"""
Render the environment with agents as points in 2D space
"""
if mode == 'human':
if self.fig == None:
plt.ion()
self.fig, (self.ax1, self.ax2) = plt.subplots(1, 2, figsize=(10, 5))
self.ax1.set_aspect('equal')
self.ax2.set_aspect('equal')
self.ax1.set_ylim(-1.0 * self.render_radius - 0.075, 1.0 * self.render_radius + 0.075)
self.ax1.set_xlim(-1.0 * self.render_radius - 0.075, 1.0 * self.render_radius + 0.075)
self.ax2.set_ylim(-1.0 * self.render_radius - 0.075, 1.0 * self.render_radius + 0.075)
self.ax2.set_xlim(-1.0 * self.render_radius - 0.075, 1.0 * self.render_radius + 0.075)
self.ax1.set_xticklabels(self.ax1.get_xticks(), font)
self.ax1.set_yticklabels(self.ax1.get_yticks(), font)
self.ax2.set_xticklabels(self.ax2.get_xticks(), font)
self.ax2.set_yticklabels(self.ax2.get_yticks(), font)
self.ax1.set_title('Network Interference')
self.ax2.set_title('Agent 0\'s Buffer Tree')
if self.flocking:
type_agents = "Flocking"
elif self.mobile_agents:
type_agents = "Mobile"
else:
type_agents = "Stationary"
self.fig.suptitle('{0} Communication Policy for {1} Agents'.format(controller, type_agents), fontsize=16)
self.fig.subplots_adjust(top=0.9, left=0.1, right=0.9,
bottom=0.12) # create some space below the plots by increasing the bottom-value
self._plot_text = plt.text(x=-1.21 * self.render_radius, y=-1.28 * self.render_radius, ha='center',
va='center', s="", fontsize=11,
bbox={'facecolor': 'lightsteelblue', 'alpha': 0.5, 'pad': 5})
self.agent_markers1, = self.ax1.plot([], [], marker='o', color='royalblue', linestyle = '') # Returns a tuple of line objects, thus the comma
self.agent0_marker1, = self.ax1.plot([], [], 'go')
self.agent_markers1_eaves, = self.ax1.plot([], [], marker='o', color='lightsteelblue', linestyle = '')
self.agent_markers2, = self.ax2.plot([], [], marker='o', color='royalblue', linestyle = '')
self.agent0_marker2, = self.ax2.plot([], [], 'go')
self.arrows = []
self.failed_arrows = []
self.paths = []
for i in range(self.n_agents):
temp_arrow = self.ax1.quiver(self.x[i, 0], self.x[i, 1], 0, 0, scale=1, color='k', units='xy',
width=.015 * self.render_radius,
minshaft=.001, minlength=0)
self.arrows.append(temp_arrow)
temp_failed_arrow = self.ax1.quiver(self.x[i, 0], self.x[i, 1], 0, 0, color='r', scale=1,
units='xy',
width=.015 * self.render_radius, minshaft=.001, minlength=0)
self.failed_arrows.append(temp_failed_arrow)
temp_line, = self.ax2.plot([], [], 'k')
self.paths.append(temp_line)
if self.r_max >= 1000:
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
g = lambda x, pos: "${}$".format(f._formatSciNotation('%1.1e' % x))
self.ax1.xaxis.set_major_formatter(mticker.FuncFormatter(g))
self.ax1.yaxis.set_major_formatter(mticker.FuncFormatter(g))
self.ax2.xaxis.set_major_formatter(mticker.FuncFormatter(g))
self.ax2.yaxis.set_major_formatter(mticker.FuncFormatter(g))
eaves_x = np.where(np.sum(self.eavesdroppers, axis=0) > 0, self.x[:, 0], 0)
eaves_y = np.where(np.sum(self.eavesdroppers, axis=0) > 0, self.x[:, 1], 0)
noneaves_x = np.where(np.sum(self.eavesdroppers, axis=0) == 0, self.x[:, 0], 0)
noneaves_y = np.where(np.sum(self.eavesdroppers, axis=0) == 0, self.x[:, 1], 0)
self.agent_markers1.set_xdata(np.ma.masked_equal(noneaves_x,0))
self.agent_markers1.set_ydata(np.ma.masked_equal(noneaves_y,0))
self.agent0_marker1.set_xdata(self.x[0, 0])
self.agent0_marker1.set_ydata(self.x[0, 1])
self.agent_markers1_eaves.set_xdata(np.ma.masked_equal(eaves_x,0))
self.agent_markers1_eaves.set_ydata(np.ma.masked_equal(eaves_y,0))
if self.mobile_agents or self.timestep <= 1:
# Plot the agent locations at the start of the episode
self.agent_markers2.set_xdata(self.x[:, 0])
self.agent_markers2.set_ydata(self.x[:, 1])
self.agent0_marker2.set_xdata(self.x[0, 0])
self.agent0_marker2.set_ydata(self.x[0, 1])
if self.mobile_agents or len(self.power_levels) > 1:
for i in range(self.n_agents):
self.arrows[i].remove()
succ_color = self.lighten_color('k', 1 - (self.tx_power[i] / len(self.power_levels)))
temp_arrow = self.ax1.quiver(self.x[i, 0], self.x[i, 1], 0, 0, scale=1, color=succ_color,
units='xy',
width=.015 * self.render_radius,
minshaft=.001, minlength=0)
self.arrows[i] = temp_arrow
self.failed_arrows[i].remove()
fail_color = self.lighten_color('r', 1 - (self.tx_power[i] / len(self.power_levels)))
temp_failed_arrow = self.ax1.quiver(self.x[i, 0], self.x[i, 1], 0, 0, color=fail_color, scale=1,
units='xy',
width=.015 * self.render_radius, minshaft=.001, minlength=0)
self.failed_arrows[i] = temp_failed_arrow
transmit_distances = []
for i in range(self.n_agents):
j = int(self.network_buffer[0, i, 1])
if j != -1:
self.paths[i].set_xdata([self.x[i, 0], self.x[j, 0]])
self.paths[i].set_ydata([self.x[i, 1], self.x[j, 1]])
else:
self.paths[i].set_xdata([])
self.paths[i].set_ydata([])
if i != self.attempted_transmissions[i] and self.attempted_transmissions[i] != -1:
# agent chose to attempt transmission
transmit_distances.append(np.linalg.norm(self.x[i, 0:2] - self.x[j, 0:2]))
# agent chooses to communicate with j
j = self.attempted_transmissions[i]
# print(self.successful_transmissions[i][0][0])
if len(self.successful_transmissions[i]) > 0 and j == self.successful_transmissions[i][0]:
# communication linkage is successful - black
self.arrows[i].set_UVC(self.x[j, 0] - self.x[i, 0], self.x[j, 1] - self.x[i, 1])
self.failed_arrows[i].set_UVC(0, 0)
else:
# communication linkage is unsuccessful - red
self.arrows[i].set_UVC(0, 0)
self.failed_arrows[i].set_UVC(self.x[j, 0] - self.x[i, 0], self.x[j, 1] - self.x[i, 1])
else:
# agent chose to not attempt transmission
self.arrows[i].set_UVC(0, 0)
self.failed_arrows[i].set_UVC(0, 0)
cost = self.compute_current_aoi()
if len(transmit_distances) == 0:
self.avg_transmit_distance = 0.0
else:
self.avg_transmit_distance = np.mean(transmit_distances)
mean_hops = self.find_tree_hops()
succ_communication_percent = self.get_successful_communication_percent()
plot_str = 'Mean AoI: {0:2.2f} | Mean Hops: {1:2.2f} | Mean TX Dist: {2:2.2f} | Comm %: {3} | Connected Network: {4}'.format(
cost,
mean_hops,
self.avg_transmit_distance,
succ_communication_percent,
self.network_connected)
self._plot_text.set_text(plot_str)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if save_plots:
plt.savefig('visuals/ts' + str(int(self.timestep)) + '.png')
def get_successful_communication_percent(self):
count_succ_comm = 0
count_att_comm = 0
for i in range(self.n_agents):
if i != self.attempted_transmissions[i] and self.attempted_transmissions[i] != -1:
# agent chose to attempt transmission
count_att_comm += 1
# agent chooses to communicate with j
j = self.attempted_transmissions[i]
if len(self.successful_transmissions[i]) > 0 and j == self.successful_transmissions[i][0]:
# communication linkage is successful - black
count_succ_comm += 1
if count_att_comm > 0:
succ_communication_percent = round((count_succ_comm / count_att_comm) * 100, 1)
else:
succ_communication_percent = 0.0
return succ_communication_percent
def close(self):
pass
def compute_distances(self):
diff = self.x[:, 0:2].reshape((self.n_agents, 1, 2)) - self.x[:, 0:2].reshape((1, self.n_agents, 2))
dist = np.linalg.norm(diff, axis=2)
np.fill_diagonal(dist, np.PINF)
return dist
def compute_current_aoi(self):
return - np.mean(self.network_buffer[:, :, 0] - self.timestep)
def instant_cost(self): # average time_delay for a piece of information plus comm distance
if self.flocking and not self.aoi_reward:
return np.sum(np.var(self.x[:, 2:4], axis=0))
elif self.is_interference or self.aoi_reward:
return self.compute_current_aoi()
else:
return self.compute_current_aoi()
def interference(self, attempted_transmissions, tx_power):
# converts attempted transmissions list to an adjacency matrix
# 0's - no communication, tx_power on indices that are communicating
# rows are transmitting agent, columns are receiver agents
tx_adj_mat = np.zeros((self.n_agents, self.n_agents)) + np.NINF
tx_adj_mat[np.arange(self.n_agents), attempted_transmissions] = self.power_levels[tx_power.astype(np.int)]
np.fill_diagonal(tx_adj_mat, np.NINF)
successful_tx_power, self.eavesdroppers = self.calculate_sinr(tx_adj_mat)
tx_idx = [np.nonzero(t)[0] for t in np.nan_to_num(successful_tx_power, nan=0.0, neginf=0.0)]
if self.comm_model is "push":
resp_idx = None
else:
resp_adj_mat = np.transpose(np.where(np.not_equal(successful_tx_power, np.NINF), tx_adj_mat, np.NINF))
successful_responses, self.eavesdroppers_response = self.calculate_sinr(resp_adj_mat)
resp_idx = [np.nonzero(t)[0] for t in np.nan_to_num(successful_responses, nan=0.0, neginf=0.0)]
return tx_idx, resp_idx
def calculate_sinr(self, tx_adj_mat_power_db):
# Calculate SINR for each possible transmission
free_space_path_loss = 10 * self.path_loss_exponent * np.log10(self.compute_distances()) + 20 * np.log10(
self.carrier_frequency_ghz * 10 ** 9) - 147.55 # dB
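# Worked example of the path-loss model above (illustrative): at d = 100 m,
# f = 2.4 GHz and exponent 2, the loss is 10*2*log10(100) + 20*log10(2.4e9) - 147.55
# ~= 40 + 187.6 - 147.55 ~= 80.1 dB, i.e. a channel gain of about 10**(-8).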
channel_gain = np.power(.1, free_space_path_loss / 10)
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
import os.path as osp
import numpy as np
import tikzplotlib
std_dev_plot = 0
min_max_plot = 1
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
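# Quick illustrative check of the helper above:
#   moving_average([1, 2, 3, 4], n=2) -> array([1.5, 2.5, 3.5])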
def plot_learning_curves(filename):
resSave = np.load(filename)
colorSafe = 'blue'
colorPerf = 'green'
colorBlend = 'red'
# reward_perf = resSave['rp']
# reward_safe = resSave['rs']
# reward_blend = resSave['rb']
# mp,ms,mb = np.mean(reward_perf,axis=0), np.mean(reward_safe,axis=0),\
# np.mean(reward_blend,axis=0)
# std_mp,std_ms,std_mb = np.std(reward_perf,axis=0), np.std(reward_safe,axis=0),\
# np.std(reward_blend,axis=0)
# cost_perf = resSave['cp']
# cost_safe = resSave['cs']
# cost_blend = resSave['cb']
# cp,cs,cb = np.mean(cost_perf,axis=0), np.mean(cost_safe,axis=0),\
# np.mean(cost_blend,axis=0)
# std_cp,std_cs,std_cb = np.std(cost_perf,axis=0), np.std(cost_safe,axis=0),\
# np.std(cost_blend,axis=0)
window = 10
correct_arm_save = resSave['c_arm']
# correct_arm_save = correct_arm_save
m_ca = np.mean(correct_arm_save, axis=0)
m_ca = moving_average(m_ca, window)
std_m_ca = np.std(correct_arm_save, axis=0)
std_m_ca = moving_average(std_m_ca, window)
# pr_regret_save=resSave['pr']
# m_pr = np.mean(pr_regret_save, axis=0)
# std_m_pr = np.std(pr_regret_save, axis=0)
avg_pr_regret_save=resSave['avg_pr']
m_avg_pr = np.mean(avg_pr_regret_save, axis=0)
std_avg_pr = np.std(avg_pr_regret_save, axis=0)
#!/usr/bin/env python
from __future__ import print_function
from sim.utils import *
from random_box_map import *
from navi import *
import numpy as np
from scipy import ndimage, interpolate
from collections import OrderedDict
import pdb
import glob
import os
import multiprocessing
import errno
import re
import time
import random
import cv2
from recordtype import recordtype
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import torchvision
from torchvision import transforms
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
# from logger import Logger
from copy import deepcopy
from networks import policy_A3C
from resnet_pm import resnet18, resnet34, resnet50, resnet101, resnet152
from torchvision.models.resnet import resnet18 as resnet18s
from torchvision.models.resnet import resnet34 as resnet34s
from torchvision.models.resnet import resnet50 as resnet50s
from torchvision.models.resnet import resnet101 as resnet101s
from torchvision.models.resnet import resnet152 as resnet152s
from networks import intrinsic_model
import math
import argparse
from datetime import datetime
from maze import generate_map
import matplotlib.pyplot as plt
import matplotlib.colors as cm
from matplotlib.patches import Wedge
import matplotlib.gridspec as gridspec
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def shift(grid, d, axis=None, fill = 0.5):
grid = np.roll(grid, d, axis=axis)
if axis == 0:
if d > 0:
grid[:d,:] = fill
elif d < 0:
grid[d:,:] = fill
elif axis == 1:
if d > 0:
grid[:,:d] = fill
elif d < 0:
grid[:,d:] = fill
return grid
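# Illustrative behaviour of the helper above: shift(np.ones((3, 3)), 1, axis=0)
# rolls the rows down by one and fills the vacated first row with 0.5.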
def softmax(w, t = 1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def softermax(w, t = 1.0):
w = np.array(w)
w = w - w.min() + np.exp(1)
e = np.log(w)
dist = e / np.sum(e)
return dist
def normalize(x):
if x.min() == x.max():
return 0.0*x
x = x-x.min()
x = x/x.max()
return x
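# Quick illustrative checks of the helpers above:
#   normalize(np.array([2., 4., 6.])) -> array([0. , 0.5, 1. ])
#   softmax([1., 1.])                 -> array([0.5, 0.5])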
Pose2d = recordtype("Pose2d", "theta x y")
Grid = recordtype("Grid", "head row col")
class Lidar():
def __init__(self, ranges, angle_min, angle_max,
range_min, range_max, noise=0):
# self.ranges = np.clip(ranges, range_min, range_max)
self.ranges = np.array(ranges)
self.angle_min = angle_min
self.angle_max = angle_max
num_data = len(self.ranges)
self.angle_increment = (self.angle_max-self.angle_min)/num_data #math.increment
self.angles_2pi= np.linspace(angle_min, angle_max, len(ranges), endpoint=True) % (2*np.pi)
idx = np.argsort(self.angles_2pi)
self.ranges_2pi = self.ranges[idx]
self.angles_2pi = self.angles_2pi[idx]
class LocalizationNode:
def __init__(self, args):
self.next_action = None
self.skip_to_end = False
self.action_time = 0
self.gtl_time = 0
self.lm_time = 0
self.args = args
self.rl_test = False
self.start_time = time.time()
if (self.args.use_gpu) > 0 and torch.cuda.is_available():
self.device = torch.device("cuda" )
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
self.device = torch.device("cpu")
torch.set_default_tensor_type(torch.FloatTensor)
# self.args.n_maze_grids
# self.args.n_local_grids
# self.args.n_lm_grids
self.init_fig = False
self.n_maze_grids = None
self.grid_rows = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution
self.grid_cols = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution
self.grid_dirs = self.args.n_headings
num_dirs = 1
num_classes = self.args.n_lm_grids ** 2 * num_dirs
final_num_classes = num_classes
if self.args.n_pre_classes is not None:
num_classes = self.args.n_pre_classes
else:
num_classes = final_num_classes
if self.args.pm_net == "none":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = None
elif self.args.pm_net == "densenet121":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet121(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1024
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet169":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet169(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1664
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet201":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet201(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1920
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet161":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet161(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 2208
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet34s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet50s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet101s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet152s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet18(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet34":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet34(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet50":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet50(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet101":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet101(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet152":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet152(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features # 2048
else:
raise Exception('pm-net required: resnet or densenet')
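# A3C policy input depends on RL_type: types 0 and 1 differ in the number of state
# channels (2 + n_state_dirs vs 1 + n_state_dirs); type 2 uses 2 * n_state_dirs channels
# and also feeds the raw map/scan (add_raw_map_scan=True).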
if self.args.RL_type == 0:
self.policy_model = policy_A3C(self.args.n_state_grids, 2+self.args.n_state_dirs, num_actions = self.args.num_actions)
elif self.args.RL_type == 1:
self.policy_model = policy_A3C(self.args.n_state_grids, 1+self.args.n_state_dirs, num_actions = self.args.num_actions)
elif self.args.RL_type == 2:
self.policy_model = policy_A3C(self.args.n_state_grids, 2*self.args.n_state_dirs, num_actions = self.args.num_actions, add_raw_map_scan = True)
else:
raise Exception('RL_type must be 0, 1, or 2')
self.intri_model = intrinsic_model(self.grid_rows)
if self.args.rl_model == "none":
self.args.rl_model = None
if self.args.pm_model == "none":
self.args.pm_model = None
# load models
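# Checkpoints saved from an nn.DataParallel-wrapped model carry a 'module.' prefix on
# every parameter key; strip it so the state dict also loads into an unwrapped model.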
if self.args.pm_model is not None:
state_dict = torch.load(self.args.pm_model)
new_state_dict = OrderedDict()
for k,v in state_dict.items():
if 'module.' in k:
name = k[7:]
else:
name = k
new_state_dict[name] = v
self.perceptual_model.load_state_dict(new_state_dict)
print ('perceptual model %s is loaded.'%self.args.pm_model)
if self.args.rl_model is not None:
state_dict = torch.load(self.args.rl_model)
new_state_dict = OrderedDict()
for k,v in state_dict.items():
if 'module.' in k:
name = k[7:]
else:
name = k
new_state_dict[name] = v
self.policy_model.load_state_dict(new_state_dict)
print ('policy model %s is loaded.'%self.args.rl_model)
if self.args.ir_model is not None:
self.intri_model.load_state_dict(torch.load(self.args.ir_model))
print ('intri model %s is loaded.'%self.args.ir_model)
# change n-classes
if self.args.n_pre_classes is not None:
# resize the output layer:
new_num_classes = final_num_classes
if "resnet" in self.args.pm_net:
self.perceptual_model.fc = nn.Linear(self.perceptual_model.fc.in_features, new_num_classes, bias=True)
elif "densenet" in args.pm_net:
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, new_num_classes)
print ('model: num_classes now changed to', new_num_classes)
# data parallel, multi GPU
# https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if self.device==torch.device("cuda") and torch.cuda.device_count()>0:
print ("Use", torch.cuda.device_count(), 'GPUs')
if self.perceptual_model != None:
self.perceptual_model = nn.DataParallel(self.perceptual_model)
self.policy_model = nn.DataParallel(self.policy_model)
self.intri_model = nn.DataParallel(self.intri_model)
else:
print ("Use CPU")
if self.perceptual_model != None:
self.perceptual_model.to(self.device)
self.policy_model.to(self.device)
self.intri_model.to(self.device)
#
if self.perceptual_model != None:
if self.args.update_pm_by == "NONE":
self.perceptual_model.eval()
# self.perceptual_model.train()
else:
self.perceptual_model.train()
if self.args.update_rl:
self.policy_model.train()
else:
self.policy_model.eval()
self.min_scan_range, self.max_scan_range = self.args.scan_range #[0.1, 3.5]
self.prob=np.zeros((1,3))
self.values = []
self.log_probs = []
self.manhattans = []
self.xyerrs = []
self.manhattan = 0
self.rewards = []
self.intri_rewards = []
self.reward = 0
self.entropies = []
self.gamma = 0.99
self.tau = 0.95 #Are we sure?
self.entropy_coef = self.args.c_entropy
if self.args.update_pm_by == "NONE":
self.optimizer_pm = None
else:
self.optimizer_pm = torch.optim.Adam(list(self.perceptual_model.parameters()), lr=self.args.lrpm)
if self.args.schedule_pm:
self.scheduler_pm = StepLR(self.optimizer_pm, step_size=self.args.pm_step_size, gamma=self.args.pm_decay)
# self.scheduler_lp = ReduceLROnPlateau(self.optimizer_pm,
# factor = 0.5,
# patience = 2,
# verbose = True)
models = []
if self.args.update_pm_by=="RL" or self.args.update_pm_by=="BOTH":
models = models + list(self.perceptual_model.parameters())
if self.args.update_rl:
models = models + list(self.policy_model.parameters())
if self.args.update_ir:
models = models + list(self.intri_model.parameters())
if models==[]:
self.optimizer = None
print("WARNING: no model for RL")
else:
self.optimizer = torch.optim.Adam(models, lr=self.args.lrrl)
if self.args.schedule_rl:
self.scheduler_rl = StepLR(self.optimizer, step_size=self.args.rl_step_size, gamma=self.args.rl_decay)
self.pm_backprop_cnt = 0
self.rl_backprop_cnt = 0
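# Rollout budget: args.num = [n_envs (maps), n_episodes per env, n_steps per episode],
# mirrored by the verbose printout below.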
self.step_count = 0
self.step_max = self.args.num[2]
self.episode_count = 0
self.acc_epi_cnt = 0
self.episode_max = self.args.num[1]
self.env_count = 0
self.env_max = self.args.num[0]
self.next_bin = 0
self.done = False
if self.args.verbose>0:
print('maps, episodes, steps = %d, %d, %d'%(self.args.num[0], self.args.num[1], self.args.num[2]))
self.cx = torch.zeros(1,256) #Variable(torch.zeros(1, 256))
self.hx = torch.zeros(1,256) #Variable(torch.zeros(1, 256))
self.max_grad_norm = 40
map_side_len = 224 * self.args.map_pixel
self.xlim = (-0.5*map_side_len, 0.5*map_side_len)
self.ylim = (-0.5*map_side_len, 0.5*map_side_len)
self.xlim = np.array(self.xlim)
self.ylim = np.array(self.ylim)
self.map_width_meter = map_side_len
# decide maze grids for each env
# if self.args.maze_grids_range[0] == None:
# pass
# else:
# self.n_maze_grids = np.random.randint(self.args.maze_grids_range[0],self.args.maze_grids_range[1])
# self.hall_width = self.map_width_meter/self.n_maze_grids
# if self.args.thickness == None:
# self.obs_radius = 0.25*self.hall_width
# else:
# self.obs_radius = 0.5*self.args.thickness * self.hall_width
self.collision_radius = self.args.collision_radius #0.25 # robot radius for collision
self.longest = float(self.grid_dirs/2 + self.grid_rows-1 + self.grid_cols-1) #longest possible manhattan distance
self.cell_size = (self.xlim[1]-self.xlim[0])/self.grid_rows
self.heading_resol = 2*np.pi/self.grid_dirs
self.fwd_step_meters = self.cell_size*self.args.fwd_step
self.collision = False
self.collision_attempt = 0
self.sigma_xy = self.args.sigma_xy # self.cell_size * 0.05
self.cr_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel))
self.front_margin_pixels = int(np.ceil((self.collision_radius+self.fwd_step_meters) / self.args.map_pixel)) # how many pixels robot moves forward per step.
self.side_margin_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel))
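# cr_pixels / front_margin_pixels / side_margin_pixels express the robot radius and the
# forward-step clearance in map pixels; slide_scan() dilates the scan image by these
# amounts, presumably so the downstream collision check reduces to an overlap test.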
self.scans_over_map = np.zeros((self.grid_rows,self.grid_cols,360))
self.scan_2d_low_tensor = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
self.map_for_LM = np.zeros((self.map_rows, self.map_cols))
self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
self.data_cnt = 0
self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float')
self.new_pose = False
self.new_bel = False
self.bel_list = []
self.scan_list = []
self.target_list = []
self.likelihood = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),
device=torch.device(self.device),
dtype=torch.float)
self.likelihood = self.likelihood / self.likelihood.sum()
self.gt_likelihood = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.belief = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),device=torch.device(self.device))
self.belief = self.belief / self.belief.sum()
self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach()
# self.bel_ent = np.log(1.0/(self.grid_dirs*self.grid_rows*self.grid_cols))
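# For the initial uniform belief, sum(p * log p) = log(1/(grid_dirs*grid_rows*grid_cols)),
# which is the closed form in the comment above.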
self.loss_likelihood = [] # loss for training PM model
self.loss_ll=0
self.loss_policy = 0
self.loss_value = 0
self.turtle_loc = np.zeros((self.map_rows,self.map_cols))
self.policy_out = None
self.value_out = None
self.action_idx = -1
self.action_from_policy = -1
# what to do
# current pose: where the robot really is. motion incurs errors in pose
self.current_pose = Pose2d(0,0,0)
self.goal_pose = Pose2d(0,0,0)
self.last_pose = Pose2d(0,0,0)
self.perturbed_goal_pose = Pose2d(0,0,0)
self.start_pose = Pose2d(0,0,0)
self.collision_pose = Pose2d(0,0,0)
self.believed_pose = Pose2d(0,0,0)
#grid pose
self.true_grid = Grid(head=0,row=0,col=0)
self.bel_grid = Grid(head=0,row=0,col=0)
self.collision_grid = Grid(head=0,row=0,col=0)
self.action_space = ["turn_left", "turn_right", "go_fwd", "hold"]
self.action_str = 'none'
self.current_state = "new_env_pose"
self.obj_act = None
self.obj_rew = None
self.obj_err = None
self.obj_map = None
self.obj_robot = None
self.obj_path = None
self.obj_heading = None
self.obj_robot_bel = None
self.obj_heading_bel = None
self.obj_pose = None
self.obj_scan = None
self.obj_gtl = None
self.obj_lik = None
self.obj_bel = None
self.obj_bel_dist = None
self.obj_gtl_dist = None
self.obj_lik_dist = None
self.obj_collision = None
if self.args.save:
home=os.environ['HOME']
str_date_time = datetime.now().strftime('%Y%m%d-%H%M%S')
# 1. try create /logs/YYMMDD-HHMMSS-00
# 2. if exist create /logs/YYMMDD-HHMMSS-01, and so on
i = 0
dir_made=False
while dir_made==False:
self.log_dir = os.path.join(self.args.save_loc, str_date_time+'-%02d'%i)
try:
os.mkdir(self.log_dir)
dir_made=True
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
i=i+1
if self.args.verbose > 0:
print ('new directory %s'%self.log_dir)
self.param_filepath = os.path.join(self.log_dir, 'param.txt')
with open(self.param_filepath,'w+') as param_file:
for arg in vars(self.args):
param_file.write('<%s=%s> '%(arg, getattr(self.args, arg)))
if self.args.verbose > -1:
print ('parameters saved at %s'%self.param_filepath)
self.log_filepath = os.path.join(self.log_dir, 'log.txt')
self.rollout_list = os.path.join(self.log_dir, 'rollout_list.txt')
self.pm_filepath = os.path.join(self.log_dir, 'perceptual.model')
self.rl_filepath = os.path.join(self.log_dir, 'rl.model')
self.ir_filepath = os.path.join(self.log_dir, 'ir.model')
self.data_path = os.path.join(self.log_dir, 'data')
self.fig_path = os.path.join(self.log_dir, 'figures')
# if self.args.save_data:
try:
os.mkdir(self.data_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
if self.args.figure:
try:
os.mkdir(self.fig_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
#end of init
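# loop() is a small state machine: "new_env_pose" builds a map and places the robot,
# "new_pose" re-places the robot on the current map, and "update_likelihood" runs one
# localization step (scan -> likelihood -> belief update -> reward -> action -> motion).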
def loop(self):
if self.current_state == "new_env_pose":
### place objects in the env
self.clear_objects()
if self.args.load_map == None or self.args.load_map == "maze":
self.set_maze_grid()
self.set_walls()
elif self.args.load_map == 'randombox':
self.random_box()
else:
self.read_map()
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
if self.args.distort_map:
self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols)
self.make_low_dim_maps()
if self.args.gtl_off == False:
self.get_synth_scan_mp(self.scans_over_map, map_img=self.map_for_LM, xlim=self.xlim, ylim=self.ylim) # generate synthetic scan data over the map (and directions)
self.reset_explored()
if self.args.init_pose is not None:
placed = self.set_init_pose()
else:
placed = self.place_turtle()
if placed:
self.current_state = "update_likelihood"
else:
print ("place turtle failed. trying a new map")
return
if self.args.figure==True:
self.update_figure(newmap=True)
elif self.current_state == "new_pose":
self.reset_explored()
if self.args.init_pose is not None:
placed = self.set_init_pose()
else:
placed = self.place_turtle()
self.current_state = "update_likelihood"
elif self.current_state == "update_likelihood":
self.get_lidar()
self.update_explored()
if self.step_count == 0:
self.save_roll_out = self.args.save & np.random.choice([False, True], p=[1.0-self.args.prob_roll_out, self.args.prob_roll_out])
if self.save_roll_out:
#save roll-out for next episode.
self.roll_out_filepath = os.path.join(self.log_dir, 'roll-out-%03d-%03d.txt'%(self.env_count,self.episode_count))
print ('roll-out saving: %s'%self.roll_out_filepath)
self.scan_2d, self.scan_2d_low = self.get_scan_2d_n_headings(self.scan_data, self.xlim, self.ylim)
self.slide_scan()
### 2. update likelihood from observation
time_mark = time.time()
self.compute_gtl(self.scans_over_map)
self.gtl_time = time.time()-time_mark
print ("[TIME for GTL] %.2f sec"%(time.time()-time_mark))
if self.args.generate_data: # end the episode ... (no need for measurement/motion model)
self.generate_data()
if self.args.figure:
self.update_figure()
plt.pause(1e-4)
self.next_step()
return
self.likelihood = self.update_likelihood_rotate(self.map_for_LM, self.scan_2d)
if self.args.mask:
self.mask_likelihood()
# self.likelihood.register_hook(print)
### z(t) = like x belief
# if self.collision == False:
self.product_belief()
### reward r(t)
self.update_bel_list()
self.get_reward()
### action a(t) given s(t) = (z(t)|Map)
if self.args.verbose>0:
self.report_status(end_episode=False)
if self.save_roll_out:
self.collect_data()
if self.args.figure:
self.update_figure()
if self.step_count >= self.step_max-1:
self.run_action_module(no_update_fig=True)
self.skip_to_end = True
else:
self.run_action_module()
if self.skip_to_end:
self.skip_to_end = False
self.next_ep()
return
### environment: set target
self.update_target_pose()
# do the rest: action, belief transition, ground-truth grid update
self.collision_check()
self.execute_action_teleport()
### environment: change belief z_hat(t+1)
self.transit_belief()
### increase time step
# self.update_current_pose()
if self.collision == False:
self.update_true_grid()
self.next_step()
return
else:
print("undefined state name %s"%self.current_state)
self.current_state = None
exit()
return
def get_statistics(self, dis, name):
DIRS = 'NWSE'
this=[]
for i in range(self.grid_dirs):
# this.append('%s(%s%1.3f,%s%1.3f,%s%1.3f%s)'\
# %(DIRS[i], bcolors.WARNING,100*dis[i,:,:].max(),
# bcolors.OKGREEN,100*dis[i,:,:].median(),
# bcolors.FAIL,100*dis[i,:,:].min(),bcolors.ENDC))
this.append(' %s(%1.2f,%1.2f,%1.2f)'\
%(DIRS[i], 100*dis[i,:,:].max(),
100*dis[i,:,:].median(),
100*dis[i,:,:].min()))
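# Note: DIRS ('NWSE') and the fixed-width format string below assume grid_dirs == 4.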
return name+':%19s|%23s|%23s|%23s|'%tuple(this[th] for th in range(self.grid_dirs))
def circular_placement(self, x, n):
width = x.shape[2]
height = x.shape[1]
N = (n//2+1)*max(width,height)
img = np.zeros((N,N))
for i in range(n):
if i < n//4:
origin = (i, (n//4-i))
elif i < 2*n//4:
origin = (i, (i-n//4))
elif i < 3*n//4:
origin = (n-i, (i-n//4))
else:
origin = (n-i, n+n//4-i)
ox = origin[0]*height
oy = origin[1]*width
img[ox:ox+height, oy:oy+width] = x[i,:,:]
return img
# def square_clock(self, x, n):
# width = x.shape[2]
# height = x.shape[1]
# quater = n//4-1
# #even/odd
# even = 1 - quater % 2
# side = quater+2+even
# N = side*max(width,height)
# img = np.zeros((N,N))
# for i in range(n):
# s = (i+n//8)%n
# if s < n//4:
# org = (0, n//4-s)
# elif s < n//2:
# org = (s-n//4+even, 0)
# elif s < 3*n//4:
# org = (n//4+even, s-n//2+even)
# else:
# org = (n//4-(s-3*n//4), n//4+even)
# ox = org[0]*height
# oy = org[1]*width
# img[ox:ox+height, oy:oy+width] = x[i,:,:]
# del x
# return img, side
def draw_compass(self, ax):
cx = 0.9 * self.xlim[1]
cy = 0.9 * self.ylim[0]
lengthNS = self.xlim[1] * 0.1
lengthEW = self.ylim[1] * 0.075
theta = - self.current_pose.theta
Nx = cx + lengthNS * np.cos(theta)
Ny = cy + lengthNS* np.sin(theta)
Sx = cx + lengthNS * np.cos(theta+np.pi)
Sy = cy + lengthNS * np.sin(theta+np.pi)
Ni = to_index(Nx, self.map_rows, self.xlim)
Nj = to_index(Ny, self.map_cols, self.ylim)
Si = to_index(Sx, self.map_rows, self.xlim)
Sj = to_index(Sy, self.map_cols, self.ylim)
Ex = cx + lengthEW * np.cos(theta-np.pi/2)
Ey = cy + lengthEW * np.sin(theta-np.pi/2)
Wx = cx + lengthEW * np.cos(theta+np.pi/2)
Wy = cy + lengthEW * np.sin(theta+np.pi/2)
Ei = to_index(Ex, self.map_rows, self.xlim)
Ej = to_index(Ey, self.map_cols, self.ylim)
Wi = to_index(Wx, self.map_rows, self.xlim)
Wj = to_index(Wy, self.map_cols, self.ylim)
xdata = Sj, Nj, Wj, Ej
ydata = Si, Ni, Wi, Ei
if hasattr(self, 'obj_compass1'):
self.obj_compass1.update({'xdata':xdata, 'ydata':ydata})
else:
self.obj_compass1, = ax.plot(xdata, ydata, 'r', alpha = 0.5)
def draw_center(self, ax):
x = to_index(0, self.map_rows, self.xlim)
y = to_index(0, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = 0-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5)
obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5)
ax.add_artist(obj_robot)
def draw_collision(self, ax, collision):
if collision == False:
if self.obj_collision == None:
return
else:
self.obj_collision.update({'visible':False})
else:
x = to_index(self.collision_pose.x, self.map_rows, self.xlim)
y = to_index(self.collision_pose.y, self.map_cols, self.ylim)
radius = self.cr_pixels #self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
if self.obj_collision == None:
self.obj_collision = Wedge((y,x), radius, 0, 360, color='y',alpha=0.5, visible=True)
ax.add_artist(self.obj_collision)
else:
self.obj_collision.update({'center': [y,x], 'visible':True})
# self.obj_robot.set_data(self.turtle_loc)
# plt.pause(0.01)
def draw_robot(self, ax):
x = to_index(self.current_pose.x, self.map_rows, self.xlim)
y = to_index(self.current_pose.y, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = -self.current_pose.theta-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
if self.obj_robot == None:
#self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.binary)
# self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest')
self.obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5)
self.obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5)
ax.add_artist(self.obj_robot)
else:
self.obj_robot.update({'center': [y,x]})
self.obj_heading.update({'xdata':xdata, 'ydata':ydata})
# self.obj_robot.set_data(self.turtle_loc)
# plt.pause(0.01)
def update_believed_pose(self):
o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
x_bel = to_real(i_bel, self.xlim,self.grid_rows)
y_bel = to_real(j_bel, self.ylim,self.grid_cols)
theta = o_bel * self.heading_resol
self.believed_pose.x = x_bel
self.believed_pose.y = y_bel
self.believed_pose.theta = theta
def draw_bel(self, ax):
o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
x_bel = to_real(i_bel, self.xlim,self.grid_rows)
y_bel = to_real(j_bel, self.ylim,self.grid_cols)
x = to_index(x_bel, self.map_rows, self.xlim)
y = to_index(y_bel, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = o_bel * self.heading_resol
theta = -theta-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
if self.obj_robot_bel == None:
#self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.binary)
# self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest')
self.obj_robot_bel = Wedge((y,x), radius*0.95, 0, 360, color='b',alpha=0.5)
self.obj_heading_bel, = ax.plot(xdata, ydata, 'b', alpha=0.5)
ax.add_artist(self.obj_robot_bel)
else:
self.obj_robot_bel.update({'center': [y,x]})
self.obj_heading_bel.update({'xdata':xdata, 'ydata':ydata})
def draw_path(self, ax, path):
xy = [grid_cell_to_map_cell(via.x, via.y, self.grid_rows, self.map_rows) for via in path]
x = [ elem[1] for elem in xy]
y = [ elem[0] for elem in xy]
print (x, y)
if self.obj_path == None:
self.obj_path, = ax.plot(x, y, 'g:', alpha=0.5)
self.obj_goal, = ax.plot(x[-1], y[-1], 'r*', alpha=0.5)
else:
self.obj_path.set_xdata(x)
self.obj_path.set_ydata(y)
self.obj_goal.set_xdata(x[-1])
self.obj_goal.set_ydata(y[-1])
def init_figure(self):
self.init_fig = True
if self.args.figure == True:# and self.obj_fig==None:
self.obj_fig = plt.figure(figsize=(16,12))
plt.set_cmap('viridis')
self.gridspec = gridspec.GridSpec(3,5)
self.ax_map = plt.subplot(self.gridspec[0,0])
self.ax_scan = plt.subplot(self.gridspec[1,0])
self.ax_pose = plt.subplot(self.gridspec[2,0])
self.ax_bel = plt.subplot(self.gridspec[0,1])
self.ax_lik = plt.subplot(self.gridspec[1,1])
self.ax_gtl = plt.subplot(self.gridspec[2,1])
self.ax_pbel = plt.subplot(self.gridspec[0,2:4])
self.ax_plik = plt.subplot(self.gridspec[1,2:4])
self.ax_pgtl = plt.subplot(self.gridspec[2,2:4])
self.ax_act = plt.subplot(self.gridspec[0,4])
self.ax_rew = plt.subplot(self.gridspec[1,4])
self.ax_err = plt.subplot(self.gridspec[2,4])
plt.subplots_adjust(hspace = 0.4, wspace=0.4, top=0.95, bottom=0.05)
def update_figure(self, newmap=False):
if self.init_fig==False:
self.init_figure()
if newmap:
ax=self.ax_map
if self.obj_map == None:
# self.ax_map = ax
self.obj_map = ax.imshow(self.map_for_LM, cmap=plt.cm.binary,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.map_rows,self.grid_rows,endpoint=False)
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
else:
self.obj_map.set_data(self.map_for_LM)
self.draw_robot(ax)
return
ax=self.ax_map
self.draw_robot(ax)
self.draw_bel(ax)
self.draw_collision(ax, self.collision)
ax=self.ax_scan
if self.obj_scan == None:
self.obj_scan = ax.imshow(self.scan_2d[0,:,:], cmap = plt.cm.binary,interpolation='gaussian')
self.obj_scan_slide = ax.imshow(self.scan_2d_slide[:,:], cmap = plt.cm.Blues,interpolation='gaussian', alpha=0.5)
# self.obj_scan_low = ax.imshow(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST), cmap = plt.cm.binary,interpolation='nearest', alpha=0.5)
self.draw_center(ax)
self.draw_compass(ax)
ax.set_title('LiDAR Scan')
else:
self.obj_scan.set_data(self.scan_2d[0,:,:])
# self.obj_scan_low.set_data(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST))
self.obj_scan_slide.set_data(self.scan_2d_slide[:,:])
self.draw_compass(ax)
ax=self.ax_pose
self.update_pose_plot(ax)
## GTL ##
if self.args.gtl_off:
pass
else:
ax=self.ax_gtl
self.update_gtl_plot(ax)
## BELIEF ##
ax=self.ax_bel
self.update_belief_plot(ax)
## LIKELIHOOD ##
ax=self.ax_lik
self.update_likely_plot(ax)
ax=self.ax_pbel
self.update_bel_dist(ax)
ax=self.ax_pgtl
self.update_gtl_dist(ax)
ax=self.ax_plik
self.update_lik_dist(ax)
# show last step, and save
if self.step_count >= self.step_max-1:
self.ax_map.set_title('action(%d):%s'%(self.step_count,""))
# self.prob = np.array([0,0,0])
# self.action_from_policy=-1
self.clear_act_dist(self.ax_act)
act_lttr=['L','R','F','-']
self.obj_rew= self.update_list(self.ax_rew,self.rewards,self.obj_rew,"Reward", text=act_lttr[self.action_idx])
self.obj_err = self.update_list(self.ax_err,self.xyerrs,self.obj_err,"Error")
plt.pause(1e-4)
self.save_figure()
def save_figure(self):
if self.args.save and self.acc_epi_cnt % self.args.figure_save_freq == 0:
figname=os.path.join(self.fig_path,'%03d-%03d-%03d.png'%(self.env_count,
self.episode_count,
self.step_count))
plt.savefig(figname)
if self.args.verbose > 1:
print (figname)
def update_pose_plot(self, ax):
pose = np.zeros((self.grid_rows,self.grid_cols,3))
pose[:,:,0] = 1-self.map_for_pose
pose[:,:,1] = 1-self.map_for_pose
pose[:,:,2] = 1-self.map_for_pose
if (pose[self.true_grid.row, self.true_grid.col,:] == [0, 0, 0]).all():
pose[self.true_grid.row, self.true_grid.col, :] = [0.5, 0, 0]
# pose[self.true_grid.row, self.true_grid.col, 2] = [0.5, 0, 0]
elif (pose[self.true_grid.row, self.true_grid.col,:] == [1, 1, 1]).all():
pose[self.true_grid.row, self.true_grid.col, :] = [1.0, 0, 0]
if (pose[self.bel_grid.row, self.bel_grid.col, :] == [0,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,0.5]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,1,1]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,1]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [.5,0,.5]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [0.5,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0.25,0,0.25]
if self.collision:
pose[min(self.grid_rows-1, max(0, self.collision_grid.row)), min(self.grid_cols-1, max(0, self.collision_grid.col)),:] = [0.5, 0.5, 0]
if self.obj_pose == None:
self.obj_pose = ax.imshow(pose, cmap = plt.cm.binary,interpolation='nearest')
ax.grid()
ax.set_yticks(np.arange(0,self.grid_rows)-0.5)
ax.set_xticks(np.arange(0,self.grid_cols)-0.5)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title("Occupancy Grid")
else:
self.obj_pose.set_data(pose)
def update_likely_plot(self,ax):
lik = self.likelihood.cpu().detach().numpy()
# if lik.min() == lik.max():
# lik *= 0
# lik -= lik.min()
# lik /= lik.max()
lik, side = square_clock(lik, self.grid_dirs)
# lik=self.circular_placement(lik, self.grid_dirs)
# lik = lik.reshape(self.grid_rows*self.grid_dirs,self.grid_cols)
# lik = np.swapaxes(lik,0,1)
# lik = lik.reshape(self.grid_rows, self.grid_dirs*self.grid_cols)
# lik = np.concatenate((lik[0,:,:],lik[1,:,:],lik[2,:,:],lik[3,:,:]), axis=1)
if self.obj_lik == None:
self.obj_lik = ax.imshow(lik,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Likelihood from NN')
else:
self.obj_lik.set_data(lik)
self.obj_lik.set_norm(norm = cm.Normalize().autoscale(lik))
def update_act_dist(self, ax):
y = self.prob.flatten()
if self.obj_act == None:
x = range(y.size)
self.obj_act = ax.bar(x,y)
ax.set_ylim([0, 1.1])
ax.set_title("Action PDF")
ax.set_xticks(np.array([0,1,2]))
ax.set_xticklabels(('L','R','F'))
self.obj_act_act = None
else:
for bar,a in zip(self.obj_act, y):
bar.set_height(a)
if self.obj_act_act == None :
if self.action_from_policy != -1:
z = y[min(self.action_from_policy,2)]
self.obj_act_act = ax.text(self.action_from_policy, z, '*')
else:
if self.action_from_policy != -1:
z = y[min(self.action_from_policy,2)]
self.obj_act_act.set_position((self.action_from_policy, z))
def clear_act_dist(self, ax):
ax.clear()
if self.obj_act==None:
pass
else:
self.obj_act = None
if self.obj_act_act == None:
pass
else:
self.obj_act_act = None
def update_list(self,ax,y,obj,title, text=None):
# y = self.rewards
x = range(len(y))
if obj == None:
obj, = ax.plot(x,y,'.-')
ax.set_title(title)
else:
obj.set_ydata(y)
obj.set_xdata(x)
if text is not None:
ax.text(x[-1],y[-1], text)
# recompute the ax.dataLim
ax.relim()
# update ax.viewLim using the new dataLim
ax.autoscale_view()
return obj
def update_bel_dist(self,ax):
y = (self.belief.cpu().detach().numpy().flatten())
gt = np.zeros_like(self.belief.cpu().detach().numpy())
gt[self.true_grid.head, self.true_grid.row, self.true_grid.col] = 1
gt = gt.flatten()
gt_x = np.argmax(gt)
if self.obj_bel_dist == None:
x = range(y.size)
self.obj_bel_dist, = ax.plot(x,y,'.')
self.obj_bel_max, = ax.plot(np.argmax(y), np.max(y), 'x', color='r', label='bel')
self.obj_gt_bel, = ax.plot(gt_x, y[gt_x], '^', color='r', label='gt')
ax.legend()
self.obj_bel_val = ax.text(np.argmax(y), np.max(y), "%f"%np.max(y))
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('Belief')
# ax.set_xlabel('Pose')
ax.set_title("Belief")
else:
self.obj_bel_dist.set_ydata(y)
self.obj_bel_max.set_xdata(np.argmax(y))
self.obj_bel_max.set_ydata(np.max(y))
self.obj_gt_bel.set_xdata(gt_x)
self.obj_gt_bel.set_ydata(y[gt_x])
self.obj_bel_val.set_position((np.argmax(y), np.max(y)))
self.obj_bel_val.set_text("%f"%np.max(y))
ax.set_ylim([0, y.max()*2])
def update_gtl_dist(self,ax):
# y = (self.gt_likelihood.cpu().detach().numpy().flatten())
y = self.gt_likelihood.flatten()
if self.obj_gtl_dist == None:
x = range(y.size)
self.obj_gtl_dist, = ax.plot(x,y,'.')
self.obj_gtl_max, = ax.plot(np.argmax(y), np.max(y), 'rx')
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('GTL')
# ax.set_xlabel('Pose')
ax.set_title("GTL")
else:
self.obj_gtl_dist.set_ydata(y)
self.obj_gtl_max.set_ydata(np.max(y))
self.obj_gtl_max.set_xdata(np.argmax(y))
ax.set_ylim([0, y.max()*2])
def update_lik_dist(self,ax):
y = (self.likelihood.cpu().detach().numpy().flatten())
if self.obj_lik_dist == None:
x = range(y.size)
self.obj_lik_dist, = ax.plot(x,y,'.')
self.obj_lik_max, = ax.plot(np.argmax(y), np.max(y), 'rx')
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('Likelihood')
# ax.set_xlabel('Pose')
ax.set_title("Likelihood")
else:
self.obj_lik_dist.set_ydata(y)
self.obj_lik_max.set_ydata(np.max(y))
self.obj_lik_max.set_xdata(np.argmax(y))
ax.set_ylim([0, y.max()*2])
def update_belief_plot(self,ax):
bel = self.belief.cpu().detach().numpy()
# if bel.min() == bel.max():
# bel *= 0
# bel -= bel.min()
# bel /= bel.max()
bel,side = square_clock(bel, self.grid_dirs)
#bel=self.circular_placement(bel, self.grid_dirs)
# bel = bel.reshape(self.grid_rows*self.grid_dirs,self.grid_cols)
# bel = np.swapaxes(bel,0,1)
# bel = bel.reshape(self.grid_rows,self.grid_dirs*self.grid_cols)
# bel = np.concatenate((bel[0,:,:],bel[1,:,:],bel[2,:,:],bel[3,:,:]), axis=1)
if self.obj_bel == None:
self.obj_bel = ax.imshow(bel,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max())
else:
self.obj_bel.set_data(bel)
ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max())
self.obj_bel.set_norm(norm = cm.Normalize().autoscale(bel))
def update_gtl_plot(self,ax):
# gtl = self.gt_likelihood.cpu().detach().numpy()
gtl = self.gt_likelihood
gtl, side = square_clock(gtl, self.grid_dirs)
if self.obj_gtl == None:
self.obj_gtl = ax.imshow(gtl,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Target Likelihood')
else:
self.obj_gtl.set_data(gtl)
self.obj_gtl.set_norm(norm = cm.Normalize().autoscale(gtl))
def report_status(self,end_episode=False):
if end_episode:
reward = sum(self.rewards)
loss = self.loss_ll #sum(self.loss_likelihood)
dist = sum(self.manhattans)
else:
reward = self.rewards[-1]
loss = self.loss_ll
dist = self.manhattan
eucl = self.get_euclidean()
if self.optimizer == None:
lr_rl = 0
else:
lr_rl = self.optimizer.param_groups[0]['lr']
if self.optimizer_pm == None:
lr_pm = 0
else:
lr_pm = self.optimizer_pm.param_groups[0]['lr']
if self.args.save:
with open(self.log_filepath,'a') as flog:
flog.write('%d %d %d %f %f %f %f %f %f %f %f %e %e %f %f %f %f\n'%(self.env_count, self.episode_count,self.step_count,
loss, dist, reward,
self.loss_policy, self.loss_value,
self.prob[0,0],self.prob[0,1],self.prob[0,2],
lr_rl,
lr_pm,
eucl,
self.action_time,
self.gtl_time,
self.lm_time
))
print('%d %d %d %f %f %f %f %f %f %f %f %e %e %f %f %f %f'%(self.env_count, self.episode_count,self.step_count,
loss, dist, reward,
self.loss_policy, self.loss_value,
self.prob[0,0],self.prob[0,1],self.prob[0,2],
lr_rl,
lr_pm,
eucl,
self.action_time,
self.gtl_time,
self.lm_time
))
def process_link_state(self, pose):
return np.array([
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])
def process_model_state(self, pose):
return np.array([
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])
def update_current_pose_from_gazebo(self):
rospy.wait_for_service('/gazebo/get_model_state')
loc = self.get_model_state(self.robot_model_name,'')
qtn=loc.pose.orientation
roll,pitch,yaw=quaternion_to_euler_angle(qtn.w, qtn.x, qtn.y, qtn.z)
self.current_pose = Pose2d(theta=yaw, x=loc.pose.position.x, y=loc.pose.position.y)
def update_current_pose_from_robot(self):
self.current_pose.x = self.live_pose.x
self.current_pose.y = self.live_pose.y
self.current_pose.theta = self.live_pose.theta
def update_true_grid(self):
self.true_grid.row=to_index(self.current_pose.x, self.grid_rows, self.xlim)
self.true_grid.col=to_index(self.current_pose.y, self.grid_cols, self.ylim)
heading = self.current_pose.theta
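# Discretize the continuous heading into one of grid_dirs bins; adding half a bin
# (pi/grid_dirs) before wrapping makes the integer truncation round to the nearest bin.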
self.true_grid.head = self.grid_dirs * wrap(heading + np.pi/self.grid_dirs) / 2.0 / np.pi
self.true_grid.head = int(self.true_grid.head % self.grid_dirs)
def teleport_turtle(self):
if self.args.verbose>1: print("inside turtle teleportation")
# if self.args.perturb > 0:
self.current_pose.x = self.perturbed_goal_pose.x
self.current_pose.y = self.perturbed_goal_pose.y
self.current_pose.theta = self.perturbed_goal_pose.theta
# pose = self.turtle_pose_msg
# twist = self.turtle_twist_msg
# msg = ModelState()
# msg.model_name = self.robot_model_name
# msg.pose = pose
# msg.twist = twist
# if self.args.verbose > 1:
# print("teleport target = %f,%f"%(msg.pose.position.x, msg.pose.position.y))
# rospy.wait_for_service('/gazebo/set_model_state')
# resp = self.set_model_state(msg)
# while True:
# rospy.wait_for_service("/gazebo/get_model_state")
# loc = self.get_model_state(self.robot_model_name,'')
# if np.abs(self.process_model_state(loc.pose) - self.process_model_state(msg.pose)).sum():
# break
# if self.args.verbose > 1:
# print("teleport result = %f,%f"%(loc.pose.position.x, loc.pose.position.y))
def set_maze_grid(self):
# decide maze grids for each env
# if self.args.maze_grids_range[0] == None:
# pass
# else:
self.n_maze_grids = np.random.choice(self.args.n_maze_grids)
self.hall_width = self.map_width_meter/self.n_maze_grids
if self.args.thickness == None:
self.obs_radius = 0.25*self.hall_width
else:
self.obs_radius = 0.5*self.args.thickness * self.hall_width
def random_map(self):
self.set_maze_grid()
self.set_walls()
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
if self.args.distort_map:
self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols)
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
def random_box(self):
#rooms_row: number of rooms in a row [a,b): a <= n < b
#rooms_col: number of rooms in a col [a,b): a <= n < b
kwargs = {'rooms_row':(2,3), 'rooms_col':(1,3),
'slant_scale':2, 'n_boxes':(1,8), 'thick':50, 'thick_scale':3}
ps = PartitionSpace(**kwargs)
# p_open : probability to have the doors open between rooms
ps.connect_rooms(p_open=1.0)
# set output map size
self.map_for_LM = ps.get_map(self.map_rows,self.map_cols)
def read_map(self):
'''
set map_design (grid_rows x grid_cols),
map_2d (map_rows x map_cols),
map_for_RL for RL state (n_state_grids x n_state_grids)
'''
self.map_for_LM = np.load(self.args.load_map)
# self.map_for_pose = np.load(self.args.load_map_LM)
# mdt = np.load(self.args.load_map_RL)
# self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device)
def set_walls(self):
'''
set map_design, map_2d, map_for_RL
'''
if self.args.test_mode:
map_file = os.path.join(self.args.test_data_path, "map-design-%05d.npy"%self.env_count)
maze = np.load(map_file)
else:
if self.args.random_rm_cells[1]>0:
low=self.args.random_rm_cells[0]
high=self.args.random_rm_cells[1]
num_cells_to_delete = np.random.randint(low, high)
else:
num_cells_to_delete = self.args.rm_cells
if self.args.save_boundary == 'y':
save_boundary = True
elif self.args.save_boundary == 'n':
save_boundary = False
else:
save_boundary = True if np.random.random()>0.5 else False
maze_options = {'save_boundary': save_boundary,
"min_blocks": 10}
maze = generate_map(self.n_maze_grids, num_cells_to_delete, **maze_options )
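# Wall placement: two adjacent occupied maze cells get a wall segment between them
# (vertical or horizontal); an interior occupied cell with all four neighbours free
# becomes a free-standing pillar.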
for i in range(self.n_maze_grids):
for j in range(self.n_maze_grids):
if i < self.n_maze_grids-1:
if maze[i,j]==1 and maze[i+1,j]==1:
#place vertical
self.set_a_wall([i,j],[i+1,j],self.n_maze_grids,horizontal=False)
if j < self.n_maze_grids-1:
if maze[i,j]==1 and maze[i,j+1] ==1:
#place horizontal wall
self.set_a_wall([i,j],[i,j+1],self.n_maze_grids,horizontal=True)
if i>0 and i<self.n_maze_grids-1 and j>0 and j<self.n_maze_grids-1:
if maze[i,j]==1 and maze[i-1,j] == 0 and maze[i+1,j]==0 and maze[i,j-1]==0 and maze[i,j+1]==0:
self.set_a_pillar([i,j], self.n_maze_grids)
def make_low_dim_maps(self):
self.map_for_pose = cv2.resize(self.map_for_LM, (self.grid_rows, self.grid_cols),interpolation=cv2.INTER_AREA)
self.map_for_pose = normalize(self.map_for_pose)
self.map_for_pose = np.clip(self.map_for_pose, 0.0, 1.0)
mdt = cv2.resize(self.map_for_LM,(self.args.n_state_grids,self.args.n_state_grids), interpolation=cv2.INTER_AREA)
mdt = normalize(mdt)
mdt = np.clip(mdt, 0.0, 1.0)
self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device)
def clear_objects(self):
self.map_for_LM = np.zeros((self.map_rows, self.map_cols))
self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
def set_a_pillar(self, a, grids):
x=to_real(a[0], self.xlim, grids)
y=to_real(a[1], self.ylim, grids)
#rad = self.obs_radius
if self.args.backward_compatible_maps:
rad = 0.15
elif self.args.random_thickness:
rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25)
rad = np.clip(rad, self.hall_width*0.25, self.hall_width*0.5)
else:
rad = self.obs_radius
corner0 = [x+rad,y+rad]
corner1 = [x-rad,y-rad]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
dx = to_real(ir, self.xlim, self.map_rows) - x
dy = to_real(ic, self.ylim, self.map_cols) - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad:
self.map_for_LM[ir,ic]=1.0
def set_a_wall(self,a,b,grids,horizontal=True):
ax = to_real(a[0], self.xlim, grids)
ay = to_real(a[1], self.ylim, grids)
bx = to_real(b[0], self.xlim, grids)
by = to_real(b[1], self.ylim, grids)
# if horizontal:
# yaw=math.radians(90)
# else:
# yaw=math.radians(0)
#rad = self.obs_radius
if self.args.backward_compatible_maps:
rad = 0.1*np.ones(4)
elif self.args.random_thickness:
rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25, size=4)
rad = np.clip(rad, self.hall_width*0.1, self.hall_width*0.5)
else:
rad = self.obs_radius*np.ones(4)
corner0 = [ax+rad[0],ay+rad[1]]
corner1 = [bx-rad[2],by-rad[3]]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
if self.args.backward_compatible_maps:
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
else:
x1 = to_index(corner1[0], self.map_rows, self.xlim)#+1
y1 = to_index(corner1[1], self.map_cols, self.ylim)#+1
self.map_for_LM[x0:x1, y0:y1]=1.0
# x0 = to_index(corner0[0], self.grid_rows, self.xlim)
# y0 = to_index(corner0[1], self.grid_cols, self.ylim)
# x1 = to_index(corner1[0], self.grid_rows, self.xlim)+1
# y1 = to_index(corner1[1], self.grid_cols, self.ylim)+1
# self.map_for_pose[x0:x1, y0:y1]=1.0
def sample_a_pose(self):
# new turtle location (random)
check = True
collision_radius = 0.50
while (check):
turtle_can = range(self.grid_rows*self.grid_cols)
turtle_bin = np.random.choice(turtle_can,1)
self.true_grid.row = turtle_bin//self.grid_cols
self.true_grid.col = turtle_bin% self.grid_cols
self.true_grid.head = np.random.randint(self.grid_dirs)
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, collision_radius, self.map_for_LM)
def set_init_pose(self):
self.true_grid.head = self.args.init_pose[0]
self.true_grid.row = self.args.init_pose[1]
self.true_grid.col = self.args.init_pose[2]
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows
delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols
else:
delta_x=0
delta_y=0
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
delta_theta = (0.5-np.random.rand())*self.heading_resol
else:
delta_theta=0
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta
check = self.collision_fnc(self.perturbed_goal_pose.x, self.perturbed_goal_pose.y, self.collision_radius, self.map_for_LM)
self.teleport_turtle()
self.update_true_grid()
return True
def place_turtle(self):
# new turtle location (random)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
turtle_can = range(self.grid_rows*self.grid_cols)
turtle_bin = np.random.choice(turtle_can,1)
self.true_grid.row = turtle_bin//self.grid_cols
self.true_grid.col = turtle_bin% self.grid_cols
self.true_grid.head = np.random.randint(self.grid_dirs)
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, self.collision_radius, self.map_for_LM)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows
delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols
else:
delta_x=0
delta_y=0
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
delta_theta = (0.5-np.random.rand())*self.heading_resol
else:
delta_theta=0
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta
check = self.collision_fnc(self.perturbed_goal_pose.x, self.perturbed_goal_pose.y, self.collision_radius, self.map_for_LM)
if self.args.test_mode:
pg_pose_file = os.path.join(self.args.test_data_path, "pg-pose-%05d.npy"%self.env_count)
g_pose_file = os.path.join(self.args.test_data_path, "g-pose-%05d.npy"%self.env_count)
pg_pose = np.load(pg_pose_file)
g_pose = np.load(g_pose_file)
self.goal_pose.theta = g_pose[0]
self.goal_pose.x = g_pose[1]
self.goal_pose.y = g_pose[2]
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.x = pg_pose[1]
self.perturbed_goal_pose.y = pg_pose[2]
else:
self.perturbed_goal_pose.x = g_pose[1]
self.perturbed_goal_pose.y = g_pose[2]
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.theta = pg_pose[0]
else:
self.perturbed_goal_pose.theta = g_pose[0]
if self.args.verbose > 1:
print ('gt_row,col,head = %f,%f,%d'%(self.true_grid.row,self.true_grid.col,self.true_grid.head))
print('x_goal,y_goal,target_ori=%f,%f,%f'%(self.goal_pose.x,self.goal_pose.y,self.goal_pose.theta))
# self.turtle_pose_msg.position.x = self.goal_pose.x
# self.turtle_pose_msg.position.y = self.goal_pose.y
# yaw = self.goal_pose.theta
# self.turtle_pose_msg.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, yaw))
self.teleport_turtle()
self.update_true_grid()
# self.update_current_pose()
return True
def reset_explored(self): # reset explored area to all 0's
self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float')
self.new_pose = False
return
def update_bel_list(self):
guess = self.bel_grid
# guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
if guess not in self.bel_list:
self.new_bel = True
self.bel_list.append(guess)
if self.args.verbose > 2:
print ("bel_list", len(self.bel_list))
else:
self.new_bel = False
def update_explored(self):
if self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] == 0.0:
self.new_pose = True
else:
self.new_pose = False
self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] = 1.0
return
def normalize_gtl(self):
gt = self.gt_likelihood
self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
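# Turn the raw similarity scores into a distribution over poses: 'softmax' applies a
# temperature-scaled softmax, 'softermax' a (presumably softer) custom normalization,
# and 'linear' simply clips and renormalizes.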
if self.args.gtl_output == "softmax":
gt = softmax(gt, self.args.temperature)
# gt = torch.from_numpy(softmax(gt)).float().to(self.device)
elif self.args.gtl_output == "softermax":
gt = softermax(gt)
# gt = torch.from_numpy(softmin(gt)).float().to(self.device)
elif self.args.gtl_output == "linear":
gt = np.clip(gt, 1e-5, 1.0)
gt=gt/gt.sum()
# gt = torch.from_numpy(gt/gt.sum()).float().to(self.device)
# self.gt_likelihood = torch.tensor(gt).float().to(self.device)
self.gt_likelihood = gt
def get_gtl_cos_mp(self, ref_scans, scan_data, my_dirs, return_dict):
chk_rad = 0.05
offset = 360.0/self.grid_dirs
y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
# y = np.clip(y, self.min_scan_range, np.inf)
for heading in my_dirs:
X = np.roll(ref_scans, -int(offset*heading),axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
# x = np.clip(x, self.min_scan_range, np.inf)
gtl[i_ld,j_ld] = self.get_cosine_sim(x,y)
###
return_dict[heading] = {'gtl': gtl}
def get_gtl_cos_mp2(self, my_dirs, scan_data, return_dict):
chk_rad = 0.05
offset = 360.0/self.grid_dirs
y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
for heading in my_dirs:
X = np.roll(self.scans_over_map, -int(offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
gtl[i_ld,j_ld] = self.get_cosine_sim(x,y)
###
return_dict[heading] = {'gtl': gtl}
def get_gtl_corr_mp(self, ref_scans, my_dirs, return_dict, clip):
chk_rad = 0.05
offset = 360/self.grid_dirs
y= np.array(self.scan_data_at_unperturbed.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
for heading in my_dirs:
X = np.roll(ref_scans, -int(offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
gtl[i_ld,j_ld] = self.get_corr(x,y,clip=clip)
###
return_dict[heading] = {'gtl': gtl}
def get_gt_likelihood_cossim(self, ref_scans, scan_data):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
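# Split the grid_dirs headings as evenly as possible across up to n_workers processes;
# each worker writes the GTL slice for its headings into the shared return_dict.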
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_cos_mp,
args = [ref_scans, scan_data, my_dirs, return_dict])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
for i in range(self.grid_dirs):
ret = return_dict[i]
gtl[i,:,:] = ret['gtl']
return gtl
# for i in range(self.grid_dirs):
# ret = return_dict[i]
# self.gt_likelihood[i,:,:] = ret['gtl']
# # self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device)
def get_gt_likelihood_cossim2(self, scan_data):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_cos_mp2,
args = [my_dirs, scan_data, return_dict])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
for i in range(self.grid_dirs):
ret = return_dict[i]
gtl[i,:,:] = ret['gtl']
return gtl
def get_gt_likelihood_corr(self, ref_scans, clip=0):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_corr_mp,
args = [ref_scans, my_dirs, return_dict, clip])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
for i in range(self.grid_dirs):
ret = return_dict[i]
self.gt_likelihood[i,:,:] = ret['gtl']
# self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device)
def get_cosine_sim(self,x,y):
# numpy arrays.
non_inf_x = ~np.isinf(x)
non_nan_x = ~np.isnan(x)
non_inf_y = ~np.isinf(y)
non_nan_y = ~np.isnan(y)
numbers_only = non_inf_x & non_nan_x & non_inf_y & non_nan_y
x=x[numbers_only]
y=y[numbers_only]
return sum(x*y)/np.linalg.norm(y,2)/np.linalg.norm(x,2)
def get_corr(self,x,y,clip=1):
mx=np.mean(x)
my=np.mean(y)
corr=sum((x-mx)*(y-my))/np.linalg.norm(y-my,2)/np.linalg.norm(x-mx,2)
# return 0.5*(corr+1.0)
if clip==1:
return np.clip(corr, 0, 1.0)
else:
return 0.5*(corr+1.0)
def get_a_scan(self, x_real, y_real, offset=0, scan_step=1, noise=0, sigma=0, fov=False):
#class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d
row_hd = to_index(x_real, self.map_rows, self.xlim) # from real to hd
col_hd = to_index(y_real, self.map_cols, self.ylim) # from real to hd
scan = np.zeros(360)
missing = np.random.choice(360, noise, replace=False)
gaussian_noise = np.random.normal(scale=sigma, size=360)
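# Ray casting: for each ray, march outward from min_scan_range in small (1-2 cm) random
# increments until an occupied map cell (>= 0.5), the map boundary, or max_scan_range is
# hit; dropped ('missing') rays return inf and FOV-masked rays return NaN.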
for i_ray in range(0,360, scan_step):
if fov and i_ray > self.args.fov[0] and i_ray < self.args.fov[1]:
scan[i_ray]=np.nan
continue
else:
pass
theta = math.radians(i_ray)+offset
if i_ray in missing:
dist = np.inf
else:
dist = self.min_scan_range
while True:
if dist >= self.max_scan_range:
dist = np.inf
break
x_probe = x_real + dist * np.cos(theta)
y_probe = y_real + dist * np.sin(theta)
# see if there's something
i_hd_prb = to_index(x_probe, self.map_rows, self.xlim)
j_hd_prb = to_index(y_probe, self.map_cols, self.ylim)
if i_hd_prb < 0 or i_hd_prb >= self.map_rows:
dist = np.inf
break
if j_hd_prb < 0 or j_hd_prb >= self.map_cols:
dist = np.inf
break
if self.map_for_LM[i_hd_prb, j_hd_prb] >= 0.5:
break
dist += 0.01+0.01*(np.random.rand())
scan[i_ray]=dist+gaussian_noise[i_ray]
return scan
def get_a_scan_mp(self, range_place, return_dict, offset=0, scan_step=1, map_img=None, xlim=None, ylim=None, fov=False):
# print (os.getpid(), min(range_place), max(range_place))
for i_place in range_place:
#class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d
row_ld = i_place // self.grid_cols
col_ld = i_place % self.grid_cols
x_real = to_real(row_ld, xlim, self.grid_rows ) # from low-dim location to real
y_real = to_real(col_ld, ylim, self.grid_cols ) # from low-dim location to real
row_hd = to_index(x_real, self.map_rows, xlim) # from real to hd
col_hd = to_index(y_real, self.map_cols, ylim) # from real to hd
scan = np.zeros(360)
for i_ray in range(0,360, scan_step):
if fov and i_ray > self.args.fov[0] and i_ray < self.args.fov[1]:
scan[i_ray]=np.nan
continue
else:
pass
theta = math.radians(i_ray)+offset
dist = self.min_scan_range
while True:
if dist >= self.max_scan_range:
dist = np.inf
break
x_probe = x_real + dist * np.cos(theta)
y_probe = y_real + dist * np.sin(theta)
# see if there's something
i_hd_prb = to_index(x_probe, self.map_rows, xlim)
j_hd_prb = to_index(y_probe, self.map_cols, ylim)
if i_hd_prb < 0 or i_hd_prb >= self.map_rows:
dist = np.inf
break
if j_hd_prb < 0 or j_hd_prb >= self.map_cols:
dist = np.inf
break
if map_img[i_hd_prb, j_hd_prb] >= 0.5:
break
dist += 0.01+0.01*(np.random.rand())
scan[i_ray]=dist
#return scan
return_dict[i_place]={'scan':scan}
# def get_synth_scan(self):
# # start_time = time.time()
# # place sensor at a location, then reach out in 360 rays all around it and record when each ray gets hit.
# n_places=self.grid_rows * self.grid_cols
# for i_place in range(n_places):
# row_ld = i_place // self.grid_cols
# col_ld = i_place % self.grid_cols
# x_real = to_real(row_ld, self.xlim, self.grid_rows ) # from low-dim location to real
# y_real = to_real(col_ld, self.ylim, self.grid_cols ) # from low-dim location to real
# scan = self.get_a_scan(x_real, y_real,scan_step=self.args.pm_scan_step)
# self.scans_over_map[row_ld, col_ld,:] = np.clip(scan, 1e-10, self.max_scan_range)
# if i_place%10==0: print ('.')
# # print ('scans', time.time()-start_time)
def get_synth_scan_mp(self, scans, map_img=None, xlim=None, ylim=None):
# print (multiprocessing.cpu_count())
# start_time = time.time()
# place sensor at a location, then reach out in 360 rays all around it and record when each ray gets hit.
n_places=self.grid_rows * self.grid_cols
manager = multiprocessing.Manager()
return_dict = manager.dict()
procs = []
accum = 0
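# Distribute the grid_rows*grid_cols sensor positions over up to n_workers processes;
# the first (n_places % n_workers) workers take one extra position so every cell is covered.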
for worker in range(min(self.args.n_workers, n_places)):
n_myplaces = n_places//self.args.n_workers
if worker < n_places % self.args.n_workers:
n_myplaces += 1
range_place = range(accum, accum+n_myplaces)
accum += n_myplaces
kwargs = {'scan_step': self.args.pm_scan_step, 'map_img':map_img, 'xlim':xlim, 'ylim':ylim, 'fov':False}
pro = multiprocessing.Process(target = self.get_a_scan_mp, args = [range_place, return_dict ], kwargs = kwargs)
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
# scans = np.ndarray((self.grid_rows*self.grid_cols, 360))
for i_place in range(n_places):
### multi-processing
rd = return_dict[i_place]
scan = rd['scan']
# scans [i_place, :] = np.clip(scan, self.min_scan_range, self.max_scan_range)
row_ld = i_place // self.grid_cols
col_ld = i_place % self.grid_cols
# scans[row_ld, col_ld,:] = np.clip(scan, self.min_scan_range, np.inf)
scans[row_ld, col_ld,:] = np.clip(scan, self.min_scan_range, self.max_scan_range)
# self.scans_over_map[row_ld, col_ld,:] = np.clip(scan, self.min_scan_range, self.max_scan_range)
def slide_scan(self):
        # slide scan_2d downward for self.front_margin_pixels, and then left/right for collision radius
self.scan_2d_slide = np.copy(self.scan_2d[0,:,:])
for i in range(self.front_margin_pixels):
self.scan_2d_slide += shift(self.scan_2d_slide, 1, axis=0, fill=1.0)
# self.scan_2d_slide = np.clip(self.scan_2d_slide,0.0,1.0)
for i in range(self.side_margin_pixels):
self.scan_2d_slide += shift(self.scan_2d_slide, +1, axis=1, fill=1.0)
self.scan_2d_slide += shift(self.scan_2d_slide, -1, axis=1, fill=1.0)
self.scan_2d_slide = np.clip(self.scan_2d_slide,0.0,1.0)
def get_scan_2d_n_headings(self, scan_data, xlim, ylim):
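        # Rasterize a 1-D lidar scan into grid_dirs binary occupancy images,
        # one per candidate heading (the scan rotated in 2*pi/grid_dirs steps),
        # plus a low-resolution copy used in the RL state.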
if self.args.verbose > 1:
print('get_scan_2d_n_headings')
data = scan_data
        if self.map_rows is None or self.map_cols is None:
            return None, None
O=self.grid_dirs
N=self.map_rows
M=self.map_cols
scan_2d = np.zeros(shape=(O,N,M))
angles = np.linspace(data.angle_min, data.angle_max, data.ranges.size, endpoint=False)
for i,dist in enumerate(data.ranges):
for rotate in range(O):
offset = 2*np.pi/O*rotate
angle = offset + angles[i]
if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]):
continue
if ~np.isinf(dist) and ~np.isnan(dist):
x = (dist)*np.cos(angle)
y = (dist)*np.sin(angle)
n = to_index(x, N, xlim)
m = to_index(y, M, ylim)
                    if n>=0 and n<N and m>=0 and m<M:
scan_2d[rotate,n,m] = 1.0
rows1 = self.args.n_state_grids
cols1 = self.args.n_state_grids
rows2 = self.args.n_local_grids
cols2 = rows2
center=self.args.n_local_grids//2
if self.args.binary_scan:
scan_2d_low = np.ceil(normalize(cv2.resize(scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA)))
else:
scan_2d_low = normalize(cv2.resize(scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA))
return scan_2d, scan_2d_low
def do_scan_2d_n_headings(self):
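        # In-place variant of get_scan_2d_n_headings: rasterize self.scan_data
        # into self.scan_2d (one image per heading) and self.scan_2d_low.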
if self.args.verbose > 1:
print('get_scan_2d_n_headings')
data = self.scan_data
        if self.map_rows is None or self.map_cols is None:
            return
O=self.grid_dirs
N=self.map_rows
M=self.map_cols
self.scan_2d = np.zeros(shape=(O,N,M))
angles = np.linspace(data.angle_min, data.angle_max, data.ranges.size, endpoint=False)
for i,dist in enumerate(data.ranges):
for rotate in range(O):
offset = 2*np.pi/O*rotate
angle = offset + angles[i]
if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]):
continue
if ~np.isinf(dist) and ~np.isnan(dist):
x = (dist)*np.cos(angle)
y = (dist)*np.sin(angle)
n = to_index(x, N, self.xlim)
m = to_index(y, M, self.ylim)
                    if n>=0 and n<N and m>=0 and m<M:
self.scan_2d[rotate,n,m] = 1.0
rows1 = self.args.n_state_grids
cols1 = self.args.n_state_grids
rows2 = self.args.n_local_grids
cols2 = rows2
center=self.args.n_local_grids//2
if self.args.binary_scan:
self.scan_2d_low = np.ceil(normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA)))
else:
self.scan_2d_low = normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA))
return
def generate_data(self):
# data index: D
# n envs : E
# n episodes: N
# file-number(D) = D//N = E,
# data index in the file = D % N
# map file number = D//N = E
index = "%05d"%(self.data_cnt)
target_data = self.gt_likelihood_unnormalized
range_data=np.array(self.scan_data.ranges)
angle_array = np.linspace(self.scan_data.angle_min, self.scan_data.angle_max,range_data.size, endpoint=False)
scan_data_to_save = np.stack((range_data,angle_array),axis=1) #first column: range, second column: angle
self.target_list.append(target_data)
self.scan_list.append(scan_data_to_save)
if self.args.verbose > 2:
print ("target_list", len(self.target_list))
print ("scan_list", len(self.scan_list))
if self.done:
scans = np.stack(self.scan_list, axis=0)
targets = np.stack(self.target_list, axis=0)
np.save(os.path.join(self.data_path, 'scan-%s.npy'%index), scans)
np.save(os.path.join(self.data_path, 'map-%s.npy'%index), self.map_for_LM)
np.save(os.path.join(self.data_path, 'target-%s.npy'%index), targets)
self.scan_list = []
self.target_list = []
self.data_cnt+=1
            if self.args.verbose > 0:
                print ("%s: map %s, scans %s, targets %s"%(index, self.map_for_LM.shape, scans.shape, targets.shape ))
return
def stack_data(self):
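        # Append the current (range, angle) scan and its unnormalized
        # ground-truth likelihood target to the in-memory lists; they are
        # written to disk later by save_generated_data.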
target_data = self.gt_likelihood_unnormalized
range_data = np.array(self.scan_data.ranges_2pi, np.float32)
angle_array = np.array(self.scan_data.angles_2pi, np.float32)
scan_data_to_save = np.stack((range_data,angle_array),axis=1) #first column: range, second column: angle
self.target_list.append(target_data)
self.scan_list.append(scan_data_to_save)
if self.args.verbose > 2:
print ("target_list", len(self.target_list))
print ("scan_list", len(self.scan_list))
def save_generated_data(self):
scans = np.stack(self.scan_list, axis=0)
targets = np.stack(self.target_list, axis=0)
np.save(os.path.join(self.data_path, 'scan-%05d.npy'%self.data_cnt), scans)
np.save(os.path.join(self.data_path, 'map-%05d.npy'%self.data_cnt), self.map_for_LM)
np.save(os.path.join(self.data_path, 'target-%05d.npy'%self.data_cnt), targets)
        if self.args.verbose > 0:
print ("%05d: map %s, scans %s, targets %s"%(self.data_cnt, self.map_for_LM.shape, scans.shape, targets.shape ))
self.scan_list = []
self.target_list = []
self.data_cnt+=1
def collect_data(self):
# ENV-EPI-STP-CNT
# map, scan, belief, likelihood, GTL, policy, action, reward
# input = [map, scan]
# target = [GTL]
# state = [map-low-dim, bel, scan-low-dim]
# action_reward = [action, p0, p1, p2, reward]
# index = "%03d-%03d-%03d-%04d"%(self.env_count,self.episode_count,self.step_count,self.data_cnt)
index = "%05d"%(self.data_cnt)
env_index = "%05d"%(self.env_count)
with open(self.rollout_list,'a') as ro:
ro.write('%d %d %d %d\n'%(self.env_count,self.episode_count,self.step_count,self.data_cnt))
map_file = os.path.join(self.data_path, 'map-%s.npy'%env_index)
if not os.path.isfile(map_file):
#save the map
np.save(map_file, self.map_for_LM)
target_data = self.gt_likelihood_unnormalized
gt_pose = np.array((self.true_grid.head,self.true_grid.row,self.true_grid.col)).reshape(1,-1)
map_num = np.array([self.env_count])
range_data=np.array(self.scan_data.ranges)
angle_array = np.linspace(self.scan_data.angle_min, self.scan_data.angle_max,range_data.size, endpoint=False)
scan_data_to_save = np.stack((range_data,angle_array),axis=1) #first column: range, second column: angle
real_pose = np.array((self.current_pose.theta, self.current_pose.x, self.current_pose.y)).reshape(1,-1)
dict_to_save = {'scan':scan_data_to_save,
'mapindex':map_num,
'target':target_data,
'belief': self.belief.detach().cpu().numpy(),
'like':self.likelihood.detach().cpu().numpy(),
'action': self.action_idx,
'prob':self.prob.reshape(1,-1),
'reward': self.reward_vector.reshape(1,-1),
'gt_pose': gt_pose,
'real_pose': real_pose}
np.save(os.path.join(self.data_path, 'data-%s.npy'%index), dict_to_save)
self.data_cnt+=1
return
def compute_gtl(self, ref_scans):
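        # Build the ground-truth likelihood over (heading, row, col): random
        # (when args.gtl_off is set, for ablations) or from the similarity
        # between the reference scans and the current scan, then normalize it.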
        if self.args.gtl_off:
gt = np.random.rand(self.grid_dirs, self.grid_rows, self.grid_cols)
gt = np.clip(gt, 1e-5, 1.0)
gt=gt/gt.sum()
self.gt_likelihood = gt
# self.gt_likelihood = torch.tensor(gt).float().to(self.device)
else:
if self.args.gtl_src == 'hd-corr':
self.get_gt_likelihood_corr(ref_scans, clip=0)
elif self.args.gtl_src == 'hd-corr-clip':
self.get_gt_likelihood_corr(ref_scans, clip=1)
elif self.args.gtl_src == 'hd-cos':
self.gt_likelihood = self.get_gt_likelihood_cossim(ref_scans, self.scan_data_at_unperturbed)
else:
                raise Exception('unknown GTL source %s: --gtl-src= [hd-corr, hd-corr-clip, hd-cos]'%self.args.gtl_src)
self.normalize_gtl()
def run_action_module(self, no_update_fig=False):
if self.args.random_policy:
fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide)
if fwd_collision:
num_actions = 2
else:
num_actions = 3
self.action_from_policy = np.random.randint(num_actions)
self.action_str = self.action_space[self.action_from_policy]
elif self.args.navigate_to is not None:
self.navigate()
else:
mark_time = time.time()
self.get_action()
self.action_time = time.time()-mark_time
print('[ACTION] %.3f sec '%(time.time()-mark_time))
if no_update_fig:
return
if self.args.figure:
# update part of figure after getting action
self.ax_map.set_title('action(%d):%s'%(self.step_count,self.action_str))
ax = self.ax_act
self.update_act_dist(ax)
ax=self.ax_rew
act_lttr=['L','R','F','-']
self.obj_rew= self.update_list(ax,self.rewards,self.obj_rew,"Reward", text=act_lttr[self.action_idx])
ax=self.ax_err
self.obj_err = self.update_list(ax,self.xyerrs,self.obj_err,"Error")
plt.pause(1e-4)
self.sample_action()
if self.args.figure:
# update part of figure after getting action
self.ax_map.set_title('action(%d):%s'%(self.step_count,self.action_str))
self.save_figure()
def update_likelihood_rotate(self, map_img, scan_imgs, compute_loss=True):
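        # Run the perceptual model on one (map, rotated scan) image pair per
        # heading and softmax the outputs into a likelihood over
        # (heading, row, col); the result is resized when the model's output
        # grid differs from the localization grid.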
map_img = map_img.copy()
if self.args.flip_map > 0:
locs = np.random.randint(0, map_img.shape[0], (2, np.random.randint(self.args.flip_map+1)))
xs = locs[0]
ys = locs[1]
map_img[xs,ys]=1-map_img[xs,ys]
time_mark = time.time()
        if self.perceptual_model is None:
return self.likelihood
else:
likelihood = torch.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),
device=torch.device(self.device),
dtype=torch.float)
if self.args.verbose>1: print("update_likelihood_rotate")
if self.args.ch3=="ZERO":
input_batch = np.zeros((self.grid_dirs, 3, self.map_rows, self.map_cols))
for i in range(self.grid_dirs): # for all orientations
input_batch[i, 0, :,:] = map_img
input_batch[i, 1, :,:] = scan_imgs[i,:,:]
input_batch[i, 2, :,:] = np.zeros_like(map_img)
elif self.args.ch3=="RAND":
input_batch = np.zeros((self.grid_dirs, 3, self.map_rows, self.map_cols))
for i in range(self.grid_dirs): # for all orientations
input_batch[i, 0, :,:] = map_img
input_batch[i, 1, :,:] = scan_imgs[i,:,:]
input_batch[i, 2, :,:] = np.random.random(map_img.shape)
else:
input_batch = np.zeros((self.grid_dirs, 2, self.map_rows, self.map_cols))
for i in range(self.grid_dirs): # for all orientations
input_batch[i, 0, :,:] = map_img
input_batch[i, 1, :,:] = scan_imgs[i,:,:]
input_batch = torch.from_numpy(input_batch).float()
output = self.perceptual_model.forward(input_batch)
output_softmax = F.softmax(output.view([1,-1])/self.args.temperature, dim= 1) # shape (1,484)
if self.args.n_lm_grids != self.args.n_local_grids:
# LM output size != localization space size: adjust LM output to fit to localization space.
nrows = self.args.n_lm_grids #self.grid_rows/self.args.sub_resolution
ncols = self.args.n_lm_grids #self.grid_cols/self.args.sub_resolution
like = output_softmax.cpu().detach().numpy().reshape((self.grid_dirs, nrows, ncols))
for i in range(self.grid_dirs):
likelihood[i,:,:] = torch.tensor(cv2.resize(like[i,:,:], (self.grid_rows,self.grid_cols))).float().to(self.device)
likelihood /= likelihood.sum()
else:
likelihood = output_softmax.reshape(likelihood.shape)
self.lm_time = time.time()-time_mark
print ("[TIME for LM] %.2f sec"%(self.lm_time))
del output_softmax, input_batch, output
if compute_loss:
self.compute_loss(likelihood)
return likelihood
# self.likelihood = torch.clamp(self.likelihood, 1e-9, 1.0)
# self.likelihood = self.likelihood/self.likelihood.sum()
def compute_loss(self, likelihood):
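        # Compare the predicted likelihood with the ground-truth likelihood
        # (KL or L1) and, when the perceptual model is trained from GTL,
        # back-propagate once a full batch of losses has accumulated.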
gtl = torch.tensor(self.gt_likelihood).float().to(self.device)
if self.args.pm_loss == "KL":
self.loss_ll = (gtl * torch.log(gtl/likelihood)).sum()
elif self.args.pm_loss == "L1":
self.loss_ll = torch.abs(likelihood - gtl).sum()
if self.args.update_pm_by=="GTL" or self.args.update_pm_by=="BOTH":
if len(self.loss_likelihood) < self.args.pm_batch_size:
self.loss_likelihood.append(self.loss_ll)
if self.args.verbose > 2:
print ("loss_likelihood", len(self.loss_likelihood))
if len(self.loss_likelihood) >= self.args.pm_batch_size:
self.back_prop_pm()
self.loss_likelihood = []
del gtl
def mask_likelihood(self):
the_mask = torch.tensor(np.ones([self.grid_dirs, self.grid_rows, self.grid_cols])).float().to(self.device)
for i in range(self.grid_rows):
for j in range(self.grid_cols):
if self.map_for_pose[i, j]>0.5:
the_mask[:,i,j]=0.0
self.likelihood = self.likelihood * the_mask
#self.likelihood = torch.clamp(self.likelihood, 1e-9, 1.0)
self.likelihood = self.likelihood/self.likelihood.sum()
def product_belief(self):
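        # Bayes measurement update: multiply the belief by the likelihood
        # (ground-truth or predicted), renormalize, and record the MAP cell
        # in self.bel_grid.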
if self.args.verbose>1: print("product_belief")
if self.args.use_gt_likelihood :
# gt = torch.from_numpy(self.gt_likelihood/self.gt_likelihood.sum()).float().to(self.divice)
gt = torch.tensor(self.gt_likelihood).float().to(self.device)
self.belief = self.belief * (gt)
#self.belief = self.belief * (self.gt_likelihood)
else:
self.belief = self.belief * (self.likelihood)
#normalize belief
self.belief /= self.belief.sum()
#update bel_grid
guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
self.bel_grid = Grid(head=guess[0],row=guess[1],col=guess[2])
def do_the_honors(self, pose, belief):
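        # Simulate a measurement update at a hypothetical pose and return the
        # change in sum(p*log p) of the belief; larger values mean a more
        # peaked (lower-entropy) posterior.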
scan_data = self.get_virtual_lidar(pose)
scan_2d, _ = self.get_scan_2d_n_headings(scan_data, self.xlim, self.ylim)
if self.args.use_gt_likelihood:
gtl = self.get_gt_likelihood_cossim(self.scans_over_map, scan_data)
likelihood = softmax(gtl, self.args.temperature)
likelihood = torch.tensor(likelihood).float().to(self.device)
else:
likelihood = self.update_likelihood_rotate(self.map_for_LM, scan_2d,
compute_loss=False)
bel = belief * likelihood
bel /= bel.sum()
new_bel_ent = float((bel * torch.log(bel)).sum())
return new_bel_ent - self.bel_ent
def get_markov_action(self):
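        # One-step active-localization policy: score each feasible action by
        # the expected entropy reduction of the belief (via do_the_honors) and
        # pick the best one.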
max_ent_diff = -np.inf
sampled_action_str = ""
# update belief entropy
self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach()
fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide)
if fwd_collision:
action_space = ['turn_left','turn_right']
else:
action_space = ['turn_left','turn_right','go_fwd']
for afp, action_str in enumerate(action_space):
virtual_target = self.get_virtual_target_pose(action_str)
### transit the belief according to the action
bel = self.belief.cpu().detach().numpy() # copy current belief into numpy
bel = self.trans_bel(bel, action_str) # transition off the actual trajectory
bel = torch.from_numpy(bel).float().to(self.device)#$ requires_grad=True)
ent_diff = self.do_the_honors(virtual_target, bel)
            if ent_diff > max_ent_diff:
                max_ent_diff = ent_diff
                sampled_action_str = action_str
                sampled_afp = afp
        self.action_str = sampled_action_str
        self.action_from_policy = sampled_afp
def get_action(self):
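        # RL policy: build the state from the (downsampled) belief, likelihood,
        # map and scan (depending on args.RL_type), run the actor-critic LSTM,
        # and pick an action by argmax or multinomial sampling; a blocked
        # forward action is resampled from the two turn probabilities.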
if self.args.use_aml:
self.get_markov_action()
return
if self.args.verbose>1: print("get_action")
if self.step_count==0:
self.cx = torch.zeros(1, 256)
self.hx = torch.zeros(1, 256)
# self.cx = Variable(torch.zeros(1, 256))
# self.hx = Variable(torch.zeros(1, 256))
else:
# these are internal states of LSTM. not for back-prop. so, detach them.
self.cx = self.cx.detach() #Variable(self.cx.data)
self.hx = self.hx.detach() #Variable(self.hx.data)
self.scan_2d_low_tensor[0,:,:]=torch.from_numpy(self.scan_2d_low).float().to(self.device)
# state = torch.cat((self.map_for_RL.detach(), self.belief, self.scan_2d_low_tensor.detach()), dim=0)
if self.args.n_state_grids == self.args.n_local_grids and self.args.n_state_dirs == self.args.n_headings:
# no downsample. preserve the path for backprop
belief_downsample = self.belief
else:
belief_downsample = np.zeros((self.args.n_state_dirs, self.args.n_state_grids, self.args.n_state_grids))
dirs = range(self.bel_grid.head%(self.grid_dirs//self.args.n_state_dirs),self.grid_dirs,self.grid_dirs//self.args.n_state_dirs)
for i,j in enumerate(dirs):
bel = self.belief[j,:,:].cpu().detach().numpy()
bel = cv2.resize(bel, (self.args.n_state_grids,self.args.n_state_grids))#,interpolation=cv2.INTER_NEAREST)
belief_downsample[i,:,:] = bel
belief_downsample /= belief_downsample.sum()
belief_downsample = torch.from_numpy(belief_downsample).float().to(self.device)
if self.args.n_state_grids == self.args.n_local_grids and self.args.n_state_dirs == self.args.n_headings:
# no downsample. preserve the path for backprop
likelihood_downsample = self.likelihood
else:
likelihood_downsample = np.zeros((self.args.n_state_dirs, self.args.n_state_grids, self.args.n_state_grids))
dirs = range(self.bel_grid.head%(self.grid_dirs//self.args.n_state_dirs),self.grid_dirs,self.grid_dirs//self.args.n_state_dirs)
for i,j in enumerate(dirs):
lik = self.likelihood[j,:,:].cpu().detach().numpy()
lik = cv2.resize(lik, (self.args.n_state_grids,self.args.n_state_grids))#,interpolation=cv2.INTER_NEAREST)
likelihood_downsample[i,:,:] = lik
likelihood_downsample /= likelihood_downsample.sum()
likelihood_downsample = torch.from_numpy(likelihood_downsample).float().to(self.device)
## map_for_RL : resize it: n_maze_grids --> n_state_grids
## scan_2d_low_tensor: n_state_grids
if self.args.RL_type == 0:
state = torch.cat((self.map_for_RL.detach(),
belief_downsample,
self.scan_2d_low_tensor.detach()), dim=0)
elif self.args.RL_type == 1:
state = torch.cat((belief_downsample,
self.scan_2d_low_tensor.detach()), dim=0)
elif self.args.RL_type == 2:
state = torch.cat((belief_downsample, likelihood_downsample), dim=0)
state2 = torch.stack((torch.from_numpy(self.map_for_LM.astype(np.float32)), torch.from_numpy(self.scan_2d_slide.astype(np.float32))), dim=0)
if self.args.update_pm_by=="BOTH" or self.args.update_pm_by=="RL":
if self.args.RL_type == 2:
value, logit, (self.hx, self.cx) = self.policy_model.forward((state.unsqueeze(0), state2.unsqueeze(0), (self.hx, self.cx)))
else:
value, logit, (self.hx, self.cx) = self.policy_model.forward((state.unsqueeze(0), (self.hx, self.cx)))
else:
if self.args.RL_type == 2:
value, logit, (self.hx, self.cx) = self.policy_model.forward((state.detach().unsqueeze(0), state2.detach().unsqueeze(0), (self.hx, self.cx)))
else:
value, logit, (self.hx, self.cx) = self.policy_model.forward((state.detach().unsqueeze(0), (self.hx, self.cx)))
#state.register_hook(print)
prob = F.softmax(logit, dim=1)
log_prob = F.log_softmax(logit, dim=1)
entropy = -(log_prob * prob).sum(1, keepdim=True)
        if self.optimizer is not None:
self.entropies.append(entropy)
if self.args.verbose>2:
print ("entropies", len(self.entropies))
self.prob=prob.cpu().detach().numpy()
#argmax for action
if self.args.action == 'argmax' or self.rl_test:
action = [[torch.argmax(prob)]]
action = torch.as_tensor(action)#, device=self.device)
elif self.args.action == 'multinomial':
#multinomial sampling for action
# prob = torch.clamp(prob, 1e-10, 1.0)
# if self.args.update_rl == False:
action = prob.multinomial(num_samples=1) #.cpu().detach()
else:
raise Exception('action sampling method required')
#action = sample(logit)
#log_prob = log_prob.gather(1, Variable(action))
log_prob = log_prob.gather(1, action)
#print ('1:%f, 2:%f'%(log_prob.gather(1,action), log_prob[0,action]))
# if self.args.detach_models == True:
# intri_reward = self.intri_model(Variable(state.unsqueeze(0)), action)
# else:
# intri_reward = self.intri_model(state.unsqueeze(0), action)
# self.intri_rewards.append(intri_reward)
        if self.optimizer is not None:
self.values.append(value)
self.log_probs.append(log_prob)
if self.args.verbose > 2:
print ("values", len(self.values))
print ("log_probs", len(self.log_probs))
#self.log_probs.append(log_prob[0,action])
self.action_str = self.action_space[action.item()]
self.action_from_policy = action.item()
        # now see if the action is safe or valid. it applies only to 'go_fwd'
        if self.action_str == 'go_fwd' and self.collision_fnc(0, 0, 0, self.scan_2d_slide):
            # then need to check collision
self.collision_attempt = prob[0,2].item()
# print ('collision attempt: %f'%self.collision_attempt)
#sample from prob[0,:2]
self.action_from_policy = prob[0,:2].multinomial(num_samples=1).item()
self.action_str = self.action_space[self.action_from_policy]
# print ('action:%s'%self.action_str)
else:
self.collision_attempt = 0
del state, log_prob, value, action, belief_downsample, entropy, prob
def navigate(self):
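        # Navigation mode: build the dilated four-direction maps once, plan a
        # shortest path from the believed cell to args.navigate_to, and take
        # the first action on that path; when the forward step would collide,
        # turn randomly first and retry the forward step on the next call.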
if not hasattr(self, 'map_to_N'):
print ('generating maps')
kernel = np.ones((3,3),np.uint8)
navi_map = cv2.dilate(self.map_for_LM, kernel, iterations=self.cr_pixels+1)
if self.args.figure:
self.ax_map.imshow(navi_map, alpha=0.3)
self.map_to_N, self.map_to_E, self.map_to_S, self.map_to_W = generate_four_maps(navi_map, self.grid_rows, self.grid_cols)
bel_cell = Cell(self.bel_grid.row, self.bel_grid.col)
# print (self.bel_grid)
self.target_cell = Cell(self.args.navigate_to[0],self.args.navigate_to[1])
distance_map = compute_shortest(self.map_to_N,self.map_to_E,self.map_to_S,self.map_to_W, bel_cell, self.target_cell, self.grid_rows)
# print (distance_map)
shortest_path = give_me_path(distance_map, bel_cell, self.target_cell, self.grid_rows)
if self.args.figure:
self.draw_path(self.ax_map, shortest_path)
action_list = give_me_actions(shortest_path, self.bel_grid.head)
self.action_from_policy = action_list[0]
# print ('actions', action_list)
if self.next_action is None:
self.action_str = self.action_space[self.action_from_policy]
else:
self.action_from_policy = self.next_action
self.action_str = self.action_space[self.next_action]
self.next_action = None
if self.action_str == 'go_fwd' and self.collision_fnc(0, 0, 0, self.scan_2d_slide):
self.action_from_policy = np.random.randint(2)
self.action_str = self.action_space[self.action_from_policy]
self.next_action = 2
else:
self.next_action = None
if self.action_str == "hold":
self.skip_to_end = True
self.step_count = self.step_max -1
def sample_action(self):
if self.args.manual_control:
action = -1
while action < 0:
print ("suggested action: %s"%self.action_str)
if self.args.num_actions == 4:
keyin = raw_input ("[f]orward/[l]eft/[r]ight/[h]old/[a]uto/[c]ontinue/[n]ext_ep/[q]uit: ")
elif self.args.num_actions == 3:
keyin = raw_input ("[f]orward/[l]eft/[r]ight/[a]uto/[c]ontinue/[n]ext_ep/[q]uit: ")
if keyin == "f":
action = 2
elif keyin == "l":
action = 0
elif keyin == "r":
action = 1
elif keyin == "h" and self.args.num_actions == 4:
action = 3
elif keyin == "a":
action = self.action_from_policy
elif keyin == "c":
self.args.manual_control = False
action = self.action_from_policy
elif keyin == "n":
self.skip_to_end = True
self.step_count = self.step_max-1
action = self.action_from_policy
elif keyin == "q":
self.quit_sequence()
self.action_idx = action
self.action_str = self.action_space[self.action_idx]
else:
self.action_idx = self.action_from_policy
self.action_str = self.action_space[self.action_idx]
def quit_sequence(self):
self.wrap_up()
if self.args.jay1 or self.args.gazebo:
rospy.logwarn("Quit")
rospy.signal_shutdown("Quit")
exit()
def get_virtual_target_pose(self, action_str):
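        # Return the pose that would result from applying action_str at the
        # currently believed pose (no process noise).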
start_pose = Pose2d(0,0,0)
start_pose.x = self.believed_pose.x
start_pose.y = self.believed_pose.y
start_pose.theta = self.believed_pose.theta
goal_pose = Pose2d(0,0,0)
offset = self.heading_resol*self.args.rot_step
if action_str == "turn_right":
goal_pose.theta = wrap(start_pose.theta-offset)
goal_pose.x = start_pose.x
goal_pose.y = start_pose.y
elif action_str == "turn_left":
goal_pose.theta = wrap(start_pose.theta+offset)
goal_pose.x = start_pose.x
goal_pose.y = start_pose.y
elif action_str == "go_fwd":
goal_pose.x = start_pose.x + math.cos(start_pose.theta)*self.fwd_step_meters
goal_pose.y = start_pose.y + math.sin(start_pose.theta)*self.fwd_step_meters
goal_pose.theta = start_pose.theta
elif action_str == "hold":
return start_pose
else:
print('undefined action name %s'%action_str)
exit()
return goal_pose
def update_target_pose(self):
self.last_pose.x = self.perturbed_goal_pose.x
self.last_pose.y = self.perturbed_goal_pose.y
self.last_pose.theta = self.perturbed_goal_pose.theta
self.start_pose.x = self.perturbed_goal_pose.x
self.start_pose.y = self.perturbed_goal_pose.y
self.start_pose.theta = self.perturbed_goal_pose.theta
offset = self.heading_resol*self.args.rot_step
if self.action_str == "turn_right":
self.goal_pose.theta = wrap(self.start_pose.theta-offset)
self.goal_pose.x = self.start_pose.x
self.goal_pose.y = self.start_pose.y
elif self.action_str == "turn_left":
self.goal_pose.theta = wrap(self.start_pose.theta+offset)
self.goal_pose.x = self.start_pose.x
self.goal_pose.y = self.start_pose.y
elif self.action_str == "go_fwd":
self.goal_pose.x = self.start_pose.x + math.cos(self.start_pose.theta)*self.fwd_step_meters
self.goal_pose.y = self.start_pose.y + math.sin(self.start_pose.theta)*self.fwd_step_meters
self.goal_pose.theta = self.start_pose.theta
elif self.action_str == "hold":
return
else:
print('undefined action name %s'%self.action_str)
exit()
delta_x, delta_y = 0,0
delta_theta = 0
if self.args.process_error[0]>0 or self.args.process_error[1]>0:
delta_x, delta_y = np.random.normal(scale=self.args.process_error[0],size=2)
delta_theta = np.random.normal(scale=self.args.process_error[1])
if self.args.verbose > 1:
print ('%f, %f, %f'%(delta_x, delta_y, math.degrees(delta_theta)))
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = wrap(self.goal_pose.theta+delta_theta)
def collision_fnc(self, x, y, rad, img):
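        # Collision test against the binary image img: True when the bounding
        # box of a disc of radius rad centered at (x, y) leaves the map, or
        # when any occupied cell lies within rad of (x, y); rad == 0 tests the
        # single cell at (x, y).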
corner0 = [x+rad,y+rad]
corner1 = [x-rad,y-rad]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
if x0 < 0 :
return True
if y0 < 0:
return True
if x1 >= self.map_rows:
return True
if y1 >= self.map_cols:
return True
# x0 = max(0, x0)
# y0 = max(0, y0)
# x1 = min(self.map_rows-1, x1)
# y1 = min(self.map_cols-1, y1)
        if rad == 0:
            return img[x0, y0] > 0.5
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
dx = to_real(ir, self.xlim, self.map_rows) - x
dy = to_real(ic, self.ylim, self.map_cols) - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad and img[ir,ic]==1.0:
return True
return False
def collision_check(self):
row=to_index(self.perturbed_goal_pose.x, self.grid_rows, self.xlim)
col=to_index(self.perturbed_goal_pose.y, self.grid_cols, self.ylim)
x = self.perturbed_goal_pose.x
y = self.perturbed_goal_pose.y
rad = self.collision_radius
if self.args.collision_from == "scan" and self.action_str == "go_fwd":
self.collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide)
elif self.args.collision_from == "map":
self.collision = self.collision_fnc(x,y,rad, self.map_for_LM)
else:
self.collision = False
if self.collision:
self.collision_pose.x = self.perturbed_goal_pose.x
self.collision_pose.y = self.perturbed_goal_pose.y
self.collision_pose.theta = self.perturbed_goal_pose.theta
self.collision_grid.row = row
self.collision_grid.col = col
self.collision_grid.head = self.true_grid.head
if self.collision:
#undo update target
self.perturbed_goal_pose.x = self.last_pose.x
self.perturbed_goal_pose.y = self.last_pose.y
self.perturbed_goal_pose.theta = self.last_pose.theta
def get_virtual_lidar(self, current_pose):
ranges = self.get_a_scan(current_pose.x, current_pose.y, offset=current_pose.theta, fov=True)
bearing_deg = np.arange(360.0)
mindeg=0
maxdeg=359
incrementdeg=1
params = {'ranges': ranges,
'angle_min': math.radians(mindeg),
'angle_max': math.radians(maxdeg),
'range_min': self.min_scan_range,
'range_max': self.max_scan_range}
scan_data = Lidar(**params)
return scan_data
def get_lidar(self):
# fix output resolution 1 deg
# fill unseen angles with nan's
# angle_min, angle_max: can be [-pi,pi], [0, 2pi], [-130, 130], etc.
# store them in [0, 2pi] format. and [-pi, pi] format too.
ranges = self.get_a_scan(self.current_pose.x, self.current_pose.y,
offset=self.current_pose.theta,
noise=self.args.lidar_noise,
sigma=self.args.lidar_sigma,
fov=True)
bearing_deg = np.arange(360.0)
mindeg=0
maxdeg=359
incrementdeg=1
params = {'ranges': ranges,
'angle_min': math.radians(mindeg),
'angle_max': math.radians(maxdeg),
'range_min': self.min_scan_range,
'range_max': self.max_scan_range}
self.scan_data = Lidar(**params)
## scan_data @ unperturbed pose
x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
offset = self.heading_resol*self.true_grid.head
ranges = self.get_a_scan(x, y, offset=offset, noise=0, sigma=0, fov=True)
params = {'ranges': ranges,
'angle_min': math.radians(mindeg),
'angle_max': math.radians(maxdeg),
'range_min': self.min_scan_range,
'range_max': self.max_scan_range}
self.scan_data_at_unperturbed = Lidar(**params)
def fwd_clear(self):
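        # Return True when the frontal wedge of the current scan (the cone a
        # robot of width 2*collision_radius would sweep) contains no finite
        # return closer than collision_radius + 5 cm.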
robot_width = 2*self.collision_radius
safe_distance = 0.05 + self.collision_radius
left_corner = (wrap_2pi(np.arctan2(self.collision_radius, safe_distance)))
right_corner = (wrap_2pi(np.arctan2(-self.collision_radius, safe_distance)))
angles = self.scan_data.angles_2pi
ranges = self.scan_data.ranges_2pi[(angles < left_corner) | (angles > right_corner)]
        ranges = ranges[~np.isnan(ranges) & ~np.isinf(ranges)]
        if ranges.size == 0:
            return True
val = np.min(ranges)
        return val > safe_distance
def execute_action_teleport(self):
if self.args.verbose>1: print("execute_action_teleport")
if self.collision:
return False
# if self.action_str == "go_fwd_blocked":
# return True
# if self.args.perturb > 0:
# self.turtle_pose_msg.position.x = self.perturbed_goal_pose.x
# self.turtle_pose_msg.position.y = self.perturbed_goal_pose.y
# yaw = self.perturbed_goal_pose.theta
# else:
# self.turtle_pose_msg.position.x = self.goal_pose.x
# self.turtle_pose_msg.position.y = self.goal_pose.y
# yaw = self.goal_pose.theta
# self.turtle_pose_msg.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, yaw))
self.teleport_turtle()
return True
def transit_belief(self):
if self.args.verbose>1: print("transit_belief")
self.belief = self.belief.cpu().detach().numpy()
        if self.collision:
self.belief = torch.from_numpy(self.belief).float().to(self.device)
return
self.belief=self.trans_bel(self.belief, self.action_str)
self.belief = torch.from_numpy(self.belief).float().to(self.device)#$ requires_grad=True)
def trans_bel(self, bel, action):
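        # Prediction step of the grid Bayes filter: turns roll the belief
        # along the heading axis by rot_step; go_fwd (with args.trans_belief
        # == "roll") shifts each heading slice one cell along its direction
        # of travel.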
rotation_step = self.args.rot_step
if action == "turn_right":
bel=np.roll(bel,-rotation_step, axis=0)
elif action == "turn_left":
bel=np.roll(bel, rotation_step, axis = 0)
elif action == "go_fwd":
if self.args.trans_belief == "roll":
i=0
bel[i,:,:]=np.roll(bel[i,:,:], -1, axis=0)
i=1
bel[i,:,:]=np.roll(bel[i,:,:], -1, axis=1)
i=2
                bel[i,:,:]=np.roll(bel[i,:,:], 1, axis=0)